bnx2x: Remove misleading error print
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1-3"
#define DRV_MODULE_RELDATE	"2009/11/05"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
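
/* Editorial sketch (not part of the original file): before the BAR-based
 * REG_RD/REG_WR path is usable, a GRC register can still be reached through
 * the PCI config window opened above, e.g.:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, reg_offset);
 *
 * where "reg_offset" stands for any valid GRC register offset.
 */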

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

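/* Editorial worked example (illustrative only): if DMAE_LEN32_WR_MAX were
 * 0x400 dwords, a 0x900-dword transfer through bnx2x_write_dmae_phys_len()
 * would issue three DMAE commands of 0x400, 0x400 and 0x100 dwords, with
 * the byte offset advancing by chunk-length * 4 after each command.
 */
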
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

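/* Editorial note: bnx2x_update_fpsb_idx() returns a bitmask - bit 0 is set
 * when the CSTORM (Tx) status-block index advanced and bit 1 when the
 * USTORM (Rx) index advanced; callers may test it for pending work or
 * simply ignore the return value.
 */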

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

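/* Editorial note: in bnx2x_free_tx_pkt() above, nbd counts the BDs left to
 * walk after the start BD; the parse BD and the optional TSO split-header
 * BD are stepped over without unmapping because only the start and data
 * BDs carry DMA mappings.
 */
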
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

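/* Editorial worked example (hypothetical numbers): with tx_ring_size = 1000,
 * prod = 100, cons = 50 and NUM_TX_RINGS = 16, bnx2x_tx_avail() computes
 * used = (100 - 50) + 16 = 66 and returns 1000 - 66 = 934, i.e. the
 * "next-page" BDs are accounted as permanently used.
 */
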
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

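/* Editorial note: sge_mask is a bitmap with one bit per SGE ring entry,
 * stored in u64 elements.  bnx2x_update_sge_prod() clears the bits of SGEs
 * consumed by the FW and then advances rx_sge_prod over mask elements that
 * became all zero, re-arming each such element to all ones.
 */
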
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

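/* Editorial summary of the TPA flow: bnx2x_tpa_start() parks the skb that is
 * being aggregated in tpa_pool[queue]; bnx2x_tpa_stop() pulls it back, fixes
 * the IP checksum, attaches the SGE pages via bnx2x_fill_frag_skb() and
 * hands the aggregated packet to the stack (or drops it on allocation
 * failure).
 */
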
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
							  len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}

1770static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1771{
555f6c78 1772 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1773 u16 status = bnx2x_ack_int(bp);
34f80b04 1774 u16 mask;
ca00392c 1775 int i;
a2fbb9ea 1776
34f80b04 1777 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1778 if (unlikely(status == 0)) {
1779 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1780 return IRQ_NONE;
1781 }
f5372251 1782 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1783
34f80b04 1784 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1785 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1786 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1787 return IRQ_HANDLED;
1788 }
1789
3196a88a
EG
1790#ifdef BNX2X_STOP_ON_ERROR
1791 if (unlikely(bp->panic))
1792 return IRQ_HANDLED;
1793#endif
1794
ca00392c
EG
1795 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1796 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1797
ca00392c
EG
1798 mask = 0x2 << fp->sb_id;
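		/*
		 * Status bit 0 is reserved for the slow path (see the
		 * "status & 0x1" handling below), so fastpath status block
		 * n owns status bit n+1: sb_id 0 -> mask 0x2, sb_id 3 ->
		 * mask 0x10, and so on.
		 */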
1799 if (status & mask) {
1800 /* Handle Rx or Tx according to SB id */
1801 if (fp->is_rx_queue) {
1802 prefetch(fp->rx_cons_sb);
1803 prefetch(&fp->status_blk->u_status_block.
1804 status_block_index);
a2fbb9ea 1805
ca00392c 1806 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1807
ca00392c
EG
1808 } else {
1809 prefetch(fp->tx_cons_sb);
1810 prefetch(&fp->status_blk->c_status_block.
1811 status_block_index);
1812
1813 bnx2x_update_fpsb_idx(fp);
1814 rmb();
1815 bnx2x_tx_int(fp);
1816
1817 /* Re-enable interrupts */
1818 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1819 le16_to_cpu(fp->fp_u_idx),
1820 IGU_INT_NOP, 1);
1821 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1822 le16_to_cpu(fp->fp_c_idx),
1823 IGU_INT_ENABLE, 1);
1824 }
1825 status &= ~mask;
1826 }
a2fbb9ea
ET
1827 }
1828
993ac7b5
MC
1829#ifdef BCM_CNIC
1830 mask = 0x2 << CNIC_SB_ID(bp);
1831 if (status & (mask | 0x1)) {
1832 struct cnic_ops *c_ops = NULL;
1833
1834 rcu_read_lock();
1835 c_ops = rcu_dereference(bp->cnic_ops);
1836 if (c_ops)
1837 c_ops->cnic_handler(bp->cnic_data, NULL);
1838 rcu_read_unlock();
1839
1840 status &= ~mask;
1841 }
1842#endif
a2fbb9ea 1843
34f80b04 1844 if (unlikely(status & 0x1)) {
1cf167f2 1845 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1846
1847 status &= ~0x1;
1848 if (!status)
1849 return IRQ_HANDLED;
1850 }
1851
34f80b04
EG
1852 if (status)
1853 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1854 status);
a2fbb9ea 1855
c18487ee 1856 return IRQ_HANDLED;
a2fbb9ea
ET
1857}
1858
c18487ee 1859/* end of fast path */
a2fbb9ea 1860
bb2a0f7a 1861static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1862
c18487ee
YR
1863/* Link */
1864
1865/*
1866 * General service functions
1867 */
a2fbb9ea 1868
4a37fb66 1869static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1870{
1871 u32 lock_status;
1872 u32 resource_bit = (1 << resource);
4a37fb66
YG
1873 int func = BP_FUNC(bp);
1874 u32 hw_lock_control_reg;
c18487ee 1875 int cnt;
a2fbb9ea 1876
c18487ee
YR
1877 /* Validating that the resource is within range */
1878 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1879 DP(NETIF_MSG_HW,
1880 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1881 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1882 return -EINVAL;
1883 }
a2fbb9ea 1884
4a37fb66
YG
1885 if (func <= 5) {
1886 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1887 } else {
1888 hw_lock_control_reg =
1889 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1890 }
1891
c18487ee 1892 /* Validating that the resource is not already taken */
4a37fb66 1893 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1894 if (lock_status & resource_bit) {
1895 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1896 lock_status, resource_bit);
1897 return -EEXIST;
1898 }
a2fbb9ea 1899
46230476
EG
 1900	/* Try for 5 seconds, polling every 5ms */
1901 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1902 /* Try to acquire the lock */
4a37fb66
YG
1903 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1904 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1905 if (lock_status & resource_bit)
1906 return 0;
a2fbb9ea 1907
c18487ee 1908 msleep(5);
a2fbb9ea 1909 }
c18487ee
YR
1910 DP(NETIF_MSG_HW, "Timeout\n");
1911 return -EAGAIN;
1912}
a2fbb9ea 1913
4a37fb66 1914static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1915{
1916 u32 lock_status;
1917 u32 resource_bit = (1 << resource);
4a37fb66
YG
1918 int func = BP_FUNC(bp);
1919 u32 hw_lock_control_reg;
a2fbb9ea 1920
c18487ee
YR
1921 /* Validating that the resource is within range */
1922 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1923 DP(NETIF_MSG_HW,
1924 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1925 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1926 return -EINVAL;
1927 }
1928
4a37fb66
YG
1929 if (func <= 5) {
1930 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1931 } else {
1932 hw_lock_control_reg =
1933 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1934 }
1935
c18487ee 1936 /* Validating that the resource is currently taken */
4a37fb66 1937 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1938 if (!(lock_status & resource_bit)) {
1939 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1940 lock_status, resource_bit);
1941 return -EFAULT;
a2fbb9ea
ET
1942 }
1943
4a37fb66 1944 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1945 return 0;
1946}
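/* The two helpers above always bracket accesses to a shared hardware
 * resource. A minimal caller sketch (illustration only):
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO) == 0) {
 *		... read-modify-write MISC_REG_GPIO ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 *
 * bnx2x_set_gpio() and bnx2x_set_spio() below follow exactly this
 * pattern with the GPIO and SPIO resources respectively.
 */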
1947
1948/* HW Lock for shared dual port PHYs */
4a37fb66 1949static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1950{
34f80b04 1951 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1952
46c6a674
EG
1953 if (bp->port.need_hw_lock)
1954 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1955}
a2fbb9ea 1956
4a37fb66 1957static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1958{
46c6a674
EG
1959 if (bp->port.need_hw_lock)
1960 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1961
34f80b04 1962 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1963}
a2fbb9ea 1964
4acac6a5
EG
1965int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1966{
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1973 u32 gpio_reg;
1974 int value;
1975
1976 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1977 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1978 return -EINVAL;
1979 }
1980
1981 /* read GPIO value */
1982 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1983
1984 /* get the requested pin value */
1985 if ((gpio_reg & gpio_mask) == gpio_mask)
1986 value = 1;
1987 else
1988 value = 0;
1989
1990 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1991
1992 return value;
1993}
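/* Worked example for the shift logic above, assuming
 * MISC_REGISTERS_GPIO_PORT_SHIFT is 4 (an illustrative value, not
 * taken from this file): reading GPIO 2 on port 1 with no swap gives
 * gpio_port = 1 and gpio_shift = 2 + 4 = 6, so the pin state is bit 6
 * of MISC_REG_GPIO.
 */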
1994
17de50b7 1995int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1996{
1997 /* The GPIO should be swapped if swap register is set and active */
1998 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1999 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2000 int gpio_shift = gpio_num +
2001 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2002 u32 gpio_mask = (1 << gpio_shift);
2003 u32 gpio_reg;
a2fbb9ea 2004
c18487ee
YR
2005 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2006 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2007 return -EINVAL;
2008 }
a2fbb9ea 2009
4a37fb66 2010 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2011 /* read GPIO and mask except the float bits */
2012 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2013
c18487ee
YR
2014 switch (mode) {
2015 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2016 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2017 gpio_num, gpio_shift);
2018 /* clear FLOAT and set CLR */
2019 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2020 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2021 break;
a2fbb9ea 2022
c18487ee
YR
2023 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2024 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2025 gpio_num, gpio_shift);
2026 /* clear FLOAT and set SET */
2027 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2028 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2029 break;
a2fbb9ea 2030
17de50b7 2031 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2032 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2033 gpio_num, gpio_shift);
2034 /* set FLOAT */
2035 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2036 break;
a2fbb9ea 2037
c18487ee
YR
2038 default:
2039 break;
a2fbb9ea
ET
2040 }
2041
c18487ee 2042 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2043 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2044
c18487ee 2045 return 0;
a2fbb9ea
ET
2046}
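/* MISC_REG_GPIO packs several control fields per pin; the code above
 * shifts the per-pin mask into the FLOAT, SET or CLR field via
 * MISC_REGISTERS_GPIO_FLOAT_POS/_SET_POS/_CLR_POS. FLOAT tristates
 * the pin while SET/CLR drive it high or low, so exactly one of the
 * three is selected in the value written back.
 */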
2047
4acac6a5
EG
2048int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2049{
2050 /* The GPIO should be swapped if swap register is set and active */
2051 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2052 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2053 int gpio_shift = gpio_num +
2054 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2055 u32 gpio_mask = (1 << gpio_shift);
2056 u32 gpio_reg;
2057
2058 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2059 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2060 return -EINVAL;
2061 }
2062
2063 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2064 /* read GPIO int */
2065 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2066
2067 switch (mode) {
2068 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2069 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2070 "output low\n", gpio_num, gpio_shift);
2071 /* clear SET and set CLR */
2072 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2073 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2074 break;
2075
2076 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2077 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2078 "output high\n", gpio_num, gpio_shift);
2079 /* clear CLR and set SET */
2080 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2081 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2082 break;
2083
2084 default:
2085 break;
2086 }
2087
2088 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2089 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2090
2091 return 0;
2092}
2093
c18487ee 2094static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2095{
c18487ee
YR
2096 u32 spio_mask = (1 << spio_num);
2097 u32 spio_reg;
a2fbb9ea 2098
c18487ee
YR
2099 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2100 (spio_num > MISC_REGISTERS_SPIO_7)) {
2101 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2102 return -EINVAL;
a2fbb9ea
ET
2103 }
2104
4a37fb66 2105 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2106 /* read SPIO and mask except the float bits */
2107 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2108
c18487ee 2109 switch (mode) {
6378c025 2110 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2111 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2112 /* clear FLOAT and set CLR */
2113 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2114 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2115 break;
a2fbb9ea 2116
6378c025 2117 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2118 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2119 /* clear FLOAT and set SET */
2120 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2121 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2122 break;
a2fbb9ea 2123
c18487ee
YR
2124 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2125 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2126 /* set FLOAT */
2127 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2128 break;
a2fbb9ea 2129
c18487ee
YR
2130 default:
2131 break;
a2fbb9ea
ET
2132 }
2133
c18487ee 2134 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2135 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2136
a2fbb9ea
ET
2137 return 0;
2138}
2139
c18487ee 2140static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2141{
ad33ea3a
EG
2142 switch (bp->link_vars.ieee_fc &
2143 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2144 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2145 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2146 ADVERTISED_Pause);
2147 break;
356e2385 2148
c18487ee 2149 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2150 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2151 ADVERTISED_Pause);
2152 break;
356e2385 2153
c18487ee 2154 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2155 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2156 break;
356e2385 2157
c18487ee 2158 default:
34f80b04 2159 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2160 ADVERTISED_Pause);
2161 break;
2162 }
2163}
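/* The mapping implemented above, from the negotiated ieee_fc pause
 * bits to the ethtool advertising flags:
 *
 *	PAUSE_NONE       -> neither Pause nor Asym_Pause
 *	PAUSE_BOTH       -> Pause | Asym_Pause
 *	PAUSE_ASYMMETRIC -> Asym_Pause only
 *	anything else    -> neither (treated as NONE)
 */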
f1410647 2164
c18487ee
YR
2165static void bnx2x_link_report(struct bnx2x *bp)
2166{
f34d28ea 2167 if (bp->flags & MF_FUNC_DIS) {
2691d51d
EG
2168 netif_carrier_off(bp->dev);
2169 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2170 return;
2171 }
2172
c18487ee 2173 if (bp->link_vars.link_up) {
35c5f8fe
EG
2174 u16 line_speed;
2175
c18487ee
YR
2176 if (bp->state == BNX2X_STATE_OPEN)
2177 netif_carrier_on(bp->dev);
2178 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2179
35c5f8fe
EG
2180 line_speed = bp->link_vars.line_speed;
2181 if (IS_E1HMF(bp)) {
2182 u16 vn_max_rate;
2183
2184 vn_max_rate =
2185 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2186 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2187 if (vn_max_rate < line_speed)
2188 line_speed = vn_max_rate;
2189 }
2190 printk("%d Mbps ", line_speed);
f1410647 2191
c18487ee
YR
2192 if (bp->link_vars.duplex == DUPLEX_FULL)
2193 printk("full duplex");
2194 else
2195 printk("half duplex");
f1410647 2196
c0700f90
DM
2197 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2198 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2199 printk(", receive ");
356e2385
EG
2200 if (bp->link_vars.flow_ctrl &
2201 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2202 printk("& transmit ");
2203 } else {
2204 printk(", transmit ");
2205 }
2206 printk("flow control ON");
2207 }
2208 printk("\n");
f1410647 2209
c18487ee
YR
2210 } else { /* link_down */
2211 netif_carrier_off(bp->dev);
2212 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2213 }
c18487ee
YR
2214}
2215
b5bf9068 2216static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2217{
19680c48
EG
2218 if (!BP_NOMCP(bp)) {
2219 u8 rc;
a2fbb9ea 2220
19680c48 2221 /* Initialize link parameters structure variables */
8c99e7b0
YR
2222 /* It is recommended to turn off RX FC for jumbo frames
2223 for better performance */
0c593270 2224 if (bp->dev->mtu > 5000)
c0700f90 2225 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2226 else
c0700f90 2227 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2228
4a37fb66 2229 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2230
2231 if (load_mode == LOAD_DIAG)
2232 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2233
19680c48 2234 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2235
4a37fb66 2236 bnx2x_release_phy_lock(bp);
a2fbb9ea 2237
3c96c68b
EG
2238 bnx2x_calc_fc_adv(bp);
2239
b5bf9068
EG
2240 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2241 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2242 bnx2x_link_report(bp);
b5bf9068 2243 }
34f80b04 2244
19680c48
EG
2245 return rc;
2246 }
f5372251 2247 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2248 return -EINVAL;
a2fbb9ea
ET
2249}
2250
c18487ee 2251static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2252{
19680c48 2253 if (!BP_NOMCP(bp)) {
4a37fb66 2254 bnx2x_acquire_phy_lock(bp);
19680c48 2255 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2256 bnx2x_release_phy_lock(bp);
a2fbb9ea 2257
19680c48
EG
2258 bnx2x_calc_fc_adv(bp);
2259 } else
f5372251 2260 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2261}
a2fbb9ea 2262
c18487ee
YR
2263static void bnx2x__link_reset(struct bnx2x *bp)
2264{
19680c48 2265 if (!BP_NOMCP(bp)) {
4a37fb66 2266 bnx2x_acquire_phy_lock(bp);
589abe3a 2267 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2268 bnx2x_release_phy_lock(bp);
19680c48 2269 } else
f5372251 2270 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2271}
a2fbb9ea 2272
c18487ee
YR
2273static u8 bnx2x_link_test(struct bnx2x *bp)
2274{
2275 u8 rc;
a2fbb9ea 2276
4a37fb66 2277 bnx2x_acquire_phy_lock(bp);
c18487ee 2278 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2279 bnx2x_release_phy_lock(bp);
a2fbb9ea 2280
c18487ee
YR
2281 return rc;
2282}
a2fbb9ea 2283
8a1c38d1 2284static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2285{
8a1c38d1
EG
2286 u32 r_param = bp->link_vars.line_speed / 8;
2287 u32 fair_periodic_timeout_usec;
2288 u32 t_fair;
34f80b04 2289
8a1c38d1
EG
2290 memset(&(bp->cmng.rs_vars), 0,
2291 sizeof(struct rate_shaping_vars_per_port));
2292 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2293
8a1c38d1
EG
2294 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2295 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2296
8a1c38d1
EG
 2297	/* this is the threshold below which no timer arming will occur;
 2298	   the 1.25 coefficient makes the threshold a little bigger than
 2299	   the real time, to compensate for timer inaccuracy */
2300 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2301 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2302
8a1c38d1
EG
2303 /* resolution of fairness timer */
2304 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2305 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2306 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2307
8a1c38d1
EG
2308 /* this is the threshold below which we won't arm the timer anymore */
2309 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2310
8a1c38d1
EG
 2311	/* we multiply by 1e3/8 to get bytes/msec.
 2312	   We don't want the credits to exceed a credit
 2313	   of t_fair*FAIR_MEM (the algorithm resolution) */
2314 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2315 /* since each tick is 4 usec */
2316 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2317}
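/* Worked example for the arithmetic above at 10G: line_speed = 10000
 * Mbps gives r_param = 1250 bytes/usec, and with the 100 usec
 * rate-shaping period rs_threshold = 100 * 1250 * 5 / 4 = 156250
 * bytes (the 5/4 factor is the 1.25 coefficient mentioned above).
 * The "for 10G it is 1000usec" note implies T_FAIR_COEF / 10000 ==
 * 1000, i.e. T_FAIR_COEF is 10^7 in these units.
 */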
2318
2691d51d
EG
2319/* Calculates the sum of vn_min_rates.
2320 It's needed for further normalizing of the min_rates.
2321 Returns:
2322 sum of vn_min_rates.
2323 or
2324 0 - if all the min_rates are 0.
 2325   In the latter case the fairness algorithm should be deactivated.
2326 If not all min_rates are zero then those that are zeroes will be set to 1.
2327 */
2328static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2329{
2330 int all_zero = 1;
2331 int port = BP_PORT(bp);
2332 int vn;
2333
2334 bp->vn_weight_sum = 0;
2335 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2336 int func = 2*vn + port;
2337 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2338 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2339 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2340
2341 /* Skip hidden vns */
2342 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2343 continue;
2344
2345 /* If min rate is zero - set it to 1 */
2346 if (!vn_min_rate)
2347 vn_min_rate = DEF_MIN_RATE;
2348 else
2349 all_zero = 0;
2350
2351 bp->vn_weight_sum += vn_min_rate;
2352 }
2353
2354 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2355 if (all_zero) {
2356 bp->cmng.flags.cmng_enables &=
2357 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2358 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2359 " fairness will be disabled\n");
2360 } else
2361 bp->cmng.flags.cmng_enables |=
2362 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2363}
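/* Example of the normalization above: with configured min BW values
 * of, say, {0, 10, 0, 30}, the two zero VNs are bumped to
 * DEF_MIN_RATE and fairness stays enabled; only when all four are
 * zero is CMNG_FLAGS_PER_PORT_FAIRNESS_VN cleared and the fairness
 * algorithm skipped entirely.
 */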
2364
8a1c38d1 2365static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2366{
2367 struct rate_shaping_vars_per_vn m_rs_vn;
2368 struct fairness_vars_per_vn m_fair_vn;
2369 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2370 u16 vn_min_rate, vn_max_rate;
2371 int i;
2372
2373 /* If function is hidden - set min and max to zeroes */
2374 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2375 vn_min_rate = 0;
2376 vn_max_rate = 0;
2377
2378 } else {
2379 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2380 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2381 /* If min rate is zero - set it to 1 */
2382 if (!vn_min_rate)
34f80b04
EG
2383 vn_min_rate = DEF_MIN_RATE;
2384 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2385 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2386 }
8a1c38d1 2387 DP(NETIF_MSG_IFUP,
b015e3d1 2388 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2389 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2390
2391 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2392 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2393
2394 /* global vn counter - maximal Mbps for this vn */
2395 m_rs_vn.vn_counter.rate = vn_max_rate;
2396
2397 /* quota - number of bytes transmitted in this period */
2398 m_rs_vn.vn_counter.quota =
2399 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2400
8a1c38d1 2401 if (bp->vn_weight_sum) {
34f80b04
EG
2402 /* credit for each period of the fairness algorithm:
 2403		   number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2404 vn_weight_sum should not be larger than 10000, thus
2405 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2406 than zero */
34f80b04 2407 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2408 max((u32)(vn_min_rate * (T_FAIR_COEF /
2409 (8 * bp->vn_weight_sum))),
2410 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2411 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2412 m_fair_vn.vn_credit_delta);
2413 }
2414
34f80b04
EG
2415 /* Store it to internal memory */
2416 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2417 REG_WR(bp, BAR_XSTRORM_INTMEM +
2418 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2419 ((u32 *)(&m_rs_vn))[i]);
2420
2421 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2422 REG_WR(bp, BAR_XSTRORM_INTMEM +
2423 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2424 ((u32 *)(&m_fair_vn))[i]);
2425}
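/* Quota example for the code above: with vn_max_rate = 10000 Mbps
 * and the 100 usec rate-shaping period, quota = 10000 * 100 / 8 =
 * 125000 bytes per period. The fairness credit delta is bounded from
 * below by twice fair_threshold, so a VN with a tiny min rate still
 * makes progress in every fairness period.
 */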
2426
8a1c38d1 2427
c18487ee
YR
2428/* This function is called upon link interrupt */
2429static void bnx2x_link_attn(struct bnx2x *bp)
2430{
bb2a0f7a
YG
2431 /* Make sure that we are synced with the current statistics */
2432 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2433
c18487ee 2434 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2435
bb2a0f7a
YG
2436 if (bp->link_vars.link_up) {
2437
1c06328c 2438 /* dropless flow control */
a18f5128 2439 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2440 int port = BP_PORT(bp);
2441 u32 pause_enabled = 0;
2442
2443 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2444 pause_enabled = 1;
2445
2446 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2447 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2448 pause_enabled);
2449 }
2450
bb2a0f7a
YG
2451 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2452 struct host_port_stats *pstats;
2453
2454 pstats = bnx2x_sp(bp, port_stats);
2455 /* reset old bmac stats */
2456 memset(&(pstats->mac_stx[0]), 0,
2457 sizeof(struct mac_stx));
2458 }
f34d28ea 2459 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2460 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2461 }
2462
c18487ee
YR
2463 /* indicate link status */
2464 bnx2x_link_report(bp);
34f80b04
EG
2465
2466 if (IS_E1HMF(bp)) {
8a1c38d1 2467 int port = BP_PORT(bp);
34f80b04 2468 int func;
8a1c38d1 2469 int vn;
34f80b04 2470
ab6ad5a4 2471 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2472 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2473 if (vn == BP_E1HVN(bp))
2474 continue;
2475
8a1c38d1 2476 func = ((vn << 1) | port);
34f80b04
EG
2477 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2478 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2479 }
34f80b04 2480
8a1c38d1
EG
2481 if (bp->link_vars.link_up) {
2482 int i;
2483
2484 /* Init rate shaping and fairness contexts */
2485 bnx2x_init_port_minmax(bp);
34f80b04 2486
34f80b04 2487 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2488 bnx2x_init_vn_minmax(bp, 2*vn + port);
2489
2490 /* Store it to internal memory */
2491 for (i = 0;
2492 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2493 REG_WR(bp, BAR_XSTRORM_INTMEM +
2494 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2495 ((u32 *)(&bp->cmng))[i]);
2496 }
34f80b04 2497 }
c18487ee 2498}
a2fbb9ea 2499
c18487ee
YR
2500static void bnx2x__link_status_update(struct bnx2x *bp)
2501{
f34d28ea 2502 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2503 return;
a2fbb9ea 2504
c18487ee 2505 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2506
bb2a0f7a
YG
2507 if (bp->link_vars.link_up)
2508 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2509 else
2510 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2511
2691d51d
EG
2512 bnx2x_calc_vn_weight_sum(bp);
2513
c18487ee
YR
2514 /* indicate link status */
2515 bnx2x_link_report(bp);
a2fbb9ea 2516}
a2fbb9ea 2517
34f80b04
EG
2518static void bnx2x_pmf_update(struct bnx2x *bp)
2519{
2520 int port = BP_PORT(bp);
2521 u32 val;
2522
2523 bp->port.pmf = 1;
2524 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2525
2526 /* enable nig attention */
2527 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2528 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2529 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2530
2531 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2532}
2533
c18487ee 2534/* end of Link */
a2fbb9ea
ET
2535
2536/* slow path */
2537
2538/*
2539 * General service functions
2540 */
2541
2691d51d
EG
2542/* send the MCP a request, block until there is a reply */
2543u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2544{
2545 int func = BP_FUNC(bp);
2546 u32 seq = ++bp->fw_seq;
2547 u32 rc = 0;
2548 u32 cnt = 1;
2549 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2550
c4ff7cbf 2551 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2552 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2553 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2554
2555 do {
 2556		/* let the FW do its magic ... */
2557 msleep(delay);
2558
2559 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2560
c4ff7cbf
EG
 2561		/* Give the FW up to 5 seconds (500*10ms) */
2562 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2563
2564 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2565 cnt*delay, rc, seq);
2566
2567 /* is this a reply to our command? */
2568 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2569 rc &= FW_MSG_CODE_MASK;
2570 else {
2571 /* FW BUG! */
2572 BNX2X_ERR("FW failed to respond!\n");
2573 bnx2x_fw_dump(bp);
2574 rc = 0;
2575 }
c4ff7cbf 2576 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2577
2578 return rc;
2579}
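/* The mailbox handshake above in short: the driver writes
 * (command | seq) to drv_mb_header and polls fw_mb_header until the
 * firmware echoes the same sequence number in the low
 * FW_MSG_SEQ_NUMBER_MASK bits; the masked FW_MSG_CODE_MASK part is
 * then the reply code. A minimal caller sketch (illustration only):
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (rc == 0)
 *		... no reply; the FW state was already dumped ...
 */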
2580
2581static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2582static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2583static void bnx2x_set_rx_mode(struct net_device *dev);
2584
2585static void bnx2x_e1h_disable(struct bnx2x *bp)
2586{
2587 int port = BP_PORT(bp);
2691d51d
EG
2588
2589 netif_tx_disable(bp->dev);
2590 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2591
2592 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2593
2691d51d
EG
2594 netif_carrier_off(bp->dev);
2595}
2596
2597static void bnx2x_e1h_enable(struct bnx2x *bp)
2598{
2599 int port = BP_PORT(bp);
2600
2601 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2602
2691d51d
EG
 2603	/* Only the Tx queues should be re-enabled */
2604 netif_tx_wake_all_queues(bp->dev);
2605
061bc702
EG
2606 /*
 2607	 * Do not call netif_carrier_on here; it will be called when the
 2608	 * link state is checked, if the link is up
2609 */
2691d51d
EG
2610}
2611
2612static void bnx2x_update_min_max(struct bnx2x *bp)
2613{
2614 int port = BP_PORT(bp);
2615 int vn, i;
2616
2617 /* Init rate shaping and fairness contexts */
2618 bnx2x_init_port_minmax(bp);
2619
2620 bnx2x_calc_vn_weight_sum(bp);
2621
2622 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2623 bnx2x_init_vn_minmax(bp, 2*vn + port);
2624
2625 if (bp->port.pmf) {
2626 int func;
2627
2628 /* Set the attention towards other drivers on the same port */
2629 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2630 if (vn == BP_E1HVN(bp))
2631 continue;
2632
2633 func = ((vn << 1) | port);
2634 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2635 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2636 }
2637
2638 /* Store it to internal memory */
2639 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2640 REG_WR(bp, BAR_XSTRORM_INTMEM +
2641 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2642 ((u32 *)(&bp->cmng))[i]);
2643 }
2644}
2645
2646static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2647{
2691d51d 2648 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2649
2650 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2651
f34d28ea
EG
2652 /*
2653 * This is the only place besides the function initialization
2654 * where the bp->flags can change so it is done without any
2655 * locks
2656 */
2691d51d
EG
2657 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2658 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2659 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2660
2661 bnx2x_e1h_disable(bp);
2662 } else {
2663 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2664 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2665
2666 bnx2x_e1h_enable(bp);
2667 }
2668 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2669 }
2670 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2671
2672 bnx2x_update_min_max(bp);
2673 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2674 }
2675
2676 /* Report results to MCP */
2677 if (dcc_event)
2678 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2679 else
2680 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2681}
2682
28912902
MC
2683/* must be called under the spq lock */
2684static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2685{
2686 struct eth_spe *next_spe = bp->spq_prod_bd;
2687
2688 if (bp->spq_prod_bd == bp->spq_last_bd) {
2689 bp->spq_prod_bd = bp->spq;
2690 bp->spq_prod_idx = 0;
2691 DP(NETIF_MSG_TIMER, "end of spq\n");
2692 } else {
2693 bp->spq_prod_bd++;
2694 bp->spq_prod_idx++;
2695 }
2696 return next_spe;
2697}
2698
2699/* must be called under the spq lock */
2700static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2701{
2702 int func = BP_FUNC(bp);
2703
2704 /* Make sure that BD data is updated before writing the producer */
2705 wmb();
2706
2707 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2708 bp->spq_prod_idx);
2709 mmiowb();
2710}
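/* Ordering in bnx2x_sp_prod_update() matters: the wmb() makes the BD
 * contents visible to the device before the producer index is
 * written, and mmiowb() orders the MMIO write ahead of the upcoming
 * spq_lock release, so a producer update posted by another CPU
 * cannot overtake this one on its way to the chip.
 */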
2711
a2fbb9ea
ET
2712/* the slow path queue is odd since completions arrive on the fastpath ring */
2713static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2714 u32 data_hi, u32 data_lo, int common)
2715{
28912902 2716 struct eth_spe *spe;
a2fbb9ea 2717
34f80b04
EG
2718 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2719 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2720 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2721 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2722 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2723
2724#ifdef BNX2X_STOP_ON_ERROR
2725 if (unlikely(bp->panic))
2726 return -EIO;
2727#endif
2728
34f80b04 2729 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2730
2731 if (!bp->spq_left) {
2732 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2733 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2734 bnx2x_panic();
2735 return -EBUSY;
2736 }
f1410647 2737
28912902
MC
2738 spe = bnx2x_sp_get_next(bp);
2739
a2fbb9ea 2740	/* CID needs port number to be encoded in it */
28912902 2741 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2742 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2743 HW_CID(bp, cid)));
28912902 2744 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2745 if (common)
28912902 2746 spe->hdr.type |=
a2fbb9ea
ET
2747 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2748
28912902
MC
2749 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2750 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2751
2752 bp->spq_left--;
2753
28912902 2754 bnx2x_sp_prod_update(bp);
34f80b04 2755 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2756 return 0;
2757}
2758
2759/* acquire split MCP access lock register */
4a37fb66 2760static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2761{
a2fbb9ea 2762 u32 i, j, val;
34f80b04 2763 int rc = 0;
a2fbb9ea
ET
2764
2765 might_sleep();
2766 i = 100;
2767 for (j = 0; j < i*10; j++) {
2768 val = (1UL << 31);
2769 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2770 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2771 if (val & (1L << 31))
2772 break;
2773
2774 msleep(5);
2775 }
a2fbb9ea 2776 if (!(val & (1L << 31))) {
19680c48 2777 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2778 rc = -EBUSY;
2779 }
2780
2781 return rc;
2782}
2783
4a37fb66
YG
2784/* release split MCP access lock register */
2785static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2786{
2787 u32 val = 0;
2788
2789 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2790}
2791
2792static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2793{
2794 struct host_def_status_block *def_sb = bp->def_status_blk;
2795 u16 rc = 0;
2796
2797 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2798 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2799 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2800 rc |= 1;
2801 }
2802 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2803 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2804 rc |= 2;
2805 }
2806 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2807 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2808 rc |= 4;
2809 }
2810 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2811 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2812 rc |= 8;
2813 }
2814 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2815 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2816 rc |= 16;
2817 }
2818 return rc;
2819}
2820
2821/*
2822 * slow path service functions
2823 */
2824
2825static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2826{
34f80b04 2827 int port = BP_PORT(bp);
5c862848
EG
2828 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2829 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2830 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2831 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2832 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2833 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2834 u32 aeu_mask;
87942b46 2835 u32 nig_mask = 0;
a2fbb9ea 2836
a2fbb9ea
ET
2837 if (bp->attn_state & asserted)
2838 BNX2X_ERR("IGU ERROR\n");
2839
3fcaf2e5
EG
2840 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2841 aeu_mask = REG_RD(bp, aeu_addr);
2842
a2fbb9ea 2843 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2844 aeu_mask, asserted);
2845 aeu_mask &= ~(asserted & 0xff);
2846 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2847
3fcaf2e5
EG
2848 REG_WR(bp, aeu_addr, aeu_mask);
2849 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2850
3fcaf2e5 2851 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2852 bp->attn_state |= asserted;
3fcaf2e5 2853 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2854
2855 if (asserted & ATTN_HARD_WIRED_MASK) {
2856 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2857
a5e9a7cf
EG
2858 bnx2x_acquire_phy_lock(bp);
2859
877e9aa4 2860 /* save nig interrupt mask */
87942b46 2861 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2862 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2863
c18487ee 2864 bnx2x_link_attn(bp);
a2fbb9ea
ET
2865
2866 /* handle unicore attn? */
2867 }
2868 if (asserted & ATTN_SW_TIMER_4_FUNC)
2869 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2870
2871 if (asserted & GPIO_2_FUNC)
2872 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2873
2874 if (asserted & GPIO_3_FUNC)
2875 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2876
2877 if (asserted & GPIO_4_FUNC)
2878 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2879
2880 if (port == 0) {
2881 if (asserted & ATTN_GENERAL_ATTN_1) {
2882 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2883 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2884 }
2885 if (asserted & ATTN_GENERAL_ATTN_2) {
2886 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2887 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2888 }
2889 if (asserted & ATTN_GENERAL_ATTN_3) {
2890 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2891 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2892 }
2893 } else {
2894 if (asserted & ATTN_GENERAL_ATTN_4) {
2895 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2896 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2897 }
2898 if (asserted & ATTN_GENERAL_ATTN_5) {
2899 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2900 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2901 }
2902 if (asserted & ATTN_GENERAL_ATTN_6) {
2903 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2904 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2905 }
2906 }
2907
2908 } /* if hardwired */
2909
5c862848
EG
2910 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2911 asserted, hc_addr);
2912 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2913
2914 /* now set back the mask */
a5e9a7cf 2915 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2916 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2917 bnx2x_release_phy_lock(bp);
2918 }
a2fbb9ea
ET
2919}
2920
fd4ef40d
EG
2921static inline void bnx2x_fan_failure(struct bnx2x *bp)
2922{
2923 int port = BP_PORT(bp);
2924
2925 /* mark the failure */
2926 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2927 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2928 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2929 bp->link_params.ext_phy_config);
2930
2931 /* log the failure */
2932 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2933 " the driver to shutdown the card to prevent permanent"
2934 " damage. Please contact Dell Support for assistance\n",
2935 bp->dev->name);
2936}
ab6ad5a4 2937
877e9aa4 2938static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2939{
34f80b04 2940 int port = BP_PORT(bp);
877e9aa4 2941 int reg_offset;
4d295db0 2942 u32 val, swap_val, swap_override;
877e9aa4 2943
34f80b04
EG
2944 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2945 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2946
34f80b04 2947 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2948
2949 val = REG_RD(bp, reg_offset);
2950 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2951 REG_WR(bp, reg_offset, val);
2952
2953 BNX2X_ERR("SPIO5 hw attention\n");
2954
fd4ef40d 2955 /* Fan failure attention */
35b19ba5
EG
2956 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2958 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2959 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2960 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2961 /* The PHY reset is controlled by GPIO 1 */
2962 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2963 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2964 break;
2965
4d295db0
EG
2966 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2967 /* The PHY reset is controlled by GPIO 1 */
2968 /* fake the port number to cancel the swap done in
2969 set_gpio() */
2970 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2971 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2972 port = (swap_val && swap_override) ^ 1;
2973 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2974 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2975 break;
2976
877e9aa4
ET
2977 default:
2978 break;
2979 }
fd4ef40d 2980 bnx2x_fan_failure(bp);
877e9aa4 2981 }
34f80b04 2982
589abe3a
EG
2983 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2984 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2985 bnx2x_acquire_phy_lock(bp);
2986 bnx2x_handle_module_detect_int(&bp->link_params);
2987 bnx2x_release_phy_lock(bp);
2988 }
2989
34f80b04
EG
2990 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2991
2992 val = REG_RD(bp, reg_offset);
2993 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2994 REG_WR(bp, reg_offset, val);
2995
2996 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2997 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2998 bnx2x_panic();
2999 }
877e9aa4
ET
3000}
3001
3002static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3003{
3004 u32 val;
3005
0626b899 3006 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3007
3008 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3009 BNX2X_ERR("DB hw attention 0x%x\n", val);
3010 /* DORQ discard attention */
3011 if (val & 0x2)
3012 BNX2X_ERR("FATAL error from DORQ\n");
3013 }
34f80b04
EG
3014
3015 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3016
3017 int port = BP_PORT(bp);
3018 int reg_offset;
3019
3020 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3021 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3022
3023 val = REG_RD(bp, reg_offset);
3024 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3025 REG_WR(bp, reg_offset, val);
3026
3027 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3028 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3029 bnx2x_panic();
3030 }
877e9aa4
ET
3031}
3032
3033static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3034{
3035 u32 val;
3036
3037 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3038
3039 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3040 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3041 /* CFC error attention */
3042 if (val & 0x2)
3043 BNX2X_ERR("FATAL error from CFC\n");
3044 }
3045
3046 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3047
3048 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3049 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3050 /* RQ_USDMDP_FIFO_OVERFLOW */
3051 if (val & 0x18000)
3052 BNX2X_ERR("FATAL error from PXP\n");
3053 }
34f80b04
EG
3054
3055 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3056
3057 int port = BP_PORT(bp);
3058 int reg_offset;
3059
3060 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3061 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3062
3063 val = REG_RD(bp, reg_offset);
3064 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3065 REG_WR(bp, reg_offset, val);
3066
3067 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3068 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3069 bnx2x_panic();
3070 }
877e9aa4
ET
3071}
3072
3073static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3074{
34f80b04
EG
3075 u32 val;
3076
877e9aa4
ET
3077 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3078
34f80b04
EG
3079 if (attn & BNX2X_PMF_LINK_ASSERT) {
3080 int func = BP_FUNC(bp);
3081
3082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3083 bp->mf_config = SHMEM_RD(bp,
3084 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3085 val = SHMEM_RD(bp, func_mb[func].drv_status);
3086 if (val & DRV_STATUS_DCC_EVENT_MASK)
3087 bnx2x_dcc_event(bp,
3088 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3089 bnx2x__link_status_update(bp);
2691d51d 3090 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3091 bnx2x_pmf_update(bp);
3092
3093 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3094
3095 BNX2X_ERR("MC assert!\n");
3096 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3097 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3098 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3099 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3100 bnx2x_panic();
3101
3102 } else if (attn & BNX2X_MCP_ASSERT) {
3103
3104 BNX2X_ERR("MCP assert!\n");
3105 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3106 bnx2x_fw_dump(bp);
877e9aa4
ET
3107
3108 } else
3109 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3110 }
3111
3112 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3113 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3114 if (attn & BNX2X_GRC_TIMEOUT) {
3115 val = CHIP_IS_E1H(bp) ?
3116 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3117 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3118 }
3119 if (attn & BNX2X_GRC_RSV) {
3120 val = CHIP_IS_E1H(bp) ?
3121 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3122 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3123 }
877e9aa4 3124 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3125 }
3126}
3127
3128static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3129{
a2fbb9ea
ET
3130 struct attn_route attn;
3131 struct attn_route group_mask;
34f80b04 3132 int port = BP_PORT(bp);
877e9aa4 3133 int index;
a2fbb9ea
ET
3134 u32 reg_addr;
3135 u32 val;
3fcaf2e5 3136 u32 aeu_mask;
a2fbb9ea
ET
3137
3138 /* need to take HW lock because MCP or other port might also
3139 try to handle this event */
4a37fb66 3140 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3141
3142 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3143 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3144 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3145 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3146 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3147 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3148
3149 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3150 if (deasserted & (1 << index)) {
3151 group_mask = bp->attn_group[index];
3152
34f80b04
EG
3153 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3154 index, group_mask.sig[0], group_mask.sig[1],
3155 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3156
877e9aa4
ET
3157 bnx2x_attn_int_deasserted3(bp,
3158 attn.sig[3] & group_mask.sig[3]);
3159 bnx2x_attn_int_deasserted1(bp,
3160 attn.sig[1] & group_mask.sig[1]);
3161 bnx2x_attn_int_deasserted2(bp,
3162 attn.sig[2] & group_mask.sig[2]);
3163 bnx2x_attn_int_deasserted0(bp,
3164 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3165
a2fbb9ea
ET
3166 if ((attn.sig[0] & group_mask.sig[0] &
3167 HW_PRTY_ASSERT_SET_0) ||
3168 (attn.sig[1] & group_mask.sig[1] &
3169 HW_PRTY_ASSERT_SET_1) ||
3170 (attn.sig[2] & group_mask.sig[2] &
3171 HW_PRTY_ASSERT_SET_2))
6378c025 3172 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3173 }
3174 }
3175
4a37fb66 3176 bnx2x_release_alr(bp);
a2fbb9ea 3177
5c862848 3178 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3179
3180 val = ~deasserted;
3fcaf2e5
EG
3181 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3182 val, reg_addr);
5c862848 3183 REG_WR(bp, reg_addr, val);
a2fbb9ea 3184
a2fbb9ea 3185 if (~bp->attn_state & deasserted)
3fcaf2e5 3186 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3187
3188 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3189 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3190
3fcaf2e5
EG
3191 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3192 aeu_mask = REG_RD(bp, reg_addr);
3193
3194 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3195 aeu_mask, deasserted);
3196 aeu_mask |= (deasserted & 0xff);
3197 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3198
3fcaf2e5
EG
3199 REG_WR(bp, reg_addr, aeu_mask);
3200 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3201
3202 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3203 bp->attn_state &= ~deasserted;
3204 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3205}
3206
3207static void bnx2x_attn_int(struct bnx2x *bp)
3208{
3209 /* read local copy of bits */
68d59484
EG
3210 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3211 attn_bits);
3212 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3213 attn_bits_ack);
a2fbb9ea
ET
3214 u32 attn_state = bp->attn_state;
3215
3216 /* look for changed bits */
3217 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3218 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3219
3220 DP(NETIF_MSG_HW,
3221 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3222 attn_bits, attn_ack, asserted, deasserted);
3223
3224 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3225 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3226
3227 /* handle bits that were raised */
3228 if (asserted)
3229 bnx2x_attn_int_asserted(bp, asserted);
3230
3231 if (deasserted)
3232 bnx2x_attn_int_deasserted(bp, deasserted);
3233}
3234
3235static void bnx2x_sp_task(struct work_struct *work)
3236{
1cf167f2 3237 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3238 u16 status;
3239
34f80b04 3240
a2fbb9ea
ET
3241 /* Return here if interrupt is disabled */
3242 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3243 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3244 return;
3245 }
3246
3247 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3248/* if (status == 0) */
3249/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3250
3196a88a 3251 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3252
877e9aa4
ET
3253 /* HW attentions */
3254 if (status & 0x1)
a2fbb9ea 3255 bnx2x_attn_int(bp);
a2fbb9ea 3256
68d59484 3257 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3258 IGU_INT_NOP, 1);
3259 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3260 IGU_INT_NOP, 1);
3261 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3262 IGU_INT_NOP, 1);
3263 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3264 IGU_INT_NOP, 1);
3265 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3266 IGU_INT_ENABLE, 1);
877e9aa4 3267
a2fbb9ea
ET
3268}
3269
3270static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3271{
3272 struct net_device *dev = dev_instance;
3273 struct bnx2x *bp = netdev_priv(dev);
3274
3275 /* Return here if interrupt is disabled */
3276 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3277 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3278 return IRQ_HANDLED;
3279 }
3280
8d9c5f34 3281 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3282
3283#ifdef BNX2X_STOP_ON_ERROR
3284 if (unlikely(bp->panic))
3285 return IRQ_HANDLED;
3286#endif
3287
993ac7b5
MC
3288#ifdef BCM_CNIC
3289 {
3290 struct cnic_ops *c_ops;
3291
3292 rcu_read_lock();
3293 c_ops = rcu_dereference(bp->cnic_ops);
3294 if (c_ops)
3295 c_ops->cnic_handler(bp->cnic_data, NULL);
3296 rcu_read_unlock();
3297 }
3298#endif
1cf167f2 3299 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3300
3301 return IRQ_HANDLED;
3302}
3303
3304/* end of slow path */
3305
3306/* Statistics */
3307
3308/****************************************************************************
3309* Macros
3310****************************************************************************/
3311
a2fbb9ea
ET
3312/* sum[hi:lo] += add[hi:lo] */
3313#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3314 do { \
3315 s_lo += a_lo; \
f5ba6772 3316 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3317 } while (0)
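/* ADD_64 carry example: s = 0x00000001:ffffffff plus a =
 * 0x00000000:00000001 first wraps s_lo to 0; the wrapped s_lo is now
 * smaller than a_lo, so one carry is added and the result is
 * 0x00000002:00000000.
 */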
3318
3319/* difference = minuend - subtrahend */
3320#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3321 do { \
bb2a0f7a
YG
3322 if (m_lo < s_lo) { \
3323 /* underflow */ \
a2fbb9ea 3324 d_hi = m_hi - s_hi; \
bb2a0f7a 3325 if (d_hi > 0) { \
6378c025 3326 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3327 d_hi--; \
3328 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3329 } else { \
6378c025 3330 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3331 d_hi = 0; \
3332 d_lo = 0; \
3333 } \
bb2a0f7a
YG
3334 } else { \
3335 /* m_lo >= s_lo */ \
a2fbb9ea 3336 if (m_hi < s_hi) { \
bb2a0f7a
YG
3337 d_hi = 0; \
3338 d_lo = 0; \
3339 } else { \
6378c025 3340 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3341 d_hi = m_hi - s_hi; \
3342 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3343 } \
3344 } \
3345 } while (0)
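/* DIFF_64 borrow example: m = 0x00000002:00000000 minus s =
 * 0x00000001:ffffffff takes the underflow branch (m_lo < s_lo),
 * "loans" 1 from d_hi = 2 - 1, leaving d_hi = 0 and d_lo =
 * 0 + (UINT_MAX - 0xffffffff) + 1 = 1, i.e. a difference of 1.
 * A negative true difference clamps to 0:0 instead of wrapping.
 */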
3346
bb2a0f7a 3347#define UPDATE_STAT64(s, t) \
a2fbb9ea 3348 do { \
bb2a0f7a
YG
3349 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3350 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3351 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3352 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3353 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3354 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3355 } while (0)
3356
bb2a0f7a 3357#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3358 do { \
bb2a0f7a
YG
3359 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3360 diff.lo, new->s##_lo, old->s##_lo); \
3361 ADD_64(estats->t##_hi, diff.hi, \
3362 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3363 } while (0)
3364
3365/* sum[hi:lo] += add */
3366#define ADD_EXTEND_64(s_hi, s_lo, a) \
3367 do { \
3368 s_lo += a; \
3369 s_hi += (s_lo < a) ? 1 : 0; \
3370 } while (0)
3371
bb2a0f7a 3372#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3373 do { \
bb2a0f7a
YG
3374 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3375 pstats->mac_stx[1].s##_lo, \
3376 new->s); \
a2fbb9ea
ET
3377 } while (0)
3378
bb2a0f7a 3379#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3380 do { \
4781bfad
EG
3381 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3382 old_tclient->s = tclient->s; \
de832a55
EG
3383 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3384 } while (0)
3385
3386#define UPDATE_EXTEND_USTAT(s, t) \
3387 do { \
3388 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3389 old_uclient->s = uclient->s; \
3390 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3391 } while (0)
3392
3393#define UPDATE_EXTEND_XSTAT(s, t) \
3394 do { \
4781bfad
EG
3395 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3396 old_xclient->s = xclient->s; \
de832a55
EG
3397 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3398 } while (0)
3399
3400/* minuend -= subtrahend */
3401#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3402 do { \
3403 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3404 } while (0)
3405
3406/* minuend[hi:lo] -= subtrahend */
3407#define SUB_EXTEND_64(m_hi, m_lo, s) \
3408 do { \
3409 SUB_64(m_hi, 0, m_lo, s); \
3410 } while (0)
3411
3412#define SUB_EXTEND_USTAT(s, t) \
3413 do { \
3414 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3415 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3416 } while (0)
3417
3418/*
3419 * General service functions
3420 */
3421
3422static inline long bnx2x_hilo(u32 *hiref)
3423{
3424 u32 lo = *(hiref + 1);
3425#if (BITS_PER_LONG == 64)
3426 u32 hi = *hiref;
3427
3428 return HILO_U64(hi, lo);
3429#else
3430 return lo;
3431#endif
3432}
3433
3434/*
3435 * Init service functions
3436 */
3437
bb2a0f7a
YG
3438static void bnx2x_storm_stats_post(struct bnx2x *bp)
3439{
3440 if (!bp->stats_pending) {
3441 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3442 int i, rc;
bb2a0f7a
YG
3443
3444 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3445 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3446 for_each_queue(bp, i)
3447 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3448
3449 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3450 ((u32 *)&ramrod_data)[1],
3451 ((u32 *)&ramrod_data)[0], 0);
3452 if (rc == 0) {
 3453			/* stats ramrod has its own slot on the spq */
3454 bp->spq_left++;
3455 bp->stats_pending = 1;
3456 }
3457 }
3458}
3459
bb2a0f7a
YG
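/*
 * Kick off the DMAE commands prepared in bnx2x_sp(bp, dmae[]).  When more
 * than one command is queued (executer_idx != 0), a "loader" command is
 * posted instead: it copies the queued commands into the DMAE command
 * memory and chains them through the GO registers, so the whole batch
 * runs without further CPU involvement.
 */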
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

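/* Poll the completion word written by the last DMAE command (~10ms max) */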
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

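/*
 * On a PMF change, DMAE the port statistics back from the shmem port_stx
 * area into the host buffer, so the newly designated PMF resumes from the
 * totals accumulated by the previous one.
 */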
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

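/*
 * Build the PMF's periodic DMAE program: push the host port (and function)
 * stats out to MCP shmem, then pull the active MAC's counters (BMAC or
 * EMAC) and the NIG counters into host memory.  Only the last command
 * completes to stats_comp; the rest chain via the GO registers.
 */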
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

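/* A single DMAE command: push the host function stats block out to shmem */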
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

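/*
 * Fold the freshly DMAEd BMAC register block (rx_stat_gr*, tx_stat_gt*)
 * into the cumulative mac_stx entries via UPDATE_STAT64.
 */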
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

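/*
 * Harvest the per-client counters the storms dumped into fw_stats.  Each
 * storm echoes the drv_counter it was queried with; a mismatch means the
 * dump is stale (the STAT_QUERY ramrod has not completed yet) and the
 * whole update is skipped with a negative return value.
 */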
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

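/* Fold the accumulated driver statistics into the net_device counters */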
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

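/* Sum the per-queue software counters into the global eth_stats block */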
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_rx_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

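/*
 * Periodic update (STATS_EVENT_UPDATE): pick up the results of the
 * previous DMAE/ramrod round, refresh the net_device and driver counters,
 * then post the next round.  A storm update missing several rounds in a
 * row (stats_pending reaching 3) is treated as fatal.
 */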
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

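/*
 * Statistics state machine: rows are the current state (DISABLED/ENABLED),
 * columns the event (PMF change, link up, timer update, stop).  Each cell
 * names the action to run and the next state to enter.
 */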
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

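/*
 * Periodic driver timer: in poll mode it also services the rings; it
 * exchanges a heartbeat with the MCP (driver pulse vs. mcp pulse) and,
 * while the device is open, feeds STATS_EVENT_UPDATE to the statistics
 * state machine.
 */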
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

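/*
 * Program the host coalescing timeouts.  rx_ticks/tx_ticks are in usec;
 * the /12 here suggests the HC counts in 12-usec units (an assumption
 * drawn from this code, not from HC documentation).  A zero timeout also
 * sets the corresponding HC_DISABLE flag.
 */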
4919static void bnx2x_update_coalesce(struct bnx2x *bp)
4920{
34f80b04 4921 int port = BP_PORT(bp);
a2fbb9ea
ET
4922 int i;
4923
4924 for_each_queue(bp, i) {
34f80b04 4925 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4926
4927 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
4928 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4929 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4930 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4931 bp->rx_ticks/12);
ca00392c
EG
4932 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4933 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4934 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4935 (bp->rx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4936
4937 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4938 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4939 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4940 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4941 bp->tx_ticks/12);
a2fbb9ea 4942 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4943 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4944 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4945 (bp->tx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4946 }
4947}
4948
7a9b2557
VZ
4949static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4950 struct bnx2x_fastpath *fp, int last)
4951{
4952 int i;
4953
4954 for (i = 0; i < last; i++) {
4955 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4956 struct sk_buff *skb = rx_buf->skb;
4957
4958 if (skb == NULL) {
4959 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4960 continue;
4961 }
4962
4963 if (fp->tpa_state[i] == BNX2X_TPA_START)
4964 pci_unmap_single(bp->pdev,
4965 pci_unmap_addr(rx_buf, mapping),
356e2385 4966 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
4967
4968 dev_kfree_skb(skb);
4969 rx_buf->skb = NULL;
4970 }
4971}
4972
a2fbb9ea
ET
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* Mark queue as Rx */
		fp->is_rx_queue = 1;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

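/* Chain the "next page" BDs of every Tx ring and reset the doorbell
 * data and all producer/consumer indices to the empty state.
 */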
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}

	/* clean tx statistics */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, tx_pkt) = 0;
}

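/* Set up the slowpath queue (SPQ): reset the producer state and tell
 * the XSTORM where the SPQ page lives so that ramrods can be posted.
 */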
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

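/* Fill the per-connection eth_context of every queue: the Rx (USTORM)
 * side gets the BD/SGE page bases, buffer sizes and TPA flags, the Tx
 * (XSTORM/CSTORM) side gets the Tx BD page base and completion index.
 */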
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

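/* Program the RSS indirection table: every entry maps to one of the Rx
 * clients, distributed round-robin over num_rx_queues.
 */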
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

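/* Push the per-client TSTORM configuration (MTU, statistics counter and
 * VLAN removal flags) for every queue.
 */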
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

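/* Translate bp->rx_mode into the TSTORM MAC filter masks and the NIG
 * LLH mask, and refresh the per-client configuration unless Rx is
 * completely disabled.
 */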
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

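/* Internal (storm) memory initialization mirrors the HW init split:
 * a common part done once per chip, a per-port part and a per-function
 * part.
 */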
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

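/* Top-level NIC init: set up the fastpath structures and status blocks,
 * the default status block, rings, contexts and internal memory, then
 * enable interrupts.  Runs after bnx2x_init_hw() with the load_code
 * returned by the MCP.
 */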
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME			0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics first? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

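/* Unmask the attention interrupts of the HW blocks (writing 0 to an
 * INT_MASK register leaves all its attention bits enabled).  The
 * commented-out SEM/MISC writes document masks that are deliberately
 * not touched here.
 */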
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}


static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

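/* First stage of the HW init: blocks common to both ports and all
 * functions.  Only the function that received FW_MSG_CODE_DRV_LOAD_COMMON
 * from the MCP runs this stage.
 */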
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

6658static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6659{
6660 int i, rc = 0;
a2fbb9ea 6661
34f80b04
EG
6662 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6663 BP_FUNC(bp), load_code);
a2fbb9ea 6664
34f80b04
EG
6665 bp->dmae_ready = 0;
6666 mutex_init(&bp->dmae_mutex);
54016b26
EG
6667 rc = bnx2x_gunzip_init(bp);
6668 if (rc)
6669 return rc;
a2fbb9ea 6670
34f80b04
EG
6671 switch (load_code) {
6672 case FW_MSG_CODE_DRV_LOAD_COMMON:
6673 rc = bnx2x_init_common(bp);
6674 if (rc)
6675 goto init_hw_err;
6676 /* no break */
6677
6678 case FW_MSG_CODE_DRV_LOAD_PORT:
6679 bp->dmae_ready = 1;
6680 rc = bnx2x_init_port(bp);
6681 if (rc)
6682 goto init_hw_err;
6683 /* no break */
6684
6685 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6686 bp->dmae_ready = 1;
6687 rc = bnx2x_init_func(bp);
6688 if (rc)
6689 goto init_hw_err;
6690 break;
6691
6692 default:
6693 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6694 break;
6695 }
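	/* Editor's note: the missing breaks above are deliberate; the load
	 * scopes nest, so the cascade as this switch implements it is:
	 * LOAD_COMMON -> common + port + function init
	 * LOAD_PORT -> port + function init
	 * LOAD_FUNCTION -> function init only
	 */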
6696
6697 if (!BP_NOMCP(bp)) {
6698 int func = BP_FUNC(bp);
a2fbb9ea
ET
6699
6700 bp->fw_drv_pulse_wr_seq =
34f80b04 6701 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6702 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6703 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6704 }
a2fbb9ea 6705
34f80b04
EG
6706 /* this needs to be done before gunzip end */
6707 bnx2x_zero_def_sb(bp);
6708 for_each_queue(bp, i)
6709 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
6710#ifdef BCM_CNIC
6711 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6712#endif
34f80b04
EG
6713
6714init_hw_err:
6715 bnx2x_gunzip_end(bp);
6716
6717 return rc;
a2fbb9ea
ET
6718}
6719
a2fbb9ea
ET
6720static void bnx2x_free_mem(struct bnx2x *bp)
6721{
6722
6723#define BNX2X_PCI_FREE(x, y, size) \
6724 do { \
6725 if (x) { \
6726 pci_free_consistent(bp->pdev, size, x, y); \
6727 x = NULL; \
6728 y = 0; \
6729 } \
6730 } while (0)
6731
6732#define BNX2X_FREE(x) \
6733 do { \
6734 if (x) { \
6735 vfree(x); \
6736 x = NULL; \
6737 } \
6738 } while (0)
6739
6740 int i;
6741
6742 /* fastpath */
555f6c78 6743 /* Common */
a2fbb9ea
ET
6744 for_each_queue(bp, i) {
6745
555f6c78 6746 /* status blocks */
a2fbb9ea
ET
6747 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6748 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6749 sizeof(struct host_status_block));
555f6c78
EG
6750 }
6751 /* Rx */
6752 for_each_rx_queue(bp, i) {
a2fbb9ea 6753
555f6c78 6754 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6755 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6756 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6757 bnx2x_fp(bp, i, rx_desc_mapping),
6758 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6759
6760 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6761 bnx2x_fp(bp, i, rx_comp_mapping),
6762 sizeof(struct eth_fast_path_rx_cqe) *
6763 NUM_RCQ_BD);
a2fbb9ea 6764
7a9b2557 6765 /* SGE ring */
32626230 6766 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6767 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6768 bnx2x_fp(bp, i, rx_sge_mapping),
6769 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6770 }
555f6c78
EG
6771 /* Tx */
6772 for_each_tx_queue(bp, i) {
6773
6774 /* fastpath tx rings: tx_buf tx_desc */
6775 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6776 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6777 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6778 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6779 }
a2fbb9ea
ET
6780 /* end of fastpath */
6781
6782 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6783 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6784
6785 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6786 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6787
37b091ba 6788#ifdef BCM_CNIC
a2fbb9ea
ET
6789 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6790 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6791 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6792 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
6793 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6794 sizeof(struct host_status_block));
a2fbb9ea 6795#endif
7a9b2557 6796 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6797
6798#undef BNX2X_PCI_FREE
6799#undef BNX2X_KFREE
6800}
6801
6802static int bnx2x_alloc_mem(struct bnx2x *bp)
6803{
6804
6805#define BNX2X_PCI_ALLOC(x, y, size) \
6806 do { \
6807 x = pci_alloc_consistent(bp->pdev, size, y); \
6808 if (x == NULL) \
6809 goto alloc_mem_err; \
6810 memset(x, 0, size); \
6811 } while (0)
6812
6813#define BNX2X_ALLOC(x, size) \
6814 do { \
6815 x = vmalloc(size); \
6816 if (x == NULL) \
6817 goto alloc_mem_err; \
6818 memset(x, 0, size); \
6819 } while (0)
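/* Editor's note: the do { ... } while (0) wrapper is the standard C
 * idiom that makes these multi-statement macros expand as a single
 * statement, so a hypothetical caller such as:
 *
 *	if (need_ring)
 *		BNX2X_ALLOC(ring, size);
 *	else
 *		...
 *
 * still parses correctly without braces around the macro use.
 */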
6820
6821 int i;
6822
6823 /* fastpath */
555f6c78 6824 /* Common */
a2fbb9ea
ET
6825 for_each_queue(bp, i) {
6826 bnx2x_fp(bp, i, bp) = bp;
6827
555f6c78 6828 /* status blocks */
a2fbb9ea
ET
6829 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6830 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6831 sizeof(struct host_status_block));
555f6c78
EG
6832 }
6833 /* Rx */
6834 for_each_rx_queue(bp, i) {
a2fbb9ea 6835
555f6c78 6836 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6837 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6838 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6839 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6840 &bnx2x_fp(bp, i, rx_desc_mapping),
6841 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6842
6843 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6844 &bnx2x_fp(bp, i, rx_comp_mapping),
6845 sizeof(struct eth_fast_path_rx_cqe) *
6846 NUM_RCQ_BD);
6847
7a9b2557
VZ
6848 /* SGE ring */
6849 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6850 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6851 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6852 &bnx2x_fp(bp, i, rx_sge_mapping),
6853 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6854 }
555f6c78
EG
6855 /* Tx */
6856 for_each_tx_queue(bp, i) {
6857
555f6c78
EG
6858 /* fastpath tx rings: tx_buf tx_desc */
6859 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6860 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6861 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6862 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6863 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6864 }
a2fbb9ea
ET
6865 /* end of fastpath */
6866
6867 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6868 sizeof(struct host_def_status_block));
6869
6870 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6871 sizeof(struct bnx2x_slowpath));
6872
37b091ba 6873#ifdef BCM_CNIC
a2fbb9ea
ET
6874 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6875
a2fbb9ea
ET
 6876 /* allocate the searcher T2 table;
 6877 we allocate 1/4 of the alloc num for T2
 6878 (which is not entered into the ILT) */
6879 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6880
37b091ba 6881 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 6882 for (i = 0; i < 16*1024; i += 64)
37b091ba 6883 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
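	/* Editor's sketch: the loop above builds a singly linked free list
	 * for the searcher; the last 8 bytes of every 64-byte T2 entry hold
	 * the DMA address of the next entry. Assuming t2_mapping = 0x1000:
	 * entry at offset 0: bytes 56..63 = 0x1040
	 * entry at offset 64: bytes 56..63 = 0x1080, and so on.
	 * The head and tail are later handed to the HW via
	 * SRC_REG_FIRSTFREE0/SRC_REG_LASTFREE0 in bnx2x_init_func().
	 */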
a2fbb9ea 6884
37b091ba 6885 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
a2fbb9ea
ET
6886 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6887
6888 /* QM queues (128*MAX_CONN) */
6889 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
6890
6891 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6892 sizeof(struct host_status_block));
a2fbb9ea
ET
6893#endif
6894
6895 /* Slow path ring */
6896 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6897
6898 return 0;
6899
6900alloc_mem_err:
6901 bnx2x_free_mem(bp);
6902 return -ENOMEM;
6903
6904#undef BNX2X_PCI_ALLOC
6905#undef BNX2X_ALLOC
6906}
6907
6908static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6909{
6910 int i;
6911
555f6c78 6912 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6913 struct bnx2x_fastpath *fp = &bp->fp[i];
6914
6915 u16 bd_cons = fp->tx_bd_cons;
6916 u16 sw_prod = fp->tx_pkt_prod;
6917 u16 sw_cons = fp->tx_pkt_cons;
6918
a2fbb9ea
ET
6919 while (sw_cons != sw_prod) {
6920 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6921 sw_cons++;
6922 }
6923 }
6924}
6925
6926static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6927{
6928 int i, j;
6929
555f6c78 6930 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6931 struct bnx2x_fastpath *fp = &bp->fp[j];
6932
a2fbb9ea
ET
6933 for (i = 0; i < NUM_RX_BD; i++) {
6934 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6935 struct sk_buff *skb = rx_buf->skb;
6936
6937 if (skb == NULL)
6938 continue;
6939
6940 pci_unmap_single(bp->pdev,
6941 pci_unmap_addr(rx_buf, mapping),
356e2385 6942 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6943
6944 rx_buf->skb = NULL;
6945 dev_kfree_skb(skb);
6946 }
7a9b2557 6947 if (!fp->disable_tpa)
32626230
EG
6948 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6949 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6950 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6951 }
6952}
6953
6954static void bnx2x_free_skbs(struct bnx2x *bp)
6955{
6956 bnx2x_free_tx_skbs(bp);
6957 bnx2x_free_rx_skbs(bp);
6958}
6959
6960static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6961{
34f80b04 6962 int i, offset = 1;
a2fbb9ea
ET
6963
6964 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6965 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6966 bp->msix_table[0].vector);
6967
37b091ba
MC
6968#ifdef BCM_CNIC
6969 offset++;
6970#endif
a2fbb9ea 6971 for_each_queue(bp, i) {
c14423fe 6972 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6973 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6974 bnx2x_fp(bp, i, state));
6975
34f80b04 6976 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6977 }
a2fbb9ea
ET
6978}
6979
6980static void bnx2x_free_irq(struct bnx2x *bp)
6981{
a2fbb9ea 6982 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6983 bnx2x_free_msix_irqs(bp);
6984 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6985 bp->flags &= ~USING_MSIX_FLAG;
6986
8badd27a
EG
6987 } else if (bp->flags & USING_MSI_FLAG) {
6988 free_irq(bp->pdev->irq, bp->dev);
6989 pci_disable_msi(bp->pdev);
6990 bp->flags &= ~USING_MSI_FLAG;
6991
a2fbb9ea
ET
6992 } else
6993 free_irq(bp->pdev->irq, bp->dev);
6994}
6995
6996static int bnx2x_enable_msix(struct bnx2x *bp)
6997{
8badd27a
EG
6998 int i, rc, offset = 1;
6999 int igu_vec = 0;
a2fbb9ea 7000
8badd27a
EG
7001 bp->msix_table[0].entry = igu_vec;
7002 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 7003
37b091ba
MC
7004#ifdef BCM_CNIC
7005 igu_vec = BP_L_ID(bp) + offset;
7006 bp->msix_table[1].entry = igu_vec;
7007 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7008 offset++;
7009#endif
34f80b04 7010 for_each_queue(bp, i) {
8badd27a 7011 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
7012 bp->msix_table[i + offset].entry = igu_vec;
7013 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7014 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
7015 }
7016
34f80b04 7017 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 7018 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 7019 if (rc) {
8badd27a
EG
7020 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7021 return rc;
34f80b04 7022 }
8badd27a 7023
a2fbb9ea
ET
7024 bp->flags |= USING_MSIX_FLAG;
7025
7026 return 0;
a2fbb9ea
ET
7027}
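/* Editor's sketch of the resulting vector layout (assuming BCM_CNIC is
 * defined and BP_L_ID(bp) == 0):
 * msix_table[0]   -> slowpath
 * msix_table[1]   -> CNIC
 * msix_table[2..] -> fastpath queue i at entry BP_L_ID + offset + i
 * Without BCM_CNIC the fastpath entries start at msix_table[1].
 */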
7028
a2fbb9ea
ET
7029static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7030{
34f80b04 7031 int i, rc, offset = 1;
a2fbb9ea 7032
a2fbb9ea
ET
7033 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7034 bp->dev->name, bp->dev);
a2fbb9ea
ET
7035 if (rc) {
7036 BNX2X_ERR("request sp irq failed\n");
7037 return -EBUSY;
7038 }
7039
37b091ba
MC
7040#ifdef BCM_CNIC
7041 offset++;
7042#endif
a2fbb9ea 7043 for_each_queue(bp, i) {
555f6c78
EG
7044 struct bnx2x_fastpath *fp = &bp->fp[i];
7045
ca00392c
EG
7046 if (i < bp->num_rx_queues)
7047 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7048 else
7049 sprintf(fp->name, "%s-tx-%d",
7050 bp->dev->name, i - bp->num_rx_queues);
7051
34f80b04 7052 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7053 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7054 if (rc) {
555f6c78 7055 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7056 bnx2x_free_msix_irqs(bp);
7057 return -EBUSY;
7058 }
7059
555f6c78 7060 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7061 }
7062
555f6c78 7063 i = BNX2X_NUM_QUEUES(bp);
ca00392c
EG
7064 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7065 " ... fp[%d] %d\n",
7066 bp->dev->name, bp->msix_table[0].vector,
7067 0, bp->msix_table[offset].vector,
7068 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7069
a2fbb9ea 7070 return 0;
a2fbb9ea
ET
7071}
7072
8badd27a
EG
7073static int bnx2x_enable_msi(struct bnx2x *bp)
7074{
7075 int rc;
7076
7077 rc = pci_enable_msi(bp->pdev);
7078 if (rc) {
7079 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7080 return -1;
7081 }
7082 bp->flags |= USING_MSI_FLAG;
7083
7084 return 0;
7085}
7086
a2fbb9ea
ET
7087static int bnx2x_req_irq(struct bnx2x *bp)
7088{
8badd27a 7089 unsigned long flags;
34f80b04 7090 int rc;
a2fbb9ea 7091
8badd27a
EG
7092 if (bp->flags & USING_MSI_FLAG)
7093 flags = 0;
7094 else
7095 flags = IRQF_SHARED;
7096
7097 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7098 bp->dev->name, bp->dev);
a2fbb9ea
ET
7099 if (!rc)
7100 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7101
7102 return rc;
a2fbb9ea
ET
7103}
7104
65abd74d
YG
7105static void bnx2x_napi_enable(struct bnx2x *bp)
7106{
7107 int i;
7108
555f6c78 7109 for_each_rx_queue(bp, i)
65abd74d
YG
7110 napi_enable(&bnx2x_fp(bp, i, napi));
7111}
7112
7113static void bnx2x_napi_disable(struct bnx2x *bp)
7114{
7115 int i;
7116
555f6c78 7117 for_each_rx_queue(bp, i)
65abd74d
YG
7118 napi_disable(&bnx2x_fp(bp, i, napi));
7119}
7120
7121static void bnx2x_netif_start(struct bnx2x *bp)
7122{
e1510706
EG
7123 int intr_sem;
7124
7125 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7126 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7127
7128 if (intr_sem) {
65abd74d 7129 if (netif_running(bp->dev)) {
65abd74d
YG
7130 bnx2x_napi_enable(bp);
7131 bnx2x_int_enable(bp);
555f6c78
EG
7132 if (bp->state == BNX2X_STATE_OPEN)
7133 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7134 }
7135 }
7136}
7137
f8ef6e44 7138static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7139{
f8ef6e44 7140 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7141 bnx2x_napi_disable(bp);
762d5f6c
EG
7142 netif_tx_disable(bp->dev);
7143 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
7144}
7145
a2fbb9ea
ET
7146/*
7147 * Init service functions
7148 */
7149
e665bfda
MC
7150/**
7151 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7152 *
7153 * @param bp driver descriptor
7154 * @param set set or clear an entry (1 or 0)
7155 * @param mac pointer to a buffer containing a MAC
7156 * @param cl_bit_vec bit vector of clients to register a MAC for
7157 * @param cam_offset offset in a CAM to use
7158 * @param with_bcast set broadcast MAC as well
7159 */
7160static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7161 u32 cl_bit_vec, u8 cam_offset,
7162 u8 with_bcast)
a2fbb9ea
ET
7163{
7164 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7165 int port = BP_PORT(bp);
a2fbb9ea
ET
7166
7167 /* CAM allocation
7168 * unicasts 0-31:port0 32-63:port1
7169 * multicast 64-127:port0 128-191:port1
7170 */
e665bfda
MC
7171 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7172 config->hdr.offset = cam_offset;
7173 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7174 config->hdr.reserved1 = 0;
7175
7176 /* primary MAC */
7177 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7178 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7179 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7180 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7181 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7182 swab16(*(u16 *)&mac[4]);
34f80b04 7183 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7184 if (set)
7185 config->config_table[0].target_table_entry.flags = 0;
7186 else
7187 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7188 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7189 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7190 config->config_table[0].target_table_entry.vlan_id = 0;
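	/* Worked example (editor's note, little-endian host assumed): for
	 * mac = 00:11:22:33:44:55, *(u16 *)&mac[0] reads 0x1100, so
	 * msb_mac_addr = swab16(0x1100) = 0x0011, middle_mac_addr = 0x2233
	 * and lsb_mac_addr = 0x4455, matching the %04x:%04x:%04x print
	 * below.
	 */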
7191
3101c2bc
YG
7192 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7193 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7194 config->config_table[0].cam_entry.msb_mac_addr,
7195 config->config_table[0].cam_entry.middle_mac_addr,
7196 config->config_table[0].cam_entry.lsb_mac_addr);
7197
7198 /* broadcast */
e665bfda
MC
7199 if (with_bcast) {
7200 config->config_table[1].cam_entry.msb_mac_addr =
7201 cpu_to_le16(0xffff);
7202 config->config_table[1].cam_entry.middle_mac_addr =
7203 cpu_to_le16(0xffff);
7204 config->config_table[1].cam_entry.lsb_mac_addr =
7205 cpu_to_le16(0xffff);
7206 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7207 if (set)
7208 config->config_table[1].target_table_entry.flags =
7209 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7210 else
7211 CAM_INVALIDATE(config->config_table[1]);
7212 config->config_table[1].target_table_entry.clients_bit_vector =
7213 cpu_to_le32(cl_bit_vec);
7214 config->config_table[1].target_table_entry.vlan_id = 0;
7215 }
a2fbb9ea
ET
7216
7217 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7218 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7219 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7220}
7221
e665bfda
MC
7222/**
7223 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7224 *
7225 * @param bp driver descriptor
7226 * @param set set or clear an entry (1 or 0)
7227 * @param mac pointer to a buffer containing a MAC
7228 * @param cl_bit_vec bit vector of clients to register a MAC for
7229 * @param cam_offset offset in a CAM to use
7230 */
7231static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7232 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7233{
7234 struct mac_configuration_cmd_e1h *config =
7235 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7236
8d9c5f34 7237 config->hdr.length = 1;
e665bfda
MC
7238 config->hdr.offset = cam_offset;
7239 config->hdr.client_id = 0xff;
34f80b04
EG
7240 config->hdr.reserved1 = 0;
7241
7242 /* primary MAC */
7243 config->config_table[0].msb_mac_addr =
e665bfda 7244 swab16(*(u16 *)&mac[0]);
34f80b04 7245 config->config_table[0].middle_mac_addr =
e665bfda 7246 swab16(*(u16 *)&mac[2]);
34f80b04 7247 config->config_table[0].lsb_mac_addr =
e665bfda 7248 swab16(*(u16 *)&mac[4]);
ca00392c 7249 config->config_table[0].clients_bit_vector =
e665bfda 7250 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7251 config->config_table[0].vlan_id = 0;
7252 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7253 if (set)
7254 config->config_table[0].flags = BP_PORT(bp);
7255 else
7256 config->config_table[0].flags =
7257 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7258
e665bfda 7259 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7260 (set ? "setting" : "clearing"),
34f80b04
EG
7261 config->config_table[0].msb_mac_addr,
7262 config->config_table[0].middle_mac_addr,
e665bfda 7263 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7264
7265 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7266 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7267 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7268}
7269
a2fbb9ea
ET
7270static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7271 int *state_p, int poll)
7272{
7273 /* can take a while if any port is running */
8b3a0f0b 7274 int cnt = 5000;
a2fbb9ea 7275
c14423fe
ET
7276 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7277 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7278
7279 might_sleep();
34f80b04 7280 while (cnt--) {
a2fbb9ea
ET
7281 if (poll) {
7282 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7283 /* if index is different from 0
7284 * the reply for some commands will
3101c2bc 7285 * be on the non-default queue
a2fbb9ea
ET
7286 */
7287 if (idx)
7288 bnx2x_rx_int(&bp->fp[idx], 10);
7289 }
a2fbb9ea 7290
3101c2bc 7291 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7292 if (*state_p == state) {
7293#ifdef BNX2X_STOP_ON_ERROR
7294 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7295#endif
a2fbb9ea 7296 return 0;
8b3a0f0b 7297 }
a2fbb9ea 7298
a2fbb9ea 7299 msleep(1);
e3553b29
EG
7300
7301 if (bp->panic)
7302 return -EIO;
a2fbb9ea
ET
7303 }
7304
a2fbb9ea 7305 /* timeout! */
49d66772
ET
7306 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7307 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7308#ifdef BNX2X_STOP_ON_ERROR
7309 bnx2x_panic();
7310#endif
a2fbb9ea 7311
49d66772 7312 return -EBUSY;
a2fbb9ea
ET
7313}
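/* Editor's note on usage: callers pass the state word that the ramrod
 * completion will update (&bp->state or &fp->state) and poll=1 when
 * completions are not interrupt driven, e.g. (taken from the unload
 * path below):
 *
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
 *			       &(fp->state), 1);
 */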
7314
e665bfda
MC
7315static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7316{
7317 bp->set_mac_pending++;
7318 smp_wmb();
7319
7320 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7321 (1 << bp->fp->cl_id), BP_FUNC(bp));
7322
7323 /* Wait for a completion */
7324 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7325}
7326
7327static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7328{
7329 bp->set_mac_pending++;
7330 smp_wmb();
7331
7332 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7333 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7334 1);
7335
7336 /* Wait for a completion */
7337 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7338}
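/* Editor's sketch of the handshake used by both helpers above:
 * set_mac_pending is bumped before the SET_MAC ramrod is posted (the
 * smp_wmb() orders that store), the completion path is expected to
 * drop it back to 0, and bnx2x_wait_ramrod() spins until *state_p == 0.
 */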
7339
993ac7b5
MC
7340#ifdef BCM_CNIC
7341/**
 7342 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 7343 * MAC(s). This function will wait until the ramrod completion
 7344 * returns.
7345 *
7346 * @param bp driver handle
7347 * @param set set or clear the CAM entry
7348 *
 7349 * @return 0 on success, -ENODEV if the ramrod doesn't return.
7350 */
7351static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7352{
7353 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7354
7355 bp->set_mac_pending++;
7356 smp_wmb();
7357
7358 /* Send a SET_MAC ramrod */
7359 if (CHIP_IS_E1(bp))
7360 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7361 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7362 1);
7363 else
7364 /* CAM allocation for E1H
7365 * unicasts: by func number
7366 * multicast: 20+FUNC*20, 20 each
7367 */
7368 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7369 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7370
7371 /* Wait for a completion when setting */
7372 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7373
7374 return 0;
7375}
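/* Editor's sketch of the E1 CAM offsets implied by the
 * (BP_PORT(bp) ? 32 : 0) arithmetic above:
 * port 0: ETH MAC at 0 (bcast at 1), iSCSI MAC at 2 (bcast at 3)
 * port 1: ETH MAC at 32 (bcast at 33), iSCSI MAC at 34 (bcast at 35)
 */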
7376#endif
7377
a2fbb9ea
ET
7378static int bnx2x_setup_leading(struct bnx2x *bp)
7379{
34f80b04 7380 int rc;
a2fbb9ea 7381
c14423fe 7382 /* reset IGU state */
34f80b04 7383 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7384
7385 /* SETUP ramrod */
7386 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7387
34f80b04
EG
7388 /* Wait for completion */
7389 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7390
34f80b04 7391 return rc;
a2fbb9ea
ET
7392}
7393
7394static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7395{
555f6c78
EG
7396 struct bnx2x_fastpath *fp = &bp->fp[index];
7397
a2fbb9ea 7398 /* reset IGU state */
555f6c78 7399 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7400
228241eb 7401 /* SETUP ramrod */
555f6c78
EG
7402 fp->state = BNX2X_FP_STATE_OPENING;
7403 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7404 fp->cl_id, 0);
a2fbb9ea
ET
7405
7406 /* Wait for completion */
7407 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7408 &(fp->state), 0);
a2fbb9ea
ET
7409}
7410
a2fbb9ea 7411static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7412
ca00392c
EG
7413static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7414 int *num_tx_queues_out)
7415{
7416 int _num_rx_queues = 0, _num_tx_queues = 0;
7417
7418 switch (bp->multi_mode) {
7419 case ETH_RSS_MODE_DISABLED:
7420 _num_rx_queues = 1;
7421 _num_tx_queues = 1;
7422 break;
7423
7424 case ETH_RSS_MODE_REGULAR:
7425 if (num_rx_queues)
7426 _num_rx_queues = min_t(u32, num_rx_queues,
7427 BNX2X_MAX_QUEUES(bp));
7428 else
7429 _num_rx_queues = min_t(u32, num_online_cpus(),
7430 BNX2X_MAX_QUEUES(bp));
7431
7432 if (num_tx_queues)
7433 _num_tx_queues = min_t(u32, num_tx_queues,
7434 BNX2X_MAX_QUEUES(bp));
7435 else
7436 _num_tx_queues = min_t(u32, num_online_cpus(),
7437 BNX2X_MAX_QUEUES(bp));
7438
 7439 /* There must not be more Tx queues than Rx queues */
7440 if (_num_tx_queues > _num_rx_queues) {
7441 BNX2X_ERR("number of tx queues (%d) > "
7442 "number of rx queues (%d)"
7443 " defaulting to %d\n",
7444 _num_tx_queues, _num_rx_queues,
7445 _num_rx_queues);
7446 _num_tx_queues = _num_rx_queues;
7447 }
7448 break;
7449
7450
7451 default:
7452 _num_rx_queues = 1;
7453 _num_tx_queues = 1;
7454 break;
7455 }
7456
7457 *num_rx_queues_out = _num_rx_queues;
7458 *num_tx_queues_out = _num_tx_queues;
7459}
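/* Worked example (editor's note, assuming an 8-CPU host, both module
 * parameters left at 0 and multi_mode == ETH_RSS_MODE_REGULAR):
 * _num_rx_queues = min(8, BNX2X_MAX_QUEUES(bp))
 * _num_tx_queues = min(8, BNX2X_MAX_QUEUES(bp))
 * and if num_tx_queues were forced above num_rx_queues it would be
 * clamped down to num_rx_queues by the check above.
 */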
7460
7461static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7462{
ca00392c 7463 int rc = 0;
a2fbb9ea 7464
8badd27a
EG
7465 switch (int_mode) {
7466 case INT_MODE_INTx:
7467 case INT_MODE_MSI:
ca00392c
EG
7468 bp->num_rx_queues = 1;
7469 bp->num_tx_queues = 1;
7470 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a
EG
7471 break;
7472
7473 case INT_MODE_MSIX:
7474 default:
ca00392c
EG
7475 /* Set interrupt mode according to bp->multi_mode value */
7476 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7477 &bp->num_tx_queues);
7478
7479 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7480 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7481
2dfe0e1f
EG
7482 /* if we can't use MSI-X we only need one fp,
7483 * so try to enable MSI-X with the requested number of fp's
7484 * and fallback to MSI or legacy INTx with one fp
7485 */
ca00392c
EG
7486 rc = bnx2x_enable_msix(bp);
7487 if (rc) {
34f80b04 7488 /* failed to enable MSI-X */
ca00392c
EG
7489 bp->num_rx_queues = 1;
7490 bp->num_tx_queues = 1;
a2fbb9ea 7491 }
8badd27a 7492 break;
a2fbb9ea 7493 }
555f6c78 7494 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7495 return rc;
8badd27a
EG
7496}
7497
993ac7b5
MC
7498#ifdef BCM_CNIC
7499static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7500static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7501#endif
8badd27a
EG
7502
7503/* must be called with rtnl_lock */
7504static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7505{
7506 u32 load_code;
ca00392c
EG
7507 int i, rc;
7508
8badd27a 7509#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7510 if (unlikely(bp->panic))
7511 return -EPERM;
7512#endif
7513
7514 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7515
ca00392c 7516 rc = bnx2x_set_int_mode(bp);
c14423fe 7517
a2fbb9ea
ET
7518 if (bnx2x_alloc_mem(bp))
7519 return -ENOMEM;
7520
555f6c78 7521 for_each_rx_queue(bp, i)
7a9b2557
VZ
7522 bnx2x_fp(bp, i, disable_tpa) =
7523 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7524
555f6c78 7525 for_each_rx_queue(bp, i)
2dfe0e1f
EG
7526 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7527 bnx2x_poll, 128);
7528
2dfe0e1f
EG
7529 bnx2x_napi_enable(bp);
7530
34f80b04
EG
7531 if (bp->flags & USING_MSIX_FLAG) {
7532 rc = bnx2x_req_msix_irqs(bp);
7533 if (rc) {
7534 pci_disable_msix(bp->pdev);
2dfe0e1f 7535 goto load_error1;
34f80b04
EG
7536 }
7537 } else {
ca00392c
EG
7538 /* Fall to INTx if failed to enable MSI-X due to lack of
7539 memory (in bnx2x_set_int_mode()) */
8badd27a
EG
7540 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7541 bnx2x_enable_msi(bp);
34f80b04
EG
7542 bnx2x_ack_int(bp);
7543 rc = bnx2x_req_irq(bp);
7544 if (rc) {
2dfe0e1f 7545 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
7546 if (bp->flags & USING_MSI_FLAG)
7547 pci_disable_msi(bp->pdev);
2dfe0e1f 7548 goto load_error1;
a2fbb9ea 7549 }
8badd27a
EG
7550 if (bp->flags & USING_MSI_FLAG) {
7551 bp->dev->irq = bp->pdev->irq;
7552 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7553 bp->dev->name, bp->pdev->irq);
7554 }
a2fbb9ea
ET
7555 }
7556
2dfe0e1f
EG
7557 /* Send LOAD_REQUEST command to MCP
7558 Returns the type of LOAD command:
7559 if it is the first port to be initialized
7560 common blocks should be initialized, otherwise - not
7561 */
7562 if (!BP_NOMCP(bp)) {
7563 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7564 if (!load_code) {
7565 BNX2X_ERR("MCP response failure, aborting\n");
7566 rc = -EBUSY;
7567 goto load_error2;
7568 }
7569 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7570 rc = -EBUSY; /* other port in diagnostic mode */
7571 goto load_error2;
7572 }
7573
7574 } else {
7575 int port = BP_PORT(bp);
7576
f5372251 7577 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7578 load_count[0], load_count[1], load_count[2]);
7579 load_count[0]++;
7580 load_count[1 + port]++;
f5372251 7581 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7582 load_count[0], load_count[1], load_count[2]);
7583 if (load_count[0] == 1)
7584 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7585 else if (load_count[1 + port] == 1)
7586 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7587 else
7588 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7589 }
7590
7591 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7592 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7593 bp->port.pmf = 1;
7594 else
7595 bp->port.pmf = 0;
7596 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7597
a2fbb9ea 7598 /* Initialize HW */
34f80b04
EG
7599 rc = bnx2x_init_hw(bp, load_code);
7600 if (rc) {
a2fbb9ea 7601 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7602 goto load_error2;
a2fbb9ea
ET
7603 }
7604
a2fbb9ea 7605 /* Setup NIC internals and enable interrupts */
471de716 7606 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7607
2691d51d
EG
7608 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7609 (bp->common.shmem2_base))
7610 SHMEM2_WR(bp, dcc_support,
7611 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7612 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7613
a2fbb9ea 7614 /* Send LOAD_DONE command to MCP */
34f80b04 7615 if (!BP_NOMCP(bp)) {
228241eb
ET
7616 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7617 if (!load_code) {
da5a662a 7618 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7619 rc = -EBUSY;
2dfe0e1f 7620 goto load_error3;
a2fbb9ea
ET
7621 }
7622 }
7623
7624 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7625
34f80b04
EG
7626 rc = bnx2x_setup_leading(bp);
7627 if (rc) {
da5a662a 7628 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7629#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7630 goto load_error3;
e3553b29
EG
7631#else
7632 bp->panic = 1;
7633 return -EBUSY;
7634#endif
34f80b04 7635 }
a2fbb9ea 7636
34f80b04
EG
7637 if (CHIP_IS_E1H(bp))
7638 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7639 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 7640 bp->flags |= MF_FUNC_DIS;
34f80b04 7641 }
a2fbb9ea 7642
ca00392c 7643 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
7644#ifdef BCM_CNIC
7645 /* Enable Timer scan */
7646 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7647#endif
34f80b04
EG
7648 for_each_nondefault_queue(bp, i) {
7649 rc = bnx2x_setup_multi(bp, i);
7650 if (rc)
37b091ba
MC
7651#ifdef BCM_CNIC
7652 goto load_error4;
7653#else
2dfe0e1f 7654 goto load_error3;
37b091ba 7655#endif
34f80b04 7656 }
a2fbb9ea 7657
ca00392c 7658 if (CHIP_IS_E1(bp))
e665bfda 7659 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 7660 else
e665bfda 7661 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
7662#ifdef BCM_CNIC
7663 /* Set iSCSI L2 MAC */
7664 mutex_lock(&bp->cnic_mutex);
7665 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7666 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7667 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7668 }
7669 mutex_unlock(&bp->cnic_mutex);
7670#endif
ca00392c 7671 }
34f80b04
EG
7672
7673 if (bp->port.pmf)
b5bf9068 7674 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
7675
7676 /* Start fast path */
34f80b04
EG
7677 switch (load_mode) {
7678 case LOAD_NORMAL:
ca00392c
EG
7679 if (bp->state == BNX2X_STATE_OPEN) {
7680 /* Tx queue should be only reenabled */
7681 netif_tx_wake_all_queues(bp->dev);
7682 }
2dfe0e1f 7683 /* Initialize the receive filter. */
34f80b04
EG
7684 bnx2x_set_rx_mode(bp->dev);
7685 break;
7686
7687 case LOAD_OPEN:
555f6c78 7688 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
7689 if (bp->state != BNX2X_STATE_OPEN)
7690 netif_tx_disable(bp->dev);
2dfe0e1f 7691 /* Initialize the receive filter. */
34f80b04 7692 bnx2x_set_rx_mode(bp->dev);
34f80b04 7693 break;
a2fbb9ea 7694
34f80b04 7695 case LOAD_DIAG:
2dfe0e1f 7696 /* Initialize the receive filter. */
a2fbb9ea 7697 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
7698 bp->state = BNX2X_STATE_DIAG;
7699 break;
7700
7701 default:
7702 break;
a2fbb9ea
ET
7703 }
7704
34f80b04
EG
7705 if (!bp->port.pmf)
7706 bnx2x__link_status_update(bp);
7707
a2fbb9ea
ET
7708 /* start the timer */
7709 mod_timer(&bp->timer, jiffies + bp->current_interval);
7710
993ac7b5
MC
7711#ifdef BCM_CNIC
7712 bnx2x_setup_cnic_irq_info(bp);
7713 if (bp->state == BNX2X_STATE_OPEN)
7714 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7715#endif
34f80b04 7716
a2fbb9ea
ET
7717 return 0;
7718
37b091ba
MC
7719#ifdef BCM_CNIC
7720load_error4:
7721 /* Disable Timer scan */
7722 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7723#endif
2dfe0e1f
EG
7724load_error3:
7725 bnx2x_int_disable_sync(bp, 1);
7726 if (!BP_NOMCP(bp)) {
7727 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7728 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7729 }
7730 bp->port.pmf = 0;
7a9b2557
VZ
7731 /* Free SKBs, SGEs, TPA pool and driver internals */
7732 bnx2x_free_skbs(bp);
555f6c78 7733 for_each_rx_queue(bp, i)
3196a88a 7734 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7735load_error2:
d1014634
YG
7736 /* Release IRQs */
7737 bnx2x_free_irq(bp);
2dfe0e1f
EG
7738load_error1:
7739 bnx2x_napi_disable(bp);
555f6c78 7740 for_each_rx_queue(bp, i)
7cde1c8b 7741 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7742 bnx2x_free_mem(bp);
7743
34f80b04 7744 return rc;
a2fbb9ea
ET
7745}
7746
7747static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7748{
555f6c78 7749 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7750 int rc;
7751
c14423fe 7752 /* halt the connection */
555f6c78
EG
7753 fp->state = BNX2X_FP_STATE_HALTING;
7754 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7755
34f80b04 7756 /* Wait for completion */
a2fbb9ea 7757 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7758 &(fp->state), 1);
c14423fe 7759 if (rc) /* timeout */
a2fbb9ea
ET
7760 return rc;
7761
7762 /* delete cfc entry */
7763 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7764
34f80b04
EG
7765 /* Wait for completion */
7766 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7767 &(fp->state), 1);
34f80b04 7768 return rc;
a2fbb9ea
ET
7769}
7770
da5a662a 7771static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7772{
4781bfad 7773 __le16 dsb_sp_prod_idx;
c14423fe 7774 /* if the other port is handling traffic,
a2fbb9ea 7775 this can take a lot of time */
34f80b04
EG
7776 int cnt = 500;
7777 int rc;
a2fbb9ea
ET
7778
7779 might_sleep();
7780
7781 /* Send HALT ramrod */
7782 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7783 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7784
34f80b04
EG
7785 /* Wait for completion */
7786 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7787 &(bp->fp[0].state), 1);
7788 if (rc) /* timeout */
da5a662a 7789 return rc;
a2fbb9ea 7790
49d66772 7791 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7792
228241eb 7793 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7794 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7795
49d66772 7796 /* Wait for completion to arrive on the default status block;
a2fbb9ea
ET
7797 we are going to reset the chip anyway,
7798 so there is not much to do if this times out
7799 */
34f80b04 7800 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7801 if (!cnt) {
7802 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7803 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7804 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7805#ifdef BNX2X_STOP_ON_ERROR
7806 bnx2x_panic();
7807#endif
36e552ab 7808 rc = -EBUSY;
34f80b04
EG
7809 break;
7810 }
7811 cnt--;
da5a662a 7812 msleep(1);
5650d9d4 7813 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7814 }
7815 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7816 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7817
7818 return rc;
a2fbb9ea
ET
7819}
7820
34f80b04
EG
7821static void bnx2x_reset_func(struct bnx2x *bp)
7822{
7823 int port = BP_PORT(bp);
7824 int func = BP_FUNC(bp);
7825 int base, i;
7826
7827 /* Configure IGU */
7828 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7829 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7830
37b091ba
MC
7831#ifdef BCM_CNIC
7832 /* Disable Timer scan */
7833 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7834 /*
 7835 * Wait for at least 10ms and up to 2 seconds for the timers scan to
7836 * complete
7837 */
7838 for (i = 0; i < 200; i++) {
7839 msleep(10);
7840 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7841 break;
7842 }
7843#endif
34f80b04
EG
7844 /* Clear ILT */
7845 base = FUNC_ILT_BASE(func);
7846 for (i = base; i < base + ILT_PER_FUNC; i++)
7847 bnx2x_ilt_wr(bp, i, 0);
7848}
7849
7850static void bnx2x_reset_port(struct bnx2x *bp)
7851{
7852 int port = BP_PORT(bp);
7853 u32 val;
7854
7855 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7856
7857 /* Do not rcv packets to BRB */
7858 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7859 /* Do not direct rcv packets that are not for MCP to the BRB */
7860 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7861 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7862
7863 /* Configure AEU */
7864 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7865
7866 msleep(100);
7867 /* Check for BRB port occupancy */
7868 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7869 if (val)
7870 DP(NETIF_MSG_IFDOWN,
33471629 7871 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7872
7873 /* TODO: Close Doorbell port? */
7874}
7875
34f80b04
EG
7876static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7877{
7878 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7879 BP_FUNC(bp), reset_code);
7880
7881 switch (reset_code) {
7882 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7883 bnx2x_reset_port(bp);
7884 bnx2x_reset_func(bp);
7885 bnx2x_reset_common(bp);
7886 break;
7887
7888 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7889 bnx2x_reset_port(bp);
7890 bnx2x_reset_func(bp);
7891 break;
7892
7893 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7894 bnx2x_reset_func(bp);
7895 break;
49d66772 7896
34f80b04
EG
7897 default:
7898 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7899 break;
7900 }
7901}
7902
33471629 7903/* must be called with rtnl_lock */
34f80b04 7904static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7905{
da5a662a 7906 int port = BP_PORT(bp);
a2fbb9ea 7907 u32 reset_code = 0;
da5a662a 7908 int i, cnt, rc;
a2fbb9ea 7909
993ac7b5
MC
7910#ifdef BCM_CNIC
7911 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7912#endif
a2fbb9ea
ET
7913 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7914
ab6ad5a4 7915 /* Set "drop all" */
228241eb
ET
7916 bp->rx_mode = BNX2X_RX_MODE_NONE;
7917 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7918
ab6ad5a4 7919 /* Disable HW interrupts, NAPI and Tx */
f8ef6e44 7920 bnx2x_netif_stop(bp, 1);
e94d8af3 7921
34f80b04
EG
7922 del_timer_sync(&bp->timer);
7923 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7924 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7925 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7926
70b9986c
EG
7927 /* Release IRQs */
7928 bnx2x_free_irq(bp);
7929
555f6c78
EG
7930 /* Wait until tx fastpath tasks complete */
7931 for_each_tx_queue(bp, i) {
228241eb
ET
7932 struct bnx2x_fastpath *fp = &bp->fp[i];
7933
34f80b04 7934 cnt = 1000;
e8b5fc51 7935 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7936
7961f791 7937 bnx2x_tx_int(fp);
34f80b04
EG
7938 if (!cnt) {
7939 BNX2X_ERR("timeout waiting for queue[%d]\n",
7940 i);
7941#ifdef BNX2X_STOP_ON_ERROR
7942 bnx2x_panic();
7943 return -EBUSY;
7944#else
7945 break;
7946#endif
7947 }
7948 cnt--;
da5a662a 7949 msleep(1);
34f80b04 7950 }
228241eb 7951 }
da5a662a
VZ
7952 /* Give HW time to discard old tx messages */
7953 msleep(1);
a2fbb9ea 7954
3101c2bc
YG
7955 if (CHIP_IS_E1(bp)) {
7956 struct mac_configuration_cmd *config =
7957 bnx2x_sp(bp, mcast_config);
7958
e665bfda 7959 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 7960
8d9c5f34 7961 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7962 CAM_INVALIDATE(config->config_table[i]);
7963
8d9c5f34 7964 config->hdr.length = i;
3101c2bc
YG
7965 if (CHIP_REV_IS_SLOW(bp))
7966 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7967 else
7968 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7969 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7970 config->hdr.reserved1 = 0;
7971
e665bfda
MC
7972 bp->set_mac_pending++;
7973 smp_wmb();
7974
3101c2bc
YG
7975 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7976 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7977 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7978
7979 } else { /* E1H */
65abd74d
YG
7980 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7981
e665bfda 7982 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
7983
7984 for (i = 0; i < MC_HASH_SIZE; i++)
7985 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
7986
7987 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 7988 }
993ac7b5
MC
7989#ifdef BCM_CNIC
7990 /* Clear iSCSI L2 MAC */
7991 mutex_lock(&bp->cnic_mutex);
7992 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7993 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7994 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7995 }
7996 mutex_unlock(&bp->cnic_mutex);
7997#endif
3101c2bc 7998
65abd74d
YG
7999 if (unload_mode == UNLOAD_NORMAL)
8000 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8001
7d0446c2 8002 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8003 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8004
7d0446c2 8005 else if (bp->wol) {
65abd74d
YG
8006 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8007 u8 *mac_addr = bp->dev->dev_addr;
8008 u32 val;
8009 /* The mac address is written to entries 1-4 to
8010 preserve entry 0 which is used by the PMF */
8011 u8 entry = (BP_E1HVN(bp) + 1)*8;
8012
8013 val = (mac_addr[0] << 8) | mac_addr[1];
8014 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8015
8016 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8017 (mac_addr[4] << 8) | mac_addr[5];
8018 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8019
8020 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8021
8022 } else
8023 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8024
34f80b04
EG
8025 /* Close multi and leading connections
8026 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
8027 for_each_nondefault_queue(bp, i)
8028 if (bnx2x_stop_multi(bp, i))
228241eb 8029 goto unload_error;
a2fbb9ea 8030
da5a662a
VZ
8031 rc = bnx2x_stop_leading(bp);
8032 if (rc) {
34f80b04 8033 BNX2X_ERR("Stop leading failed!\n");
da5a662a 8034#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8035 return -EBUSY;
da5a662a
VZ
8036#else
8037 goto unload_error;
34f80b04 8038#endif
228241eb
ET
8039 }
8040
8041unload_error:
34f80b04 8042 if (!BP_NOMCP(bp))
228241eb 8043 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 8044 else {
f5372251 8045 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
8046 load_count[0], load_count[1], load_count[2]);
8047 load_count[0]--;
da5a662a 8048 load_count[1 + port]--;
f5372251 8049 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
8050 load_count[0], load_count[1], load_count[2]);
8051 if (load_count[0] == 0)
8052 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 8053 else if (load_count[1 + port] == 0)
34f80b04
EG
8054 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8055 else
8056 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8057 }
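	/* Worked example of the no-MCP bookkeeping (editor's note, assuming
	 * two functions were loaded, both on port 0, so load_count started
	 * as {2, 2, 0}):
	 * first unload:  {1, 1, 0} -> FW_MSG_CODE_DRV_UNLOAD_FUNCTION
	 * second unload: {0, 0, 0} -> FW_MSG_CODE_DRV_UNLOAD_COMMON
	 */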
a2fbb9ea 8058
34f80b04
EG
8059 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8060 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8061 bnx2x__link_reset(bp);
a2fbb9ea
ET
8062
8063 /* Reset the chip */
228241eb 8064 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
8065
8066 /* Report UNLOAD_DONE to MCP */
34f80b04 8067 if (!BP_NOMCP(bp))
a2fbb9ea 8068 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8069
9a035440 8070 bp->port.pmf = 0;
a2fbb9ea 8071
7a9b2557 8072 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8073 bnx2x_free_skbs(bp);
555f6c78 8074 for_each_rx_queue(bp, i)
3196a88a 8075 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 8076 for_each_rx_queue(bp, i)
7cde1c8b 8077 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8078 bnx2x_free_mem(bp);
8079
8080 bp->state = BNX2X_STATE_CLOSED;
228241eb 8081
a2fbb9ea
ET
8082 netif_carrier_off(bp->dev);
8083
8084 return 0;
8085}
8086
34f80b04
EG
8087static void bnx2x_reset_task(struct work_struct *work)
8088{
8089 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8090
8091#ifdef BNX2X_STOP_ON_ERROR
8092 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8093 " so reset not done to allow debug dump,\n"
ad361c98 8094 " you will need to reboot when done\n");
34f80b04
EG
8095 return;
8096#endif
8097
8098 rtnl_lock();
8099
8100 if (!netif_running(bp->dev))
8101 goto reset_task_exit;
8102
8103 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8104 bnx2x_nic_load(bp, LOAD_NORMAL);
8105
8106reset_task_exit:
8107 rtnl_unlock();
8108}
8109
a2fbb9ea
ET
8110/* end of nic load/unload */
8111
8112/* ethtool_ops */
8113
8114/*
8115 * Init service functions
8116 */
8117
f1ef27ef
EG
8118static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8119{
8120 switch (func) {
8121 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8122 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8123 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8124 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8125 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8126 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8127 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8128 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8129 default:
8130 BNX2X_ERR("Unsupported function index: %d\n", func);
8131 return (u32)(-1);
8132 }
8133}
8134
8135static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8136{
8137 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8138
8139 /* Flush all outstanding writes */
8140 mmiowb();
8141
8142 /* Pretend to be function 0 */
8143 REG_WR(bp, reg, 0);
8144 /* Flush the GRC transaction (in the chip) */
8145 new_val = REG_RD(bp, reg);
8146 if (new_val != 0) {
8147 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8148 new_val);
8149 BUG();
8150 }
8151
8152 /* From now we are in the "like-E1" mode */
8153 bnx2x_int_disable(bp);
8154
8155 /* Flush all outstanding writes */
8156 mmiowb();
8157
 8158 /* Restore the original function settings */
8159 REG_WR(bp, reg, orig_func);
8160 new_val = REG_RD(bp, reg);
8161 if (new_val != orig_func) {
8162 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8163 orig_func, new_val);
8164 BUG();
8165 }
8166}
8167
8168static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8169{
8170 if (CHIP_IS_E1H(bp))
8171 bnx2x_undi_int_disable_e1h(bp, func);
8172 else
8173 bnx2x_int_disable(bp);
8174}
8175
34f80b04
EG
8176static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8177{
8178 u32 val;
8179
8180 /* Check if there is any driver already loaded */
8181 val = REG_RD(bp, MISC_REG_UNPREPARED);
8182 if (val == 0x1) {
8183 /* Check if it is the UNDI driver
8184 * UNDI driver initializes CID offset for normal bell to 0x7
8185 */
4a37fb66 8186 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8187 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8188 if (val == 0x7) {
8189 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8190 /* save our func */
34f80b04 8191 int func = BP_FUNC(bp);
da5a662a
VZ
8192 u32 swap_en;
8193 u32 swap_val;
34f80b04 8194
b4661739
EG
8195 /* clear the UNDI indication */
8196 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8197
34f80b04
EG
8198 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8199
8200 /* try unload UNDI on port 0 */
8201 bp->func = 0;
da5a662a
VZ
8202 bp->fw_seq =
8203 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8204 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 8205 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8206
8207 /* if UNDI is loaded on the other port */
8208 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8209
da5a662a
VZ
8210 /* send "DONE" for previous unload */
8211 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8212
8213 /* unload UNDI on port 1 */
34f80b04 8214 bp->func = 1;
da5a662a
VZ
8215 bp->fw_seq =
8216 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8217 DRV_MSG_SEQ_NUMBER_MASK);
8218 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8219
8220 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8221 }
8222
b4661739
EG
8223 /* now it's safe to release the lock */
8224 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8225
f1ef27ef 8226 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
8227
8228 /* close input traffic and wait for it */
8229 /* Do not rcv packets to BRB */
8230 REG_WR(bp,
8231 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8232 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8233 /* Do not direct rcv packets that are not for MCP to
8234 * the BRB */
8235 REG_WR(bp,
8236 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8237 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8238 /* clear AEU */
8239 REG_WR(bp,
8240 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8241 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8242 msleep(10);
8243
8244 /* save NIG port swap info */
8245 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8246 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
8247 /* reset device */
8248 REG_WR(bp,
8249 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 8250 0xd3ffffff);
34f80b04
EG
8251 REG_WR(bp,
8252 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8253 0x1403);
da5a662a
VZ
8254 /* take the NIG out of reset and restore swap values */
8255 REG_WR(bp,
8256 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8257 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8258 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8259 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8260
8261 /* send unload done to the MCP */
8262 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8263
8264 /* restore our func and fw_seq */
8265 bp->func = func;
8266 bp->fw_seq =
8267 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8268 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
8269
8270 } else
8271 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8272 }
8273}
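/* Editor's summary of the takeover above (descriptive, not normative):
 * UNDI is detected via DORQ_REG_NORM_CID_OFST == 0x7; the driver then
 * sends UNLOAD_REQ for this port (and, if UNDI also owns the other
 * port, unloads that one too), masks BRB/AEU input traffic, resets the
 * chip while preserving the NIG port-swap straps, reports UNLOAD_DONE
 * and finally restores bp->func and bp->fw_seq.
 */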
8274
8275static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8276{
8277 u32 val, val2, val3, val4, id;
72ce58c3 8278 u16 pmc;
34f80b04
EG
8279
8280 /* Get the chip revision id and number. */
8281 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8282 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8283 id = ((val & 0xffff) << 16);
8284 val = REG_RD(bp, MISC_REG_CHIP_REV);
8285 id |= ((val & 0xf) << 12);
8286 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8287 id |= ((val & 0xff) << 4);
5a40e08e 8288 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
8289 id |= (val & 0xf);
8290 bp->common.chip_id = id;
8291 bp->link_params.chip_id = bp->common.chip_id;
8292 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8293
1c06328c
EG
8294 val = (REG_RD(bp, 0x2874) & 0x55);
8295 if ((bp->common.chip_id & 0x1) ||
8296 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8297 bp->flags |= ONE_PORT_FLAG;
8298 BNX2X_DEV_INFO("single port device\n");
8299 }
8300
34f80b04
EG
8301 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8302 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8303 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8304 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8305 bp->common.flash_size, bp->common.flash_size);
8306
8307 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8308 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8309 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
8310 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8311 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
8312
8313 if (!bp->common.shmem_base ||
8314 (bp->common.shmem_base < 0xA0000) ||
8315 (bp->common.shmem_base >= 0xC0000)) {
8316 BNX2X_DEV_INFO("MCP not active\n");
8317 bp->flags |= NO_MCP_FLAG;
8318 return;
8319 }
8320
8321 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8322 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8323 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8324 BNX2X_ERR("BAD MCP validity signature\n");
8325
8326 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8327 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8328
8329 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8330 SHARED_HW_CFG_LED_MODE_MASK) >>
8331 SHARED_HW_CFG_LED_MODE_SHIFT);
8332
c2c8b03e
EG
8333 bp->link_params.feature_config_flags = 0;
8334 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8335 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8336 bp->link_params.feature_config_flags |=
8337 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8338 else
8339 bp->link_params.feature_config_flags &=
8340 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8341
34f80b04
EG
8342 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8343 bp->common.bc_ver = val;
8344 BNX2X_DEV_INFO("bc_ver %X\n", val);
8345 if (val < BNX2X_BC_VER) {
8346 /* for now only warn
8347 * later we might need to enforce this */
8348 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8349 " please upgrade BC\n", BNX2X_BC_VER, val);
8350 }
4d295db0
EG
8351 bp->link_params.feature_config_flags |=
8352 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8353 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
8354
8355 if (BP_E1HVN(bp) == 0) {
8356 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8357 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8358 } else {
8359 /* no WOL capability for E1HVN != 0 */
8360 bp->flags |= NO_WOL_FLAG;
8361 }
8362 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8363 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8364
8365 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8366 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8367 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8368 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8369
8370 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8371 val, val2, val3, val4);
8372}
8373
8374static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8375 u32 switch_cfg)
a2fbb9ea 8376{
34f80b04 8377 int port = BP_PORT(bp);
a2fbb9ea
ET
8378 u32 ext_phy_type;
8379
a2fbb9ea
ET
8380 switch (switch_cfg) {
8381 case SWITCH_CFG_1G:
8382 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8383
c18487ee
YR
8384 ext_phy_type =
8385 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8386 switch (ext_phy_type) {
8387 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8388 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8389 ext_phy_type);
8390
34f80b04
EG
8391 bp->port.supported |= (SUPPORTED_10baseT_Half |
8392 SUPPORTED_10baseT_Full |
8393 SUPPORTED_100baseT_Half |
8394 SUPPORTED_100baseT_Full |
8395 SUPPORTED_1000baseT_Full |
8396 SUPPORTED_2500baseX_Full |
8397 SUPPORTED_TP |
8398 SUPPORTED_FIBRE |
8399 SUPPORTED_Autoneg |
8400 SUPPORTED_Pause |
8401 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8402 break;
8403
8404 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8405 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8406 ext_phy_type);
8407
34f80b04
EG
8408 bp->port.supported |= (SUPPORTED_10baseT_Half |
8409 SUPPORTED_10baseT_Full |
8410 SUPPORTED_100baseT_Half |
8411 SUPPORTED_100baseT_Full |
8412 SUPPORTED_1000baseT_Full |
8413 SUPPORTED_TP |
8414 SUPPORTED_FIBRE |
8415 SUPPORTED_Autoneg |
8416 SUPPORTED_Pause |
8417 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8418 break;
8419
8420 default:
8421 BNX2X_ERR("NVRAM config error. "
8422 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8423 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8424 return;
8425 }
8426
34f80b04
EG
8427 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8428 port*0x10);
8429 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8430 break;
8431
8432 case SWITCH_CFG_10G:
8433 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8434
c18487ee
YR
8435 ext_phy_type =
8436 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8437 switch (ext_phy_type) {
8438 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8439 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8440 ext_phy_type);
8441
34f80b04
EG
8442 bp->port.supported |= (SUPPORTED_10baseT_Half |
8443 SUPPORTED_10baseT_Full |
8444 SUPPORTED_100baseT_Half |
8445 SUPPORTED_100baseT_Full |
8446 SUPPORTED_1000baseT_Full |
8447 SUPPORTED_2500baseX_Full |
8448 SUPPORTED_10000baseT_Full |
8449 SUPPORTED_TP |
8450 SUPPORTED_FIBRE |
8451 SUPPORTED_Autoneg |
8452 SUPPORTED_Pause |
8453 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8454 break;
8455
589abe3a
EG
8456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8457 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8458 ext_phy_type);
f1410647 8459
34f80b04 8460 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8461 SUPPORTED_1000baseT_Full |
34f80b04 8462 SUPPORTED_FIBRE |
589abe3a 8463 SUPPORTED_Autoneg |
34f80b04
EG
8464 SUPPORTED_Pause |
8465 SUPPORTED_Asym_Pause);
f1410647
ET
8466 break;
8467
589abe3a
EG
8468 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8469 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
8470 ext_phy_type);
8471
34f80b04 8472 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8473 SUPPORTED_2500baseX_Full |
34f80b04 8474 SUPPORTED_1000baseT_Full |
589abe3a
EG
8475 SUPPORTED_FIBRE |
8476 SUPPORTED_Autoneg |
8477 SUPPORTED_Pause |
8478 SUPPORTED_Asym_Pause);
8479 break;
8480
8481 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8482 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8483 ext_phy_type);
8484
8485 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
8486 SUPPORTED_FIBRE |
8487 SUPPORTED_Pause |
8488 SUPPORTED_Asym_Pause);
f1410647
ET
8489 break;
8490
589abe3a
EG
8491 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8492 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
8493 ext_phy_type);
8494
34f80b04
EG
8495 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8496 SUPPORTED_1000baseT_Full |
8497 SUPPORTED_FIBRE |
34f80b04
EG
8498 SUPPORTED_Pause |
8499 SUPPORTED_Asym_Pause);
f1410647
ET
8500 break;
8501
589abe3a
EG
8502 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8503 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
8504 ext_phy_type);
8505
34f80b04 8506 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8507 SUPPORTED_1000baseT_Full |
34f80b04 8508 SUPPORTED_Autoneg |
589abe3a 8509 SUPPORTED_FIBRE |
34f80b04
EG
8510 SUPPORTED_Pause |
8511 SUPPORTED_Asym_Pause);
c18487ee
YR
8512 break;
8513
4d295db0
EG
8514 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8515 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8516 ext_phy_type);
8517
8518 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8519 SUPPORTED_1000baseT_Full |
8520 SUPPORTED_Autoneg |
8521 SUPPORTED_FIBRE |
8522 SUPPORTED_Pause |
8523 SUPPORTED_Asym_Pause);
8524 break;
8525
f1410647
ET
8526 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8527 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8528 ext_phy_type);
8529
34f80b04
EG
8530 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8531 SUPPORTED_TP |
8532 SUPPORTED_Autoneg |
8533 SUPPORTED_Pause |
8534 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8535 break;
8536
28577185
EG
8537 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8538 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8539 ext_phy_type);
8540
8541 bp->port.supported |= (SUPPORTED_10baseT_Half |
8542 SUPPORTED_10baseT_Full |
8543 SUPPORTED_100baseT_Half |
8544 SUPPORTED_100baseT_Full |
8545 SUPPORTED_1000baseT_Full |
8546 SUPPORTED_10000baseT_Full |
8547 SUPPORTED_TP |
8548 SUPPORTED_Autoneg |
8549 SUPPORTED_Pause |
8550 SUPPORTED_Asym_Pause);
8551 break;
8552
c18487ee
YR
8553 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8554 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8555 bp->link_params.ext_phy_config);
8556 break;
8557
a2fbb9ea
ET
8558 default:
8559 BNX2X_ERR("NVRAM config error. "
8560 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8561 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8562 return;
8563 }
8564
34f80b04
EG
8565 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8566 port*0x18);
8567 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8568
a2fbb9ea
ET
8569 break;
8570
8571 default:
8572 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8573 bp->port.link_config);
a2fbb9ea
ET
8574 return;
8575 }
34f80b04 8576 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
8577
8578 /* mask what we support according to speed_cap_mask */
c18487ee
YR
8579 if (!(bp->link_params.speed_cap_mask &
8580 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8581 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8582
c18487ee
YR
8583 if (!(bp->link_params.speed_cap_mask &
8584 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8585 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8586
c18487ee
YR
8587 if (!(bp->link_params.speed_cap_mask &
8588 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8589 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8590
c18487ee
YR
8591 if (!(bp->link_params.speed_cap_mask &
8592 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8593 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8594
c18487ee
YR
8595 if (!(bp->link_params.speed_cap_mask &
8596 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
8597 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8598 SUPPORTED_1000baseT_Full);
a2fbb9ea 8599
c18487ee
YR
8600 if (!(bp->link_params.speed_cap_mask &
8601 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8602 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8603
c18487ee
YR
8604 if (!(bp->link_params.speed_cap_mask &
8605 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8606 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8607
34f80b04 8608 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
8609}
8610
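/* Translate the NVRAM link_config word into requested line speed,
 * duplex and advertised modes. Requesting a mode the port does not
 * support is reported as an NVRAM config error; an unknown speed
 * value falls back to autoneg with the full supported mask advertised.
 */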
34f80b04 8611static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8612{
c18487ee 8613 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8614
34f80b04 8615 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8616 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8617 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8618 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8619 bp->port.advertising = bp->port.supported;
a2fbb9ea 8620 } else {
c18487ee
YR
8621 u32 ext_phy_type =
8622 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8623
8624 if ((ext_phy_type ==
8625 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8626 (ext_phy_type ==
8627 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8628 /* force 10G, no AN */
c18487ee 8629 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8630 bp->port.advertising =
a2fbb9ea
ET
8631 (ADVERTISED_10000baseT_Full |
8632 ADVERTISED_FIBRE);
8633 break;
8634 }
8635 BNX2X_ERR("NVRAM config error. "
8636 "Invalid link_config 0x%x"
8637 " Autoneg not supported\n",
34f80b04 8638 bp->port.link_config);
a2fbb9ea
ET
8639 return;
8640 }
8641 break;
8642
8643 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8644 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8645 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
8646 bp->port.advertising = (ADVERTISED_10baseT_Full |
8647 ADVERTISED_TP);
a2fbb9ea
ET
8648 } else {
8649 BNX2X_ERR("NVRAM config error. "
8650 "Invalid link_config 0x%x"
8651 " speed_cap_mask 0x%x\n",
34f80b04 8652 bp->port.link_config,
c18487ee 8653 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8654 return;
8655 }
8656 break;
8657
8658 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8659 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8660 bp->link_params.req_line_speed = SPEED_10;
8661 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8662 bp->port.advertising = (ADVERTISED_10baseT_Half |
8663 ADVERTISED_TP);
a2fbb9ea
ET
8664 } else {
8665 BNX2X_ERR("NVRAM config error. "
8666 "Invalid link_config 0x%x"
8667 " speed_cap_mask 0x%x\n",
34f80b04 8668 bp->port.link_config,
c18487ee 8669 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8670 return;
8671 }
8672 break;
8673
8674 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8675 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8676 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8677 bp->port.advertising = (ADVERTISED_100baseT_Full |
8678 ADVERTISED_TP);
a2fbb9ea
ET
8679 } else {
8680 BNX2X_ERR("NVRAM config error. "
8681 "Invalid link_config 0x%x"
8682 " speed_cap_mask 0x%x\n",
34f80b04 8683 bp->port.link_config,
c18487ee 8684 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8685 return;
8686 }
8687 break;
8688
8689 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8690 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8691 bp->link_params.req_line_speed = SPEED_100;
8692 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8693 bp->port.advertising = (ADVERTISED_100baseT_Half |
8694 ADVERTISED_TP);
a2fbb9ea
ET
8695 } else {
8696 BNX2X_ERR("NVRAM config error. "
8697 "Invalid link_config 0x%x"
8698 " speed_cap_mask 0x%x\n",
34f80b04 8699 bp->port.link_config,
c18487ee 8700 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8701 return;
8702 }
8703 break;
8704
8705 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8706 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8707 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8708 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8709 ADVERTISED_TP);
a2fbb9ea
ET
8710 } else {
8711 BNX2X_ERR("NVRAM config error. "
8712 "Invalid link_config 0x%x"
8713 " speed_cap_mask 0x%x\n",
34f80b04 8714 bp->port.link_config,
c18487ee 8715 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8716 return;
8717 }
8718 break;
8719
8720 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8721 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8722 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8723 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8724 ADVERTISED_TP);
a2fbb9ea
ET
8725 } else {
8726 BNX2X_ERR("NVRAM config error. "
8727 "Invalid link_config 0x%x"
8728 " speed_cap_mask 0x%x\n",
34f80b04 8729 bp->port.link_config,
c18487ee 8730 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8731 return;
8732 }
8733 break;
8734
8735 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8736 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8737 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8738 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8739 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8740 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8741 ADVERTISED_FIBRE);
a2fbb9ea
ET
8742 } else {
8743 BNX2X_ERR("NVRAM config error. "
8744 "Invalid link_config 0x%x"
8745 " speed_cap_mask 0x%x\n",
34f80b04 8746 bp->port.link_config,
c18487ee 8747 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8748 return;
8749 }
8750 break;
8751
8752 default:
8753 BNX2X_ERR("NVRAM config error. "
8754 "BAD link speed link_config 0x%x\n",
34f80b04 8755 bp->port.link_config);
c18487ee 8756 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8757 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8758 break;
8759 }
a2fbb9ea 8760
34f80b04
EG
8761 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8762 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8763 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8764 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8765 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8766
c18487ee 8767 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8768 " advertising 0x%x\n",
c18487ee
YR
8769 bp->link_params.req_line_speed,
8770 bp->link_params.req_duplex,
34f80b04 8771 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8772}
8773
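/* Pack the two MAC address halves read from shmem (16-bit upper part,
 * 32-bit lower part) into a 6-byte buffer in network byte order; e.g.
 * mac_hi = 0x0010 and mac_lo = 0x18aabbcc yield 00:10:18:aa:bb:cc.
 */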
e665bfda
MC
8774static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8775{
8776 mac_hi = cpu_to_be16(mac_hi);
8777 mac_lo = cpu_to_be32(mac_lo);
8778 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8779 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8780}
8781
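/* Read the per-port hardware configuration from shmem: lane and
 * external PHY config (normalizing BCM8727_NOC to BCM8727 plus the
 * no-over-current feature flag), speed capability mask, the per-lane
 * XGXS RX/TX settings, the WoL default, the MDIO PHY address and
 * the port MAC address.
 */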
34f80b04 8782static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8783{
34f80b04
EG
8784 int port = BP_PORT(bp);
8785 u32 val, val2;
589abe3a 8786 u32 config;
c2c8b03e 8787 u16 i;
01cd4528 8788 u32 ext_phy_type;
a2fbb9ea 8789
c18487ee 8790 bp->link_params.bp = bp;
34f80b04 8791 bp->link_params.port = port;
c18487ee 8792
c18487ee 8793 bp->link_params.lane_config =
a2fbb9ea 8794 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8795 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8796 SHMEM_RD(bp,
8797 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8798 /* BCM8727_NOC => BCM8727 no over current */
8799 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8800 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8801 bp->link_params.ext_phy_config &=
8802 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8803 bp->link_params.ext_phy_config |=
8804 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8805 bp->link_params.feature_config_flags |=
8806 FEATURE_CONFIG_BCM8727_NOC;
8807 }
8808
c18487ee 8809 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8810 SHMEM_RD(bp,
8811 dev_info.port_hw_config[port].speed_capability_mask);
8812
34f80b04 8813 bp->port.link_config =
a2fbb9ea
ET
8814 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8815
c2c8b03e
EG
8816 /* Get the 4 lanes xgxs config rx and tx */
8817 for (i = 0; i < 2; i++) {
8818 val = SHMEM_RD(bp,
8819 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8820 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8821 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8822
8823 val = SHMEM_RD(bp,
8824 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8825 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8826 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8827 }
8828
3ce2c3f9
EG
8829 /* If the device is capable of WoL, set the default state according
8830 * to the HW
8831 */
4d295db0 8832 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8833 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8834 (config & PORT_FEATURE_WOL_ENABLED));
8835
c2c8b03e
EG
8836 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8837 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8838 bp->link_params.lane_config,
8839 bp->link_params.ext_phy_config,
34f80b04 8840 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8841
4d295db0
EG
8842 bp->link_params.switch_cfg |= (bp->port.link_config &
8843 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8844 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8845
8846 bnx2x_link_settings_requested(bp);
8847
01cd4528
EG
8848 /*
8849 * If connected directly, work with the internal PHY, otherwise, work
8850 * with the external PHY
8851 */
8852 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8853 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8854 bp->mdio.prtad = bp->link_params.phy_addr;
8855
8856 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8857 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8858 bp->mdio.prtad =
659bc5c4 8859 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 8860
a2fbb9ea
ET
8861 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8862 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8863 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
8864 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8865 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
8866
8867#ifdef BCM_CNIC
8868 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8869 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8870 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8871#endif
34f80b04
EG
8872}
8873
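/* Gather chip- and function-level configuration. On E1H, multi-
 * function mode is detected from the outer-VLAN (E1HOV) tag of
 * function 0; a function without a valid tag in MF mode, or a
 * non-zero VN in single-function mode, fails with -EPERM. In MF
 * mode the MAC address may be overridden from mf_cfg.
 */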
8874static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8875{
8876 int func = BP_FUNC(bp);
8877 u32 val, val2;
8878 int rc = 0;
a2fbb9ea 8879
34f80b04 8880 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8881
34f80b04
EG
8882 bp->e1hov = 0;
8883 bp->e1hmf = 0;
8884 if (CHIP_IS_E1H(bp)) {
8885 bp->mf_config =
8886 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8887
2691d51d 8888 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8889 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8890 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8891 bp->e1hmf = 1;
2691d51d
EG
8892 BNX2X_DEV_INFO("%s function mode\n",
8893 IS_E1HMF(bp) ? "multi" : "single");
8894
8895 if (IS_E1HMF(bp)) {
8896 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8897 e1hov_tag) &
8898 FUNC_MF_CFG_E1HOV_TAG_MASK);
8899 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8900 bp->e1hov = val;
8901 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8902 "(0x%04x)\n",
8903 func, bp->e1hov, bp->e1hov);
8904 } else {
34f80b04
EG
8905 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8906 " aborting\n", func);
8907 rc = -EPERM;
8908 }
2691d51d
EG
8909 } else {
8910 if (BP_E1HVN(bp)) {
8911 BNX2X_ERR("!!! VN %d in single function mode,"
8912 " aborting\n", BP_E1HVN(bp));
8913 rc = -EPERM;
8914 }
34f80b04
EG
8915 }
8916 }
a2fbb9ea 8917
34f80b04
EG
8918 if (!BP_NOMCP(bp)) {
8919 bnx2x_get_port_hwinfo(bp);
8920
8921 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8922 DRV_MSG_SEQ_NUMBER_MASK);
8923 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8924 }
8925
8926 if (IS_E1HMF(bp)) {
8927 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8928 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8929 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8930 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8931 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8932 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8933 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8934 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8935 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8936 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8937 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8938 ETH_ALEN);
8939 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8940 ETH_ALEN);
a2fbb9ea 8941 }
34f80b04
EG
8942
8943 return rc;
a2fbb9ea
ET
8944 }
8945
34f80b04
EG
8946 if (BP_NOMCP(bp)) {
8947 /* only supposed to happen on emulation/FPGA */
33471629 8948 BNX2X_ERR("warning: random MAC workaround active\n");
34f80b04
EG
8949 random_ether_addr(bp->dev->dev_addr);
8950 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8951 }
a2fbb9ea 8952
34f80b04
EG
8953 return rc;
8954}
8955
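/* One-time driver state init: interrupts stay masked via intr_sem
 * until the HW is set up, mutexes and work items are initialized,
 * and the module parameters (multi_mode, disable_tpa, dropless_fc,
 * mrrs, poll) are folded into the bnx2x state before the timer is
 * prepared.
 */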
8956static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8957{
8958 int func = BP_FUNC(bp);
87942b46 8959 int timer_interval;
34f80b04
EG
8960 int rc;
8961
da5a662a
VZ
8962 /* Disable interrupt handling until HW is initialized */
8963 atomic_set(&bp->intr_sem, 1);
e1510706 8964 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8965
34f80b04 8966 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8967 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
8968#ifdef BCM_CNIC
8969 mutex_init(&bp->cnic_mutex);
8970#endif
a2fbb9ea 8971
1cf167f2 8972 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8973 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8974
8975 rc = bnx2x_get_hwinfo(bp);
8976
8977 /* need to reset chip if undi was active */
8978 if (!BP_NOMCP(bp))
8979 bnx2x_undi_unload(bp);
8980
8981 if (CHIP_REV_IS_FPGA(bp))
8982 printk(KERN_ERR PFX "FPGA detected\n");
8983
8984 if (BP_NOMCP(bp) && (func == 0))
8985 printk(KERN_ERR PFX
8986 "MCP disabled, must load devices in order!\n");
8987
555f6c78 8988 /* Set multi queue mode */
8badd27a
EG
8989 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8990 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8991 printk(KERN_ERR PFX
8badd27a 8992 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8993 multi_mode = ETH_RSS_MODE_DISABLED;
8994 }
8995 bp->multi_mode = multi_mode;
8996
8997
7a9b2557
VZ
8998 /* Set TPA flags */
8999 if (disable_tpa) {
9000 bp->flags &= ~TPA_ENABLE_FLAG;
9001 bp->dev->features &= ~NETIF_F_LRO;
9002 } else {
9003 bp->flags |= TPA_ENABLE_FLAG;
9004 bp->dev->features |= NETIF_F_LRO;
9005 }
9006
a18f5128
EG
9007 if (CHIP_IS_E1(bp))
9008 bp->dropless_fc = 0;
9009 else
9010 bp->dropless_fc = dropless_fc;
9011
8d5726c4 9012 bp->mrrs = mrrs;
7a9b2557 9013
34f80b04
EG
9014 bp->tx_ring_size = MAX_TX_AVAIL;
9015 bp->rx_ring_size = MAX_RX_AVAIL;
9016
9017 bp->rx_csum = 1;
34f80b04
EG
9018
9019 bp->tx_ticks = 50;
9020 bp->rx_ticks = 25;
9021
87942b46
EG
9022 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9023 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
9024
9025 init_timer(&bp->timer);
9026 bp->timer.expires = jiffies + bp->current_interval;
9027 bp->timer.data = (unsigned long) bp;
9028 bp->timer.function = bnx2x_timer;
9029
9030 return rc;
a2fbb9ea
ET
9031}
9032
9033/*
9034 * ethtool service functions
9035 */
9036
9037/* All ethtool functions called with rtnl_lock */
9038
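/* Report the current link settings. With the link up in multi-
 * function mode, the reported speed is capped by this function's
 * share of the port bandwidth (vn_max_rate, taken from mf_config
 * in units of 100 Mbps).
 */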
9039static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9040{
9041 struct bnx2x *bp = netdev_priv(dev);
9042
34f80b04
EG
9043 cmd->supported = bp->port.supported;
9044 cmd->advertising = bp->port.advertising;
a2fbb9ea 9045
f34d28ea
EG
9046 if ((bp->state == BNX2X_STATE_OPEN) &&
9047 !(bp->flags & MF_FUNC_DIS) &&
9048 (bp->link_vars.link_up)) {
c18487ee
YR
9049 cmd->speed = bp->link_vars.line_speed;
9050 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
9051 if (IS_E1HMF(bp)) {
9052 u16 vn_max_rate;
34f80b04 9053
b015e3d1
EG
9054 vn_max_rate =
9055 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 9056 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
9057 if (vn_max_rate < cmd->speed)
9058 cmd->speed = vn_max_rate;
9059 }
9060 } else {
9061 cmd->speed = -1;
9062 cmd->duplex = -1;
34f80b04 9063 }
a2fbb9ea 9064
c18487ee
YR
9065 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9066 u32 ext_phy_type =
9067 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
9068
9069 switch (ext_phy_type) {
9070 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 9071 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 9072 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
9073 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9074 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9075 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 9076 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
9077 cmd->port = PORT_FIBRE;
9078 break;
9079
9080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 9081 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
9082 cmd->port = PORT_TP;
9083 break;
9084
c18487ee
YR
9085 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9086 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9087 bp->link_params.ext_phy_config);
9088 break;
9089
f1410647
ET
9090 default:
9091 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
9092 bp->link_params.ext_phy_config);
9093 break;
f1410647
ET
9094 }
9095 } else
a2fbb9ea 9096 cmd->port = PORT_TP;
a2fbb9ea 9097
01cd4528 9098 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
9099 cmd->transceiver = XCVR_INTERNAL;
9100
c18487ee 9101 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 9102 cmd->autoneg = AUTONEG_ENABLE;
f1410647 9103 else
a2fbb9ea 9104 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
9105
9106 cmd->maxtxpkt = 0;
9107 cmd->maxrxpkt = 0;
9108
9109 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9110 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9111 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9112 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9113 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9114 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9115 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9116
9117 return 0;
9118}
9119
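/* Apply new link settings from ethtool. In MF mode the request is
 * silently ignored (returns 0). Autoneg requires SUPPORTED_Autoneg;
 * forced speed/duplex requests are validated against bp->port.supported
 * before being latched into link_params, and a running device gets
 * the link re-set.
 */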
9120static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9121{
9122 struct bnx2x *bp = netdev_priv(dev);
9123 u32 advertising;
9124
34f80b04
EG
9125 if (IS_E1HMF(bp))
9126 return 0;
9127
a2fbb9ea
ET
9128 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9129 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9130 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9131 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9132 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9133 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9134 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9135
a2fbb9ea 9136 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
9137 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9138 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 9139 return -EINVAL;
f1410647 9140 }
a2fbb9ea
ET
9141
9142 /* advertise the requested speed and duplex if supported */
34f80b04 9143 cmd->advertising &= bp->port.supported;
a2fbb9ea 9144
c18487ee
YR
9145 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9146 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
9147 bp->port.advertising |= (ADVERTISED_Autoneg |
9148 cmd->advertising);
a2fbb9ea
ET
9149
9150 } else { /* forced speed */
9151 /* advertise the requested speed and duplex if supported */
9152 switch (cmd->speed) {
9153 case SPEED_10:
9154 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9155 if (!(bp->port.supported &
f1410647
ET
9156 SUPPORTED_10baseT_Full)) {
9157 DP(NETIF_MSG_LINK,
9158 "10M full not supported\n");
a2fbb9ea 9159 return -EINVAL;
f1410647 9160 }
a2fbb9ea
ET
9161
9162 advertising = (ADVERTISED_10baseT_Full |
9163 ADVERTISED_TP);
9164 } else {
34f80b04 9165 if (!(bp->port.supported &
f1410647
ET
9166 SUPPORTED_10baseT_Half)) {
9167 DP(NETIF_MSG_LINK,
9168 "10M half not supported\n");
a2fbb9ea 9169 return -EINVAL;
f1410647 9170 }
a2fbb9ea
ET
9171
9172 advertising = (ADVERTISED_10baseT_Half |
9173 ADVERTISED_TP);
9174 }
9175 break;
9176
9177 case SPEED_100:
9178 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9179 if (!(bp->port.supported &
f1410647
ET
9180 SUPPORTED_100baseT_Full)) {
9181 DP(NETIF_MSG_LINK,
9182 "100M full not supported\n");
a2fbb9ea 9183 return -EINVAL;
f1410647 9184 }
a2fbb9ea
ET
9185
9186 advertising = (ADVERTISED_100baseT_Full |
9187 ADVERTISED_TP);
9188 } else {
34f80b04 9189 if (!(bp->port.supported &
f1410647
ET
9190 SUPPORTED_100baseT_Half)) {
9191 DP(NETIF_MSG_LINK,
9192 "100M half not supported\n");
a2fbb9ea 9193 return -EINVAL;
f1410647 9194 }
a2fbb9ea
ET
9195
9196 advertising = (ADVERTISED_100baseT_Half |
9197 ADVERTISED_TP);
9198 }
9199 break;
9200
9201 case SPEED_1000:
f1410647
ET
9202 if (cmd->duplex != DUPLEX_FULL) {
9203 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 9204 return -EINVAL;
f1410647 9205 }
a2fbb9ea 9206
34f80b04 9207 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 9208 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 9209 return -EINVAL;
f1410647 9210 }
a2fbb9ea
ET
9211
9212 advertising = (ADVERTISED_1000baseT_Full |
9213 ADVERTISED_TP);
9214 break;
9215
9216 case SPEED_2500:
f1410647
ET
9217 if (cmd->duplex != DUPLEX_FULL) {
9218 DP(NETIF_MSG_LINK,
9219 "2.5G half not supported\n");
a2fbb9ea 9220 return -EINVAL;
f1410647 9221 }
a2fbb9ea 9222
34f80b04 9223 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
9224 DP(NETIF_MSG_LINK,
9225 "2.5G full not supported\n");
a2fbb9ea 9226 return -EINVAL;
f1410647 9227 }
a2fbb9ea 9228
f1410647 9229 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
9230 ADVERTISED_TP);
9231 break;
9232
9233 case SPEED_10000:
f1410647
ET
9234 if (cmd->duplex != DUPLEX_FULL) {
9235 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 9236 return -EINVAL;
f1410647 9237 }
a2fbb9ea 9238
34f80b04 9239 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 9240 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 9241 return -EINVAL;
f1410647 9242 }
a2fbb9ea
ET
9243
9244 advertising = (ADVERTISED_10000baseT_Full |
9245 ADVERTISED_FIBRE);
9246 break;
9247
9248 default:
f1410647 9249 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
9250 return -EINVAL;
9251 }
9252
c18487ee
YR
9253 bp->link_params.req_line_speed = cmd->speed;
9254 bp->link_params.req_duplex = cmd->duplex;
34f80b04 9255 bp->port.advertising = advertising;
a2fbb9ea
ET
9256 }
9257
c18487ee 9258 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 9259 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 9260 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 9261 bp->port.advertising);
a2fbb9ea 9262
34f80b04 9263 if (netif_running(dev)) {
bb2a0f7a 9264 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9265 bnx2x_link_set(bp);
9266 }
a2fbb9ea
ET
9267
9268 return 0;
9269}
9270
0a64ea57
EG
9271#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9272#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9273
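/* Register dump size: each block marked online for the running chip
 * (E1 or E1H) contributes its size in dwords; the wreg tables add
 * size * (1 + read_regs_count) dwords each. The total is converted
 * to bytes and the dump header is added on top.
 */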
9274static int bnx2x_get_regs_len(struct net_device *dev)
9275{
0a64ea57 9276 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 9277 int regdump_len = 0;
0a64ea57
EG
9278 int i;
9279
0a64ea57
EG
9280 if (CHIP_IS_E1(bp)) {
9281 for (i = 0; i < REGS_COUNT; i++)
9282 if (IS_E1_ONLINE(reg_addrs[i].info))
9283 regdump_len += reg_addrs[i].size;
9284
9285 for (i = 0; i < WREGS_COUNT_E1; i++)
9286 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9287 regdump_len += wreg_addrs_e1[i].size *
9288 (1 + wreg_addrs_e1[i].read_regs_count);
9289
9290 } else { /* E1H */
9291 for (i = 0; i < REGS_COUNT; i++)
9292 if (IS_E1H_ONLINE(reg_addrs[i].info))
9293 regdump_len += reg_addrs[i].size;
9294
9295 for (i = 0; i < WREGS_COUNT_E1H; i++)
9296 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9297 regdump_len += wreg_addrs_e1h[i].size *
9298 (1 + wreg_addrs_e1h[i].read_regs_count);
9299 }
9300 regdump_len *= 4;
9301 regdump_len += sizeof(struct dump_hdr);
9302
9303 return regdump_len;
9304}
9305
9306static void bnx2x_get_regs(struct net_device *dev,
9307 struct ethtool_regs *regs, void *_p)
9308{
9309 u32 *p = _p, i, j;
9310 struct bnx2x *bp = netdev_priv(dev);
9311 struct dump_hdr dump_hdr = {0};
9312
9313 regs->version = 0;
9314 memset(p, 0, regs->len);
9315
9316 if (!netif_running(bp->dev))
9317 return;
9318
9319 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9320 dump_hdr.dump_sign = dump_sign_all;
9321 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9322 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9323 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9324 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9325 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9326
9327 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9328 p += dump_hdr.hdr_size + 1;
9329
9330 if (CHIP_IS_E1(bp)) {
9331 for (i = 0; i < REGS_COUNT; i++)
9332 if (IS_E1_ONLINE(reg_addrs[i].info))
9333 for (j = 0; j < reg_addrs[i].size; j++)
9334 *p++ = REG_RD(bp,
9335 reg_addrs[i].addr + j*4);
9336
9337 } else { /* E1H */
9338 for (i = 0; i < REGS_COUNT; i++)
9339 if (IS_E1H_ONLINE(reg_addrs[i].info))
9340 for (j = 0; j < reg_addrs[i].size; j++)
9341 *p++ = REG_RD(bp,
9342 reg_addrs[i].addr + j*4);
9343 }
9344}
9345
0d28e49a
EG
9346#define PHY_FW_VER_LEN 10
9347
9348static void bnx2x_get_drvinfo(struct net_device *dev,
9349 struct ethtool_drvinfo *info)
9350{
9351 struct bnx2x *bp = netdev_priv(dev);
9352 u8 phy_fw_ver[PHY_FW_VER_LEN];
9353
9354 strcpy(info->driver, DRV_MODULE_NAME);
9355 strcpy(info->version, DRV_MODULE_VERSION);
9356
9357 phy_fw_ver[0] = '\0';
9358 if (bp->port.pmf) {
9359 bnx2x_acquire_phy_lock(bp);
9360 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9361 (bp->state != BNX2X_STATE_CLOSED),
9362 phy_fw_ver, PHY_FW_VER_LEN);
9363 bnx2x_release_phy_lock(bp);
9364 }
9365
9366 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9367 (bp->common.bc_ver & 0xff0000) >> 16,
9368 (bp->common.bc_ver & 0xff00) >> 8,
9369 (bp->common.bc_ver & 0xff),
9370 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9371 strcpy(info->bus_info, pci_name(bp->pdev));
9372 info->n_stats = BNX2X_NUM_STATS;
9373 info->testinfo_len = BNX2X_NUM_TESTS;
9374 info->eedump_len = bp->common.flash_size;
9375 info->regdump_len = bnx2x_get_regs_len(dev);
9376}
9377
a2fbb9ea
ET
9378static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9379{
9380 struct bnx2x *bp = netdev_priv(dev);
9381
9382 if (bp->flags & NO_WOL_FLAG) {
9383 wol->supported = 0;
9384 wol->wolopts = 0;
9385 } else {
9386 wol->supported = WAKE_MAGIC;
9387 if (bp->wol)
9388 wol->wolopts = WAKE_MAGIC;
9389 else
9390 wol->wolopts = 0;
9391 }
9392 memset(&wol->sopass, 0, sizeof(wol->sopass));
9393}
9394
9395static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9396{
9397 struct bnx2x *bp = netdev_priv(dev);
9398
9399 if (wol->wolopts & ~WAKE_MAGIC)
9400 return -EINVAL;
9401
9402 if (wol->wolopts & WAKE_MAGIC) {
9403 if (bp->flags & NO_WOL_FLAG)
9404 return -EINVAL;
9405
9406 bp->wol = 1;
34f80b04 9407 } else
a2fbb9ea 9408 bp->wol = 0;
34f80b04 9409
a2fbb9ea
ET
9410 return 0;
9411}
9412
9413static u32 bnx2x_get_msglevel(struct net_device *dev)
9414{
9415 struct bnx2x *bp = netdev_priv(dev);
9416
9417 return bp->msglevel;
9418}
9419
9420static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9421{
9422 struct bnx2x *bp = netdev_priv(dev);
9423
9424 if (capable(CAP_NET_ADMIN))
9425 bp->msglevel = level;
9426}
9427
9428static int bnx2x_nway_reset(struct net_device *dev)
9429{
9430 struct bnx2x *bp = netdev_priv(dev);
9431
34f80b04
EG
9432 if (!bp->port.pmf)
9433 return 0;
a2fbb9ea 9434
34f80b04 9435 if (netif_running(dev)) {
bb2a0f7a 9436 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9437 bnx2x_link_set(bp);
9438 }
a2fbb9ea
ET
9439
9440 return 0;
9441}
9442
ab6ad5a4 9443static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
9444{
9445 struct bnx2x *bp = netdev_priv(dev);
9446
f34d28ea
EG
9447 if (bp->flags & MF_FUNC_DIS)
9448 return 0;
9449
01e53298
NO
9450 return bp->link_vars.link_up;
9451}
9452
a2fbb9ea
ET
9453static int bnx2x_get_eeprom_len(struct net_device *dev)
9454{
9455 struct bnx2x *bp = netdev_priv(dev);
9456
34f80b04 9457 return bp->common.flash_size;
a2fbb9ea
ET
9458}
9459
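/* Take the per-port NVRAM arbitration lock: set the request bit in
 * MCPR_NVM_SW_ARB and poll for the grant, with the poll count scaled
 * up 100x on slow emulation/FPGA platforms. Returns -EBUSY if the
 * grant never shows up.
 */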
9460static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9461{
34f80b04 9462 int port = BP_PORT(bp);
a2fbb9ea
ET
9463 int count, i;
9464 u32 val = 0;
9465
9466 /* adjust timeout for emulation/FPGA */
9467 count = NVRAM_TIMEOUT_COUNT;
9468 if (CHIP_REV_IS_SLOW(bp))
9469 count *= 100;
9470
9471 /* request access to nvram interface */
9472 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9473 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9474
9475 for (i = 0; i < count*10; i++) {
9476 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9477 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9478 break;
9479
9480 udelay(5);
9481 }
9482
9483 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9484 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9485 return -EBUSY;
9486 }
9487
9488 return 0;
9489}
9490
9491static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9492{
34f80b04 9493 int port = BP_PORT(bp);
a2fbb9ea
ET
9494 int count, i;
9495 u32 val = 0;
9496
9497 /* adjust timeout for emulation/FPGA */
9498 count = NVRAM_TIMEOUT_COUNT;
9499 if (CHIP_REV_IS_SLOW(bp))
9500 count *= 100;
9501
9502 /* relinquish nvram interface */
9503 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9504 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9505
9506 for (i = 0; i < count*10; i++) {
9507 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9508 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9509 break;
9510
9511 udelay(5);
9512 }
9513
9514 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9515 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9516 return -EBUSY;
9517 }
9518
9519 return 0;
9520}
9521
9522static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9523{
9524 u32 val;
9525
9526 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9527
9528 /* enable both bits, even on read */
9529 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9530 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9531 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9532}
9533
9534static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9535{
9536 u32 val;
9537
9538 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9539
9540 /* disable both bits, even after read */
9541 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9542 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9543 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9544}
9545
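/* Read a single dword from NVRAM: program the address, issue the
 * command with DOIT set and poll for DONE. The result is returned
 * big-endian because ethtool presents EEPROM content as a byte
 * array rather than host-order words.
 */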
4781bfad 9546static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9547 u32 cmd_flags)
9548{
f1410647 9549 int count, i, rc;
a2fbb9ea
ET
9550 u32 val;
9551
9552 /* build the command word */
9553 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9554
9555 /* need to clear DONE bit separately */
9556 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9557
9558 /* address of the NVRAM to read from */
9559 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9560 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9561
9562 /* issue a read command */
9563 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9564
9565 /* adjust timeout for emulation/FPGA */
9566 count = NVRAM_TIMEOUT_COUNT;
9567 if (CHIP_REV_IS_SLOW(bp))
9568 count *= 100;
9569
9570 /* wait for completion */
9571 *ret_val = 0;
9572 rc = -EBUSY;
9573 for (i = 0; i < count; i++) {
9574 udelay(5);
9575 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9576
9577 if (val & MCPR_NVM_COMMAND_DONE) {
9578 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
9579 /* we read nvram data in cpu order
9580 * but ethtool sees it as an array of bytes
9581 * converting to big-endian will do the work */
4781bfad 9582 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9583 rc = 0;
9584 break;
9585 }
9586 }
9587
9588 return rc;
9589}
9590
9591static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9592 int buf_size)
9593{
9594 int rc;
9595 u32 cmd_flags;
4781bfad 9596 __be32 val;
a2fbb9ea
ET
9597
9598 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9599 DP(BNX2X_MSG_NVM,
c14423fe 9600 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9601 offset, buf_size);
9602 return -EINVAL;
9603 }
9604
34f80b04
EG
9605 if (offset + buf_size > bp->common.flash_size) {
9606 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9607 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9608 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9609 return -EINVAL;
9610 }
9611
9612 /* request access to nvram interface */
9613 rc = bnx2x_acquire_nvram_lock(bp);
9614 if (rc)
9615 return rc;
9616
9617 /* enable access to nvram interface */
9618 bnx2x_enable_nvram_access(bp);
9619
9620 /* read the first word(s) */
9621 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9622 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9623 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9624 memcpy(ret_buf, &val, 4);
9625
9626 /* advance to the next dword */
9627 offset += sizeof(u32);
9628 ret_buf += sizeof(u32);
9629 buf_size -= sizeof(u32);
9630 cmd_flags = 0;
9631 }
9632
9633 if (rc == 0) {
9634 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9635 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9636 memcpy(ret_buf, &val, 4);
9637 }
9638
9639 /* disable access to nvram interface */
9640 bnx2x_disable_nvram_access(bp);
9641 bnx2x_release_nvram_lock(bp);
9642
9643 return rc;
9644}
9645
9646static int bnx2x_get_eeprom(struct net_device *dev,
9647 struct ethtool_eeprom *eeprom, u8 *eebuf)
9648{
9649 struct bnx2x *bp = netdev_priv(dev);
9650 int rc;
9651
2add3acb
EG
9652 if (!netif_running(dev))
9653 return -EAGAIN;
9654
34f80b04 9655 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9656 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9657 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9658 eeprom->len, eeprom->len);
9659
9660 /* parameters already validated in ethtool_get_eeprom */
9661
9662 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9663
9664 return rc;
9665}
9666
9667static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9668 u32 cmd_flags)
9669{
f1410647 9670 int count, i, rc;
a2fbb9ea
ET
9671
9672 /* build the command word */
9673 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9674
9675 /* need to clear DONE bit separately */
9676 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9677
9678 /* write the data */
9679 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9680
9681 /* address of the NVRAM to write to */
9682 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9683 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9684
9685 /* issue the write command */
9686 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9687
9688 /* adjust timeout for emulation/FPGA */
9689 count = NVRAM_TIMEOUT_COUNT;
9690 if (CHIP_REV_IS_SLOW(bp))
9691 count *= 100;
9692
9693 /* wait for completion */
9694 rc = -EBUSY;
9695 for (i = 0; i < count; i++) {
9696 udelay(5);
9697 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9698 if (val & MCPR_NVM_COMMAND_DONE) {
9699 rc = 0;
9700 break;
9701 }
9702 }
9703
9704 return rc;
9705}
9706
f1410647 9707#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9708
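/* Byte-granular NVRAM write implemented as read-modify-write of the
 * aligned dword. BYTE_OFFSET() maps the byte position within the
 * dword to a bit shift: offset & 3 == 1 gives 8, == 3 gives 24.
 */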
9709static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9710 int buf_size)
9711{
9712 int rc;
9713 u32 cmd_flags;
9714 u32 align_offset;
4781bfad 9715 __be32 val;
a2fbb9ea 9716
34f80b04
EG
9717 if (offset + buf_size > bp->common.flash_size) {
9718 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9719 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9720 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9721 return -EINVAL;
9722 }
9723
9724 /* request access to nvram interface */
9725 rc = bnx2x_acquire_nvram_lock(bp);
9726 if (rc)
9727 return rc;
9728
9729 /* enable access to nvram interface */
9730 bnx2x_enable_nvram_access(bp);
9731
9732 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9733 align_offset = (offset & ~0x03);
9734 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9735
9736 if (rc == 0) {
9737 val &= ~(0xff << BYTE_OFFSET(offset));
9738 val |= (*data_buf << BYTE_OFFSET(offset));
9739
9740 /* nvram data is returned as an array of bytes
9741 * convert it back to cpu order */
9742 val = be32_to_cpu(val);
9743
a2fbb9ea
ET
9744 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9745 cmd_flags);
9746 }
9747
9748 /* disable access to nvram interface */
9749 bnx2x_disable_nvram_access(bp);
9750 bnx2x_release_nvram_lock(bp);
9751
9752 return rc;
9753}
9754
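/* Dword-granular NVRAM write loop; single-byte requests (as issued
 * by ethtool) are diverted to bnx2x_nvram_write1(). The FIRST/LAST
 * command flags are re-asserted at NVRAM_PAGE_SIZE boundaries so
 * each page is written as its own burst.
 */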
9755static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9756 int buf_size)
9757{
9758 int rc;
9759 u32 cmd_flags;
9760 u32 val;
9761 u32 written_so_far;
9762
34f80b04 9763 if (buf_size == 1) /* ethtool */
a2fbb9ea 9764 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9765
9766 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9767 DP(BNX2X_MSG_NVM,
c14423fe 9768 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9769 offset, buf_size);
9770 return -EINVAL;
9771 }
9772
34f80b04
EG
9773 if (offset + buf_size > bp->common.flash_size) {
9774 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9775 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9776 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9777 return -EINVAL;
9778 }
9779
9780 /* request access to nvram interface */
9781 rc = bnx2x_acquire_nvram_lock(bp);
9782 if (rc)
9783 return rc;
9784
9785 /* enable access to nvram interface */
9786 bnx2x_enable_nvram_access(bp);
9787
9788 written_so_far = 0;
9789 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9790 while ((written_so_far < buf_size) && (rc == 0)) {
9791 if (written_so_far == (buf_size - sizeof(u32)))
9792 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9793 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9794 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9795 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9796 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9797
9798 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9799
9800 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9801
9802 /* advance to the next dword */
9803 offset += sizeof(u32);
9804 data_buf += sizeof(u32);
9805 written_so_far += sizeof(u32);
9806 cmd_flags = 0;
9807 }
9808
9809 /* disable access to nvram interface */
9810 bnx2x_disable_nvram_access(bp);
9811 bnx2x_release_nvram_lock(bp);
9812
9813 return rc;
9814}
9815
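/* ethtool EEPROM write entry point. Magics in the 0x504859xx range
 * drive the PHY firmware upgrade flow and require the PMF: 'PHYP'
 * (0x50485950) prepares the PHY, 'PHYR' (0x50485952) re-inits the
 * link after the upgrade, and 0x53985943 ('PHYC') completes it.
 * Any other magic is a plain NVRAM write.
 */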
9816static int bnx2x_set_eeprom(struct net_device *dev,
9817 struct ethtool_eeprom *eeprom, u8 *eebuf)
9818{
9819 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
9820 int port = BP_PORT(bp);
9821 int rc = 0;
a2fbb9ea 9822
9f4c9583
EG
9823 if (!netif_running(dev))
9824 return -EAGAIN;
9825
34f80b04 9826 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9827 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9828 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9829 eeprom->len, eeprom->len);
9830
9831 /* parameters already validated in ethtool_set_eeprom */
9832
f57a6025
EG
9833 /* PHY eeprom can be accessed only by the PMF */
9834 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9835 !bp->port.pmf)
9836 return -EINVAL;
9837
9838 if (eeprom->magic == 0x50485950) {
9839 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9840 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9841
f57a6025
EG
9842 bnx2x_acquire_phy_lock(bp);
9843 rc |= bnx2x_link_reset(&bp->link_params,
9844 &bp->link_vars, 0);
9845 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9846 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9847 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9848 MISC_REGISTERS_GPIO_HIGH, port);
9849 bnx2x_release_phy_lock(bp);
9850 bnx2x_link_report(bp);
9851
9852 } else if (eeprom->magic == 0x50485952) {
9853 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
f34d28ea 9854 if (bp->state == BNX2X_STATE_OPEN) {
4a37fb66 9855 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
9856 rc |= bnx2x_link_reset(&bp->link_params,
9857 &bp->link_vars, 1);
9858
9859 rc |= bnx2x_phy_init(&bp->link_params,
9860 &bp->link_vars);
4a37fb66 9861 bnx2x_release_phy_lock(bp);
f57a6025
EG
9862 bnx2x_calc_fc_adv(bp);
9863 }
9864 } else if (eeprom->magic == 0x53985943) {
9865 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9866 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9867 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9868 u8 ext_phy_addr =
659bc5c4 9869 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
f57a6025
EG
9870
9871 /* DSP Remove Download Mode */
9872 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9873 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9874
f57a6025
EG
9875 bnx2x_acquire_phy_lock(bp);
9876
9877 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9878
9879 /* wait 0.5 sec to allow it to run */
9880 msleep(500);
9881 bnx2x_ext_phy_hw_reset(bp, port);
9882 msleep(500);
9883 bnx2x_release_phy_lock(bp);
9884 }
9885 } else
c18487ee 9886 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9887
9888 return rc;
9889}
9890
9891static int bnx2x_get_coalesce(struct net_device *dev,
9892 struct ethtool_coalesce *coal)
9893{
9894 struct bnx2x *bp = netdev_priv(dev);
9895
9896 memset(coal, 0, sizeof(struct ethtool_coalesce));
9897
9898 coal->rx_coalesce_usecs = bp->rx_ticks;
9899 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9900
9901 return 0;
9902}
9903
ca00392c 9904#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9905static int bnx2x_set_coalesce(struct net_device *dev,
9906 struct ethtool_coalesce *coal)
9907{
9908 struct bnx2x *bp = netdev_priv(dev);
9909
9910 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9911 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9912 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9913
9914 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9915 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9916 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9917
34f80b04 9918 if (netif_running(dev))
a2fbb9ea
ET
9919 bnx2x_update_coalesce(bp);
9920
9921 return 0;
9922}
9923
9924static void bnx2x_get_ringparam(struct net_device *dev,
9925 struct ethtool_ringparam *ering)
9926{
9927 struct bnx2x *bp = netdev_priv(dev);
9928
9929 ering->rx_max_pending = MAX_RX_AVAIL;
9930 ering->rx_mini_max_pending = 0;
9931 ering->rx_jumbo_max_pending = 0;
9932
9933 ering->rx_pending = bp->rx_ring_size;
9934 ering->rx_mini_pending = 0;
9935 ering->rx_jumbo_pending = 0;
9936
9937 ering->tx_max_pending = MAX_TX_AVAIL;
9938 ering->tx_pending = bp->tx_ring_size;
9939}
9940
9941static int bnx2x_set_ringparam(struct net_device *dev,
9942 struct ethtool_ringparam *ering)
9943{
9944 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9945 int rc = 0;
a2fbb9ea
ET
9946
9947 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9948 (ering->tx_pending > MAX_TX_AVAIL) ||
9949 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9950 return -EINVAL;
9951
9952 bp->rx_ring_size = ering->rx_pending;
9953 bp->tx_ring_size = ering->tx_pending;
9954
34f80b04
EG
9955 if (netif_running(dev)) {
9956 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9957 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9958 }
9959
34f80b04 9960 return rc;
a2fbb9ea
ET
9961}
9962
9963static void bnx2x_get_pauseparam(struct net_device *dev,
9964 struct ethtool_pauseparam *epause)
9965{
9966 struct bnx2x *bp = netdev_priv(dev);
9967
356e2385
EG
9968 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9969 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9970 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9971
c0700f90
DM
9972 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9973 BNX2X_FLOW_CTRL_RX);
9974 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9975 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9976
9977 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9978 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9979 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9980}
9981
9982static int bnx2x_set_pauseparam(struct net_device *dev,
9983 struct ethtool_pauseparam *epause)
9984{
9985 struct bnx2x *bp = netdev_priv(dev);
9986
34f80b04
EG
9987 if (IS_E1HMF(bp))
9988 return 0;
9989
a2fbb9ea
ET
9990 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9991 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9992 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9993
c0700f90 9994 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9995
f1410647 9996 if (epause->rx_pause)
c0700f90 9997 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9998
f1410647 9999 if (epause->tx_pause)
c0700f90 10000 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 10001
c0700f90
DM
10002 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10003 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 10004
c18487ee 10005 if (epause->autoneg) {
34f80b04 10006 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 10007 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
10008 return -EINVAL;
10009 }
a2fbb9ea 10010
c18487ee 10011 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 10012 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 10013 }
a2fbb9ea 10014
c18487ee
YR
10015 DP(NETIF_MSG_LINK,
10016 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
10017
10018 if (netif_running(dev)) {
bb2a0f7a 10019 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10020 bnx2x_link_set(bp);
10021 }
a2fbb9ea
ET
10022
10023 return 0;
10024}
10025
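/* ethtool set_flags handler: only ETH_FLAG_LRO is acted upon, and
 * TPA (the HW aggregation behind LRO) may only be enabled while Rx
 * checksum offload is on; any change triggers a NIC reload.
 */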
df0f2343
VZ
10026static int bnx2x_set_flags(struct net_device *dev, u32 data)
10027{
10028 struct bnx2x *bp = netdev_priv(dev);
10029 int changed = 0;
10030 int rc = 0;
10031
10032 /* TPA requires Rx CSUM offloading */
10033 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10034 if (!(dev->features & NETIF_F_LRO)) {
10035 dev->features |= NETIF_F_LRO;
10036 bp->flags |= TPA_ENABLE_FLAG;
10037 changed = 1;
10038 }
10039
10040 } else if (dev->features & NETIF_F_LRO) {
10041 dev->features &= ~NETIF_F_LRO;
10042 bp->flags &= ~TPA_ENABLE_FLAG;
10043 changed = 1;
10044 }
10045
10046 if (changed && netif_running(dev)) {
10047 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10048 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10049 }
10050
10051 return rc;
10052}
10053
a2fbb9ea
ET
10054static u32 bnx2x_get_rx_csum(struct net_device *dev)
10055{
10056 struct bnx2x *bp = netdev_priv(dev);
10057
10058 return bp->rx_csum;
10059}
10060
10061static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10062{
10063 struct bnx2x *bp = netdev_priv(dev);
df0f2343 10064 int rc = 0;
a2fbb9ea
ET
10065
10066 bp->rx_csum = data;
df0f2343
VZ
10067
 10068 /* Disable TPA when Rx CSUM is disabled. Otherwise all
 10069 TPA'ed packets will be discarded due to a wrong TCP CSUM */
10070 if (!data) {
10071 u32 flags = ethtool_op_get_flags(dev);
10072
10073 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10074 }
10075
10076 return rc;
a2fbb9ea
ET
10077}
10078
10079static int bnx2x_set_tso(struct net_device *dev, u32 data)
10080{
755735eb 10081 if (data) {
a2fbb9ea 10082 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10083 dev->features |= NETIF_F_TSO6;
10084 } else {
a2fbb9ea 10085 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10086 dev->features &= ~NETIF_F_TSO6;
10087 }
10088
a2fbb9ea
ET
10089 return 0;
10090}
10091
f3c87cdd 10092static const struct {
a2fbb9ea
ET
10093 char string[ETH_GSTRING_LEN];
10094} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
10095 { "register_test (offline)" },
10096 { "memory_test (offline)" },
10097 { "loopback_test (offline)" },
10098 { "nvram_test (online)" },
10099 { "interrupt_test (online)" },
10100 { "link_test (online)" },
d3d4f495 10101 { "idle check (online)" }
a2fbb9ea
ET
10102};
10103
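/* Offline register test: write 0x00000000 and then 0xffffffff to
 * every reg_tbl entry (offset0 + port*offset1), restore the saved
 * value, and fail with -ENODEV if the masked readback differs from
 * what was written.
 */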
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, then by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
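
/* The register test is a walking 0s/1s pattern: each entry in reg_tbl
 * is written with all-zeros and then all-ones, read back and compared
 * under its read/write mask, with offset1 stepping to the copy of the
 * register that belongs to this port. Every register is restored to
 * its saved value, and the test is only reachable from the offline
 * (LOAD_DIAG) self-test path.
 */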

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
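
/* Reading back every word of the internal memories above forces the
 * hardware parity checkers to run; any bit left set in the *_PRTY_STS
 * registers outside the per-chip masks fails the test. The e1_mask and
 * e1h_mask columns list the parity bits that are expected (and thus
 * ignored) on E1 and E1H respectively.
 */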

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
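
/* The loopback test sends exactly one hand-built frame (the device's
 * own MAC as destination, zero source, 0x77 filler in the rest of the
 * header, a counting byte pattern as payload) through the first Tx
 * queue. Success requires the Tx and Rx status-block indices to each
 * advance by one packet, a fast-path CQE without error flags, a
 * matching packet length, and a byte-for-byte payload compare.
 * (ETH_RX_ERROR_FALGS is spelled that way in the FW HSI headers.)
 */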

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
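
/* 0xdebb20e3 is the standard CRC-32 residual: running ether_crc_le()
 * over a data block that already has its little-endian CRC-32 appended
 * yields this constant whenever the block is intact, so each NVRAM
 * region can be verified without knowing where inside it the CRC is
 * stored.
 */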

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
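
/* The interrupt test never touches the IGU directly: it posts a
 * zero-length SET_MAC ramrod on the slowpath ring and waits up to
 * ~100ms for the interrupt-driven completion to clear set_mac_pending.
 * If the completion never arrives, interrupt delivery is considered
 * broken and -ENODEV is returned.
 */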

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}

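/* The resulting ethtool stats layout: in multi-queue mode, one
 * BNX2X_NUM_Q_STATS block per Rx queue, followed by the global
 * BNX2X_NUM_STATS block unless it is hidden in E1H multi-function
 * mode; in single-queue mode just the global block, filtered down to
 * function-level stats when port stats are hidden.
 * bnx2x_get_strings() and bnx2x_get_ethtool_stats() below must walk
 * the arrays in exactly this order.
 */
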
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_rx_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_rx_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
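
/* 64-bit counters live in the stats structs as two consecutive u32s
 * with the high word first (the *_hi/*_lo pairs), so
 * HILO_U64(*offset, *(offset + 1)) reassembles ((u64)hi << 32) + lo,
 * while 4-byte counters are simply zero-extended into the u64 buffer.
 */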

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}

static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
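
/* The low two bits of PCI_PM_CTRL_STATE_MASK select the power state
 * (0 = D0 ... 3 = D3hot), hence "pmcsr |= 3" above. PME status is
 * write-one-to-clear, which is why the D0 path writes
 * PCI_PM_CTRL_PME_STATUS back while clearing the state bits.
 */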

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
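
/* The last entry of each RCQ page is a "next page" pointer rather
 * than a real completion, so when the status-block consumer value
 * lands on a page boundary it is bumped by one to skip that entry
 * before being compared with the driver's rx_comp_cons.
 */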

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}


/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
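
/* The checksum the stack computed does not always start where the HW
 * expects: SKB_CS_OFF() yields the signed byte distance between the
 * two start points. Since the Internet checksum is a plain ones'-
 * complement sum, the correction simply subtracts the checksum of the
 * surplus "fix" bytes (fix > 0) or adds the checksum of the missing
 * ones (fix < 0), then folds to 16 bits and byte-swaps for the FW.
 */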

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
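
/* xmit_type is a small bitmask consumed by bnx2x_start_xmit(): the
 * CSUM bits select the IPv4/IPv6 and TCP checksum flags for the
 * parsing BD, and the GSO bits force the matching CSUM bits on,
 * because LSO always implies checksum offload of the replicated
 * headers.
 */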

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
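
/* The LSO check above is a sliding window over the fragment list: for
 * every run of wnd_size consecutive BDs it verifies that at least one
 * MSS worth of payload is present, matching the FW limit of
 * MAX_FETCH_BD fetched BDs per transmitted segment. If any window
 * falls short, the caller linearizes the skb into a single buffer.
 */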

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp, *fp_stat;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index + bp->num_rx_queues];
	fp_stat = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp_stat->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after  fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}
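
	/* Note the units in the block above: the parsing BD carries header
	 * lengths in 16-bit words (hence all the divisions by 2) - this is
	 * the "sizes are in words" warning near the top of this function -
	 * while the local hlen is converted back to bytes (hlen*2) because
	 * bnx2x_tx_split() and skb_headlen() operate on bytes.
	 */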

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
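
/* On E1H the multicast filter is a 256-bit hash: the top byte of the
 * crc32c of each multicast MAC picks one bit, spread across eight
 * 32-bit MC_HASH registers (regidx = bit >> 5 selects the register,
 * bit & 0x1f the position within it). E1 programs exact-match CAM
 * entries instead, which is why it falls back to ALLMULTI once
 * dev->mc_count exceeds BNX2X_MAX_MULTICAST.
 */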

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
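
/* All MII ioctls funnel through the generic mdio45 helper, which maps
 * clause-22 style requests onto the clause-45 (prtad/devad/addr)
 * accessors implemented above; MDIO_DEVAD_NONE marks a legacy CL22
 * request, hence the DEFAULT_PHY_DEV_ADDR substitution in both the
 * read and write handlers.
 */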

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
11774
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

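/* A note on the probe above: the DMA-mask negotiation follows the common
 * PCI driver pattern of this era -- try a 64-bit mask first and record
 * that DAC (dual address cycle) addressing is in use so NETIF_F_HIGHDMA
 * can be advertised, otherwise fall back to a 32-bit mask and only fail
 * the probe if even that is unsupported. Sketch of the decision tree:
 *
 *	64-bit mask OK? -> set USING_DAC_FLAG, require 64-bit consistent mask
 *	else 32-bit OK? -> proceed without DAC
 *	else            -> -EIO, no usable DMA addressing
 */
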
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

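/* Illustrative only: a worked decode of the register read above. If the
 * link status fields in val reported, say, a x8 link trained at the Gen2
 * rate, the two extractions would yield:
 *
 *	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;	// 8
 *	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;	// 2
 *
 * which bnx2x_init_one() below then prints as "PCI-E x8 5GHz (Gen2)".
 */
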
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

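/* Illustrative only: the section bounds check above rejects any header
 * entry whose (offset, len) pair would run past the firmware blob. For a
 * hypothetical firmware->size of 0x20000, a section claiming offset
 * 0x1fff0 with len 0x100 fails because 0x1fff0 + 0x100 = 0x200f0 exceeds
 * 0x20000, so the file is refused before any of it is parsed further.
 */
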
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

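/* Illustrative only: be32_to_cpu_n() copies n bytes of big-endian u32s
 * into host order. On a little-endian host, the firmware byte sequence
 * 0x12 0x34 0x56 0x78 becomes the u32 0x12345678; on a big-endian host
 * the conversion is effectively an identity copy.
 */
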
/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}

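/* Illustrative only: one 8-byte record decoded by bnx2x_prep_ops().
 * Given the two big-endian words 0x02001234 and 0x0000abcd (hypothetical
 * values), the loop above produces:
 *
 *	target[i].op       = 0x02;	 // top 8 bits of the first word
 *	target[i].offset   = 0x001234;	 // low 24 bits of the first word
 *	target[i].raw_data = 0x0000abcd; // second word, swapped to host order
 */
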
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

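/* Illustrative only: BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n), as used below, expands roughly to:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;	// after the error printk
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 *
 * i.e. one allocation plus one endian-converting copy per init array,
 * with the cleanup label selecting how far to unwind on failure.
 */
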
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

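/* Illustrative only: the KWQ consumer advance above is the usual
 * fixed-size ring idiom -- walk forward until the last valid slot,
 * then wrap back to the base of the ring:
 *
 *	if (cons == last)	// bp->cnic_kwq + MAX_SP_DESC_CNT
 *		cons = base;	// bp->cnic_kwq
 *	else
 *		cons++;
 *
 * bnx2x_cnic_sp_queue() below applies the same rule to the producer side.
 */
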
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

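/* Illustrative only (not part of this file): a minimal sketch of how a
 * CNIC-side consumer might use the probe entry point exported above;
 * "my_cnic_ops" and "my_data" are hypothetical placeholders.
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *
 *	if (cp && !cp->drv_register_cnic(netdev, &my_cnic_ops, my_data)) {
 *		// registered: cp->drv_submit_kwqes_16() may now queue
 *		// 16-byte kwqes and cp->drv_ctl() accepts DRV_CTL_* commands;
 *		// tear down later with cp->drv_unregister_cnic(netdev).
 *	}
 */
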
#endif /* BCM_CNIC */