net: PPP buffer too small for higher speed connections
[deliverable/linux.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
d05c26ce 3 * Copyright (c) 2007-2009 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea 51#include <linux/io.h>
45229b42 52#include <linux/stringify.h>
a2fbb9ea 53
359d8b15 54
a2fbb9ea
ET
55#include "bnx2x.h"
56#include "bnx2x_init.h"
94a78b79 57#include "bnx2x_init_ops.h"
0a64ea57 58#include "bnx2x_dump.h"
a2fbb9ea 59
0ab365f4
EG
60#define DRV_MODULE_VERSION "1.52.1-4"
61#define DRV_MODULE_RELDATE "2009/11/09"
34f80b04 62#define BNX2X_BC_VER 0x040200
a2fbb9ea 63
94a78b79
VZ
64#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
45229b42
BH
67#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
94a78b79 74
34f80b04
EG
75/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
a2fbb9ea 77
53a10565 78static char version[] __devinitdata =
34f80b04 79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81
24e3fcef 82MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 83MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
84MODULE_LICENSE("GPL");
85MODULE_VERSION(DRV_MODULE_VERSION);
45229b42
BH
86MODULE_FIRMWARE(FW_FILE_NAME_E1);
87MODULE_FIRMWARE(FW_FILE_NAME_E1H);
a2fbb9ea 88
555f6c78
EG
89static int multi_mode = 1;
90module_param(multi_mode, int, 0);
ca00392c
EG
91MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
93
94static int num_rx_queues;
95module_param(num_rx_queues, int, 0);
96MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
97 " (default is half number of CPUs)");
98
99static int num_tx_queues;
100module_param(num_tx_queues, int, 0);
101MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
102 " (default is half number of CPUs)");
555f6c78 103
19680c48 104static int disable_tpa;
19680c48 105module_param(disable_tpa, int, 0);
9898f86d 106MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
107
108static int int_mode;
109module_param(int_mode, int, 0);
110MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
111
a18f5128
EG
112static int dropless_fc;
113module_param(dropless_fc, int, 0);
114MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
115
9898f86d 116static int poll;
a2fbb9ea 117module_param(poll, int, 0);
9898f86d 118MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
119
120static int mrrs = -1;
121module_param(mrrs, int, 0);
122MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
123
9898f86d 124static int debug;
a2fbb9ea 125module_param(debug, int, 0);
9898f86d
EG
126MODULE_PARM_DESC(debug, " Default debug msglevel");
127
128static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 129
1cf167f2 130static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
131
132enum bnx2x_board_type {
133 BCM57710 = 0,
34f80b04
EG
134 BCM57711 = 1,
135 BCM57711E = 2,
a2fbb9ea
ET
136};
137
34f80b04 138/* indexed by board_type, above */
53a10565 139static struct {
a2fbb9ea
ET
140 char *name;
141} board_info[] __devinitdata = {
34f80b04
EG
142 { "Broadcom NetXtreme II BCM57710 XGb" },
143 { "Broadcom NetXtreme II BCM57711 XGb" },
144 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
145};
146
34f80b04 147
a2fbb9ea 148static const struct pci_device_id bnx2x_pci_tbl[] = {
e4ed7113
EG
149 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
150 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
a2fbb9ea
ET
152 { 0 }
153};
154
155MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
156
157/****************************************************************************
158* General service functions
159****************************************************************************/
160
161/* used only at init
162 * locking is done by mcp
163 */
573f2035 164void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
a2fbb9ea
ET
165{
166 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
168 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
169 PCICFG_VENDOR_ID_OFFSET);
170}
171
a2fbb9ea
ET
172static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
173{
174 u32 val;
175
176 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
177 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
178 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
179 PCICFG_VENDOR_ID_OFFSET);
180
181 return val;
182}
a2fbb9ea
ET
183
184static const u32 dmae_reg_go_c[] = {
185 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
186 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
187 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
188 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
189};
190
191/* copy command into DMAE command memory and set DMAE command go */
192static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
193 int idx)
194{
195 u32 cmd_offset;
196 int i;
197
198 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
199 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
200 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
201
ad8d3948
EG
202 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
203 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
204 }
205 REG_WR(bp, dmae_reg_go_c[idx], 1);
206}
207
ad8d3948
EG
208void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
209 u32 len32)
a2fbb9ea 210{
5ff7b6d4 211 struct dmae_command dmae;
a2fbb9ea 212 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
213 int cnt = 200;
214
215 if (!bp->dmae_ready) {
216 u32 *data = bnx2x_sp(bp, wb_data[0]);
217
218 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
219 " using indirect\n", dst_addr, len32);
220 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
221 return;
222 }
223
5ff7b6d4 224 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 225
5ff7b6d4
EG
226 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
227 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
228 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 229#ifdef __BIG_ENDIAN
5ff7b6d4 230 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 231#else
5ff7b6d4 232 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 233#endif
5ff7b6d4
EG
234 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
235 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
236 dmae.src_addr_lo = U64_LO(dma_addr);
237 dmae.src_addr_hi = U64_HI(dma_addr);
238 dmae.dst_addr_lo = dst_addr >> 2;
239 dmae.dst_addr_hi = 0;
240 dmae.len = len32;
241 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
242 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
243 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 244
c3eefaf6 245 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
246 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
247 "dst_addr [%x:%08x (%08x)]\n"
248 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
249 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
250 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
251 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
ad8d3948 252 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
253 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
254 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea 255
5ff7b6d4
EG
256 mutex_lock(&bp->dmae_mutex);
257
a2fbb9ea
ET
258 *wb_comp = 0;
259
5ff7b6d4 260 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
261
262 udelay(5);
ad8d3948
EG
263
264 while (*wb_comp != DMAE_COMP_VAL) {
265 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
266
ad8d3948 267 if (!cnt) {
c3eefaf6 268 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
269 break;
270 }
ad8d3948 271 cnt--;
12469401
YG
272 /* adjust delay for emulation/FPGA */
273 if (CHIP_REV_IS_SLOW(bp))
274 msleep(100);
275 else
276 udelay(5);
a2fbb9ea 277 }
ad8d3948
EG
278
279 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
280}
281
c18487ee 282void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 283{
5ff7b6d4 284 struct dmae_command dmae;
a2fbb9ea 285 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
286 int cnt = 200;
287
288 if (!bp->dmae_ready) {
289 u32 *data = bnx2x_sp(bp, wb_data[0]);
290 int i;
291
292 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
293 " using indirect\n", src_addr, len32);
294 for (i = 0; i < len32; i++)
295 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
296 return;
297 }
298
5ff7b6d4 299 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 300
5ff7b6d4
EG
301 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
302 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
303 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 304#ifdef __BIG_ENDIAN
5ff7b6d4 305 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 306#else
5ff7b6d4 307 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 308#endif
5ff7b6d4
EG
309 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
310 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
311 dmae.src_addr_lo = src_addr >> 2;
312 dmae.src_addr_hi = 0;
313 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
314 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
315 dmae.len = len32;
316 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
317 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
318 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 319
c3eefaf6 320 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
321 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
322 "dst_addr [%x:%08x (%08x)]\n"
323 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
324 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
325 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
326 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
a2fbb9ea 327
5ff7b6d4
EG
328 mutex_lock(&bp->dmae_mutex);
329
330 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
a2fbb9ea
ET
331 *wb_comp = 0;
332
5ff7b6d4 333 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
334
335 udelay(5);
ad8d3948
EG
336
337 while (*wb_comp != DMAE_COMP_VAL) {
338
ad8d3948 339 if (!cnt) {
c3eefaf6 340 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
341 break;
342 }
ad8d3948 343 cnt--;
12469401
YG
344 /* adjust delay for emulation/FPGA */
345 if (CHIP_REV_IS_SLOW(bp))
346 msleep(100);
347 else
348 udelay(5);
a2fbb9ea 349 }
ad8d3948 350 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
351 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
352 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
353
354 mutex_unlock(&bp->dmae_mutex);
355}
356
573f2035
EG
357void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
358 u32 addr, u32 len)
359{
360 int offset = 0;
361
362 while (len > DMAE_LEN32_WR_MAX) {
363 bnx2x_write_dmae(bp, phys_addr + offset,
364 addr + offset, DMAE_LEN32_WR_MAX);
365 offset += DMAE_LEN32_WR_MAX * 4;
366 len -= DMAE_LEN32_WR_MAX;
367 }
368
369 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
370}
371
ad8d3948
EG
372/* used only for slowpath so not inlined */
373static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
374{
375 u32 wb_write[2];
376
377 wb_write[0] = val_hi;
378 wb_write[1] = val_lo;
379 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 380}
a2fbb9ea 381
ad8d3948
EG
382#ifdef USE_WB_RD
383static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
384{
385 u32 wb_data[2];
386
387 REG_RD_DMAE(bp, reg, wb_data, 2);
388
389 return HILO_U64(wb_data[0], wb_data[1]);
390}
391#endif
392
a2fbb9ea
ET
393static int bnx2x_mc_assert(struct bnx2x *bp)
394{
a2fbb9ea 395 char last_idx;
34f80b04
EG
396 int i, rc = 0;
397 u32 row0, row1, row2, row3;
398
399 /* XSTORM */
400 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
401 XSTORM_ASSERT_LIST_INDEX_OFFSET);
402 if (last_idx)
403 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
404
405 /* print the asserts */
406 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
407
408 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
409 XSTORM_ASSERT_LIST_OFFSET(i));
410 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
411 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
412 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
413 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
414 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
415 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
416
417 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
418 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
419 " 0x%08x 0x%08x 0x%08x\n",
420 i, row3, row2, row1, row0);
421 rc++;
422 } else {
423 break;
424 }
425 }
426
427 /* TSTORM */
428 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
429 TSTORM_ASSERT_LIST_INDEX_OFFSET);
430 if (last_idx)
431 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
432
433 /* print the asserts */
434 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
435
436 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
437 TSTORM_ASSERT_LIST_OFFSET(i));
438 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
439 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
440 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
441 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
442 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
443 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
444
445 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
446 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
447 " 0x%08x 0x%08x 0x%08x\n",
448 i, row3, row2, row1, row0);
449 rc++;
450 } else {
451 break;
452 }
453 }
454
455 /* CSTORM */
456 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
457 CSTORM_ASSERT_LIST_INDEX_OFFSET);
458 if (last_idx)
459 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
460
461 /* print the asserts */
462 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
463
464 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
465 CSTORM_ASSERT_LIST_OFFSET(i));
466 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
467 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
468 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
469 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
470 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
471 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
472
473 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
474 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
475 " 0x%08x 0x%08x 0x%08x\n",
476 i, row3, row2, row1, row0);
477 rc++;
478 } else {
479 break;
480 }
481 }
482
483 /* USTORM */
484 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
485 USTORM_ASSERT_LIST_INDEX_OFFSET);
486 if (last_idx)
487 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
488
489 /* print the asserts */
490 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
491
492 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
493 USTORM_ASSERT_LIST_OFFSET(i));
494 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
495 USTORM_ASSERT_LIST_OFFSET(i) + 4);
496 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
497 USTORM_ASSERT_LIST_OFFSET(i) + 8);
498 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
499 USTORM_ASSERT_LIST_OFFSET(i) + 12);
500
501 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
502 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
503 " 0x%08x 0x%08x 0x%08x\n",
504 i, row3, row2, row1, row0);
505 rc++;
506 } else {
507 break;
a2fbb9ea
ET
508 }
509 }
34f80b04 510
a2fbb9ea
ET
511 return rc;
512}
c14423fe 513
a2fbb9ea
ET
514static void bnx2x_fw_dump(struct bnx2x *bp)
515{
516 u32 mark, offset;
4781bfad 517 __be32 data[9];
a2fbb9ea
ET
518 int word;
519
520 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772 521 mark = ((mark + 0x3) & ~0x3);
ad361c98 522 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 523
ad361c98 524 printk(KERN_ERR PFX);
a2fbb9ea
ET
525 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
526 for (word = 0; word < 8; word++)
527 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
528 offset + 4*word));
529 data[8] = 0x0;
49d66772 530 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
531 }
532 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
533 for (word = 0; word < 8; word++)
534 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
535 offset + 4*word));
536 data[8] = 0x0;
49d66772 537 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea 538 }
ad361c98 539 printk(KERN_ERR PFX "end of fw dump\n");
a2fbb9ea
ET
540}
541
542static void bnx2x_panic_dump(struct bnx2x *bp)
543{
544 int i;
545 u16 j, start, end;
546
66e855f3
YG
547 bp->stats_state = STATS_STATE_DISABLED;
548 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
549
a2fbb9ea
ET
550 BNX2X_ERR("begin crash dump -----------------\n");
551
8440d2b6
EG
552 /* Indices */
553 /* Common */
554 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
555 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
556 " spq_prod_idx(%u)\n",
557 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
558 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
559
560 /* Rx */
561 for_each_rx_queue(bp, i) {
a2fbb9ea 562 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 563
c3eefaf6 564 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
66e855f3
YG
565 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
566 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
8440d2b6 567 i, fp->rx_bd_prod, fp->rx_bd_cons,
66e855f3
YG
568 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
569 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
c3eefaf6 570 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
8440d2b6
EG
571 " fp_u_idx(%x) *sb_u_idx(%x)\n",
572 fp->rx_sge_prod, fp->last_max_sge,
573 le16_to_cpu(fp->fp_u_idx),
574 fp->status_blk->u_status_block.status_block_index);
575 }
a2fbb9ea 576
8440d2b6
EG
577 /* Tx */
578 for_each_tx_queue(bp, i) {
579 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 580
c3eefaf6 581 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
8440d2b6
EG
582 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
583 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
584 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
c3eefaf6 585 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
ca00392c 586 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
8440d2b6 587 fp->status_blk->c_status_block.status_block_index,
ca00392c 588 fp->tx_db.data.prod);
8440d2b6 589 }
a2fbb9ea 590
8440d2b6
EG
591 /* Rings */
592 /* Rx */
593 for_each_rx_queue(bp, i) {
594 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
595
596 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
597 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 598 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
599 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
600 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
601
c3eefaf6
EG
602 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
603 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
604 }
605
3196a88a
EG
606 start = RX_SGE(fp->rx_sge_prod);
607 end = RX_SGE(fp->last_max_sge);
8440d2b6 608 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
609 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
610 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
611
c3eefaf6
EG
612 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
613 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
614 }
615
a2fbb9ea
ET
616 start = RCQ_BD(fp->rx_comp_cons - 10);
617 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 618 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
619 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
620
c3eefaf6
EG
621 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
622 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
623 }
624 }
625
8440d2b6
EG
626 /* Tx */
627 for_each_tx_queue(bp, i) {
628 struct bnx2x_fastpath *fp = &bp->fp[i];
629
630 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
631 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
632 for (j = start; j != end; j = TX_BD(j + 1)) {
633 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
634
c3eefaf6
EG
635 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
636 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
637 }
638
639 start = TX_BD(fp->tx_bd_cons - 10);
640 end = TX_BD(fp->tx_bd_cons + 254);
641 for (j = start; j != end; j = TX_BD(j + 1)) {
642 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
643
c3eefaf6
EG
644 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
645 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
646 }
647 }
a2fbb9ea 648
34f80b04 649 bnx2x_fw_dump(bp);
a2fbb9ea
ET
650 bnx2x_mc_assert(bp);
651 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
652}
653
615f8fd9 654static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 655{
34f80b04 656 int port = BP_PORT(bp);
a2fbb9ea
ET
657 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
658 u32 val = REG_RD(bp, addr);
659 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 660 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
661
662 if (msix) {
8badd27a
EG
663 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
664 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
665 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
666 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
667 } else if (msi) {
668 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
669 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
670 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
671 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
672 } else {
673 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 674 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
675 HC_CONFIG_0_REG_INT_LINE_EN_0 |
676 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 677
8badd27a
EG
678 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
679 val, port, addr);
615f8fd9
ET
680
681 REG_WR(bp, addr, val);
682
a2fbb9ea
ET
683 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
684 }
685
8badd27a
EG
686 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
687 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
688
689 REG_WR(bp, addr, val);
37dbbf32
EG
690 /*
691 * Ensure that HC_CONFIG is written before leading/trailing edge config
692 */
693 mmiowb();
694 barrier();
34f80b04
EG
695
696 if (CHIP_IS_E1H(bp)) {
697 /* init leading/trailing edge */
698 if (IS_E1HMF(bp)) {
8badd27a 699 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 700 if (bp->port.pmf)
4acac6a5
EG
701 /* enable nig and gpio3 attention */
702 val |= 0x1100;
34f80b04
EG
703 } else
704 val = 0xffff;
705
706 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
707 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
708 }
37dbbf32
EG
709
710 /* Make sure that interrupts are indeed enabled from here on */
711 mmiowb();
a2fbb9ea
ET
712}
713
615f8fd9 714static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 715{
34f80b04 716 int port = BP_PORT(bp);
a2fbb9ea
ET
717 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
718 u32 val = REG_RD(bp, addr);
719
720 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
721 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
722 HC_CONFIG_0_REG_INT_LINE_EN_0 |
723 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
724
725 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
726 val, port, addr);
727
8badd27a
EG
728 /* flush all outstanding writes */
729 mmiowb();
730
a2fbb9ea
ET
731 REG_WR(bp, addr, val);
732 if (REG_RD(bp, addr) != val)
733 BNX2X_ERR("BUG! proper val not read from IGU!\n");
734}
735
f8ef6e44 736static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 737{
a2fbb9ea 738 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 739 int i, offset;
a2fbb9ea 740
34f80b04 741 /* disable interrupt handling */
a2fbb9ea 742 atomic_inc(&bp->intr_sem);
e1510706
EG
743 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
744
f8ef6e44
YG
745 if (disable_hw)
746 /* prevent the HW from sending interrupts */
747 bnx2x_int_disable(bp);
a2fbb9ea
ET
748
749 /* make sure all ISRs are done */
750 if (msix) {
8badd27a
EG
751 synchronize_irq(bp->msix_table[0].vector);
752 offset = 1;
37b091ba
MC
753#ifdef BCM_CNIC
754 offset++;
755#endif
a2fbb9ea 756 for_each_queue(bp, i)
8badd27a 757 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
758 } else
759 synchronize_irq(bp->pdev->irq);
760
761 /* make sure sp_task is not running */
1cf167f2
EG
762 cancel_delayed_work(&bp->sp_task);
763 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
764}
765
34f80b04 766/* fast path */
a2fbb9ea
ET
767
768/*
34f80b04 769 * General service functions
a2fbb9ea
ET
770 */
771
34f80b04 772static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
773 u8 storm, u16 index, u8 op, u8 update)
774{
5c862848
EG
775 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
776 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
777 struct igu_ack_register igu_ack;
778
779 igu_ack.status_block_index = index;
780 igu_ack.sb_id_and_flags =
34f80b04 781 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
782 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
783 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
784 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
785
5c862848
EG
786 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
787 (*(u32 *)&igu_ack), hc_addr);
788 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
37dbbf32
EG
789
790 /* Make sure that ACK is written */
791 mmiowb();
792 barrier();
a2fbb9ea
ET
793}
794
795static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
796{
797 struct host_status_block *fpsb = fp->status_blk;
798 u16 rc = 0;
799
800 barrier(); /* status block is written to by the chip */
801 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
802 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
803 rc |= 1;
804 }
805 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
806 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
807 rc |= 2;
808 }
809 return rc;
810}
811
a2fbb9ea
ET
812static u16 bnx2x_ack_int(struct bnx2x *bp)
813{
5c862848
EG
814 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
815 COMMAND_REG_SIMD_MASK);
816 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 817
5c862848
EG
818 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
819 result, hc_addr);
a2fbb9ea 820
a2fbb9ea
ET
821 return result;
822}
823
824
825/*
826 * fast path service functions
827 */
828
e8b5fc51
VZ
829static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
830{
831 /* Tell compiler that consumer and producer can change */
832 barrier();
833 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
237907c1
EG
834}
835
a2fbb9ea
ET
836/* free skb in the packet ring at pos idx
837 * return idx of last bd freed
838 */
839static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
840 u16 idx)
841{
842 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
ca00392c
EG
843 struct eth_tx_start_bd *tx_start_bd;
844 struct eth_tx_bd *tx_data_bd;
a2fbb9ea 845 struct sk_buff *skb = tx_buf->skb;
34f80b04 846 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
847 int nbd;
848
849 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
850 idx, tx_buf, skb);
851
852 /* unmap first bd */
853 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
ca00392c
EG
854 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
855 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
856 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
a2fbb9ea 857
ca00392c 858 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
a2fbb9ea 859#ifdef BNX2X_STOP_ON_ERROR
ca00392c 860 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
34f80b04 861 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
862 bnx2x_panic();
863 }
864#endif
ca00392c 865 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea 866
ca00392c
EG
867 /* Get the next bd */
868 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea 869
ca00392c
EG
870 /* Skip a parse bd... */
871 --nbd;
872 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
873
874 /* ...and the TSO split header bd since they have no mapping */
875 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
876 --nbd;
877 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea
ET
878 }
879
880 /* now free frags */
881 while (nbd > 0) {
882
883 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
ca00392c
EG
884 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
885 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
886 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
a2fbb9ea
ET
887 if (--nbd)
888 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
889 }
890
891 /* release skb */
53e5e96e 892 WARN_ON(!skb);
ca00392c 893 dev_kfree_skb_any(skb);
a2fbb9ea
ET
894 tx_buf->first_bd = 0;
895 tx_buf->skb = NULL;
896
34f80b04 897 return new_cons;
a2fbb9ea
ET
898}
899
34f80b04 900static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 901{
34f80b04
EG
902 s16 used;
903 u16 prod;
904 u16 cons;
a2fbb9ea 905
34f80b04 906 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
907 prod = fp->tx_bd_prod;
908 cons = fp->tx_bd_cons;
909
34f80b04
EG
910 /* NUM_TX_RINGS = number of "next-page" entries
911 It will be used as a threshold */
912 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 913
34f80b04 914#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
915 WARN_ON(used < 0);
916 WARN_ON(used > fp->bp->tx_ring_size);
917 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 918#endif
a2fbb9ea 919
34f80b04 920 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
921}
922
7961f791 923static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
924{
925 struct bnx2x *bp = fp->bp;
555f6c78 926 struct netdev_queue *txq;
a2fbb9ea
ET
927 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
928 int done = 0;
929
930#ifdef BNX2X_STOP_ON_ERROR
931 if (unlikely(bp->panic))
932 return;
933#endif
934
ca00392c 935 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
a2fbb9ea
ET
936 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
937 sw_cons = fp->tx_pkt_cons;
938
939 while (sw_cons != hw_cons) {
940 u16 pkt_cons;
941
942 pkt_cons = TX_BD(sw_cons);
943
944 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
945
34f80b04 946 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
947 hw_cons, sw_cons, pkt_cons);
948
34f80b04 949/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
950 rmb();
951 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
952 }
953*/
954 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
955 sw_cons++;
956 done++;
a2fbb9ea
ET
957 }
958
959 fp->tx_pkt_cons = sw_cons;
960 fp->tx_bd_cons = bd_cons;
961
a2fbb9ea 962 /* TBD need a thresh? */
555f6c78 963 if (unlikely(netif_tx_queue_stopped(txq))) {
a2fbb9ea 964
6044735d
EG
965 /* Need to make the tx_bd_cons update visible to start_xmit()
966 * before checking for netif_tx_queue_stopped(). Without the
967 * memory barrier, there is a small possibility that
968 * start_xmit() will miss it and cause the queue to be stopped
969 * forever.
970 */
971 smp_mb();
972
555f6c78 973 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 974 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 975 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 976 netif_tx_wake_queue(txq);
a2fbb9ea
ET
977 }
978}
979
993ac7b5
MC
980#ifdef BCM_CNIC
981static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
982#endif
3196a88a 983
a2fbb9ea
ET
984static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
985 union eth_rx_cqe *rr_cqe)
986{
987 struct bnx2x *bp = fp->bp;
988 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
989 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
990
34f80b04 991 DP(BNX2X_MSG_SP,
a2fbb9ea 992 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 993 fp->index, cid, command, bp->state,
34f80b04 994 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
995
996 bp->spq_left++;
997
0626b899 998 if (fp->index) {
a2fbb9ea
ET
999 switch (command | fp->state) {
1000 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1001 BNX2X_FP_STATE_OPENING):
1002 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1003 cid);
1004 fp->state = BNX2X_FP_STATE_OPEN;
1005 break;
1006
1007 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1008 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1009 cid);
1010 fp->state = BNX2X_FP_STATE_HALTED;
1011 break;
1012
1013 default:
34f80b04
EG
1014 BNX2X_ERR("unexpected MC reply (%d) "
1015 "fp->state is %x\n", command, fp->state);
1016 break;
a2fbb9ea 1017 }
34f80b04 1018 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1019 return;
1020 }
c14423fe 1021
a2fbb9ea
ET
1022 switch (command | bp->state) {
1023 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1024 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1025 bp->state = BNX2X_STATE_OPEN;
1026 break;
1027
1028 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1029 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1030 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1031 fp->state = BNX2X_FP_STATE_HALTED;
1032 break;
1033
a2fbb9ea 1034 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1035 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 1036 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
1037 break;
1038
993ac7b5
MC
1039#ifdef BCM_CNIC
1040 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1041 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1042 bnx2x_cnic_cfc_comp(bp, cid);
1043 break;
1044#endif
3196a88a 1045
a2fbb9ea 1046 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 1047 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 1048 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
e665bfda
MC
1049 bp->set_mac_pending--;
1050 smp_wmb();
a2fbb9ea
ET
1051 break;
1052
49d66772 1053 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1054 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
e665bfda
MC
1055 bp->set_mac_pending--;
1056 smp_wmb();
49d66772
ET
1057 break;
1058
a2fbb9ea 1059 default:
34f80b04 1060 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 1061 command, bp->state);
34f80b04 1062 break;
a2fbb9ea 1063 }
34f80b04 1064 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1065}
1066
7a9b2557
VZ
1067static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1068 struct bnx2x_fastpath *fp, u16 index)
1069{
1070 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1071 struct page *page = sw_buf->page;
1072 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1073
1074 /* Skip "next page" elements */
1075 if (!page)
1076 return;
1077
1078 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1079 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1080 __free_pages(page, PAGES_PER_SGE_SHIFT);
1081
1082 sw_buf->page = NULL;
1083 sge->addr_hi = 0;
1084 sge->addr_lo = 0;
1085}
1086
1087static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1088 struct bnx2x_fastpath *fp, int last)
1089{
1090 int i;
1091
1092 for (i = 0; i < last; i++)
1093 bnx2x_free_rx_sge(bp, fp, i);
1094}
1095
1096static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1097 struct bnx2x_fastpath *fp, u16 index)
1098{
1099 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1100 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1101 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1102 dma_addr_t mapping;
1103
1104 if (unlikely(page == NULL))
1105 return -ENOMEM;
1106
4f40f2cb 1107 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1108 PCI_DMA_FROMDEVICE);
8d8bb39b 1109 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1110 __free_pages(page, PAGES_PER_SGE_SHIFT);
1111 return -ENOMEM;
1112 }
1113
1114 sw_buf->page = page;
1115 pci_unmap_addr_set(sw_buf, mapping, mapping);
1116
1117 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1118 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1119
1120 return 0;
1121}
1122
a2fbb9ea
ET
1123static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1124 struct bnx2x_fastpath *fp, u16 index)
1125{
1126 struct sk_buff *skb;
1127 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1128 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1129 dma_addr_t mapping;
1130
1131 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1132 if (unlikely(skb == NULL))
1133 return -ENOMEM;
1134
437cf2f1 1135 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1136 PCI_DMA_FROMDEVICE);
8d8bb39b 1137 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1138 dev_kfree_skb(skb);
1139 return -ENOMEM;
1140 }
1141
1142 rx_buf->skb = skb;
1143 pci_unmap_addr_set(rx_buf, mapping, mapping);
1144
1145 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1146 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1147
1148 return 0;
1149}
1150
1151/* note that we are not allocating a new skb,
1152 * we are just moving one from cons to prod
1153 * we are not creating a new mapping,
1154 * so there is no need to check for dma_mapping_error().
1155 */
1156static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1157 struct sk_buff *skb, u16 cons, u16 prod)
1158{
1159 struct bnx2x *bp = fp->bp;
1160 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1161 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1162 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1163 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1164
1165 pci_dma_sync_single_for_device(bp->pdev,
1166 pci_unmap_addr(cons_rx_buf, mapping),
87942b46 1167 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
1168
1169 prod_rx_buf->skb = cons_rx_buf->skb;
1170 pci_unmap_addr_set(prod_rx_buf, mapping,
1171 pci_unmap_addr(cons_rx_buf, mapping));
1172 *prod_bd = *cons_bd;
1173}
1174
7a9b2557
VZ
1175static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1176 u16 idx)
1177{
1178 u16 last_max = fp->last_max_sge;
1179
1180 if (SUB_S16(idx, last_max) > 0)
1181 fp->last_max_sge = idx;
1182}
1183
1184static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1185{
1186 int i, j;
1187
1188 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1189 int idx = RX_SGE_CNT * i - 1;
1190
1191 for (j = 0; j < 2; j++) {
1192 SGE_MASK_CLEAR_BIT(fp, idx);
1193 idx--;
1194 }
1195 }
1196}
1197
1198static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1199 struct eth_fast_path_rx_cqe *fp_cqe)
1200{
1201 struct bnx2x *bp = fp->bp;
4f40f2cb 1202 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1203 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1204 SGE_PAGE_SHIFT;
7a9b2557
VZ
1205 u16 last_max, last_elem, first_elem;
1206 u16 delta = 0;
1207 u16 i;
1208
1209 if (!sge_len)
1210 return;
1211
1212 /* First mark all used pages */
1213 for (i = 0; i < sge_len; i++)
1214 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1215
1216 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1217 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1218
1219 /* Here we assume that the last SGE index is the biggest */
1220 prefetch((void *)(fp->sge_mask));
1221 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1222
1223 last_max = RX_SGE(fp->last_max_sge);
1224 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1225 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1226
1227 /* If ring is not full */
1228 if (last_elem + 1 != first_elem)
1229 last_elem++;
1230
1231 /* Now update the prod */
1232 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1233 if (likely(fp->sge_mask[i]))
1234 break;
1235
1236 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1237 delta += RX_SGE_MASK_ELEM_SZ;
1238 }
1239
1240 if (delta > 0) {
1241 fp->rx_sge_prod += delta;
1242 /* clear page-end entries */
1243 bnx2x_clear_sge_mask_next_elems(fp);
1244 }
1245
1246 DP(NETIF_MSG_RX_STATUS,
1247 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1248 fp->last_max_sge, fp->rx_sge_prod);
1249}
1250
1251static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1252{
1253 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1254 memset(fp->sge_mask, 0xff,
1255 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1256
33471629
EG
1257 /* Clear the two last indices in the page to 1:
1258 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1259 hence will never be indicated and should be removed from
1260 the calculations. */
1261 bnx2x_clear_sge_mask_next_elems(fp);
1262}
1263
1264static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1265 struct sk_buff *skb, u16 cons, u16 prod)
1266{
1267 struct bnx2x *bp = fp->bp;
1268 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1269 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1270 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1271 dma_addr_t mapping;
1272
1273 /* move empty skb from pool to prod and map it */
1274 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1275 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1276 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1277 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1278
1279 /* move partial skb from cons to pool (don't unmap yet) */
1280 fp->tpa_pool[queue] = *cons_rx_buf;
1281
1282 /* mark bin state as start - print error if current state != stop */
1283 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1284 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1285
1286 fp->tpa_state[queue] = BNX2X_TPA_START;
1287
1288 /* point prod_bd to new skb */
1289 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1290 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1291
1292#ifdef BNX2X_STOP_ON_ERROR
1293 fp->tpa_queue_used |= (1 << queue);
1294#ifdef __powerpc64__
1295 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1296#else
1297 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1298#endif
1299 fp->tpa_queue_used);
1300#endif
1301}
1302
1303static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1304 struct sk_buff *skb,
1305 struct eth_fast_path_rx_cqe *fp_cqe,
1306 u16 cqe_idx)
1307{
1308 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1309 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1310 u32 i, frag_len, frag_size, pages;
1311 int err;
1312 int j;
1313
1314 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1315 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1316
1317 /* This is needed in order to enable forwarding support */
1318 if (frag_size)
4f40f2cb 1319 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1320 max(frag_size, (u32)len_on_bd));
1321
1322#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1323 if (pages >
1324 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1325 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1326 pages, cqe_idx);
1327 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1328 fp_cqe->pkt_len, len_on_bd);
1329 bnx2x_panic();
1330 return -EINVAL;
1331 }
1332#endif
1333
1334 /* Run through the SGL and compose the fragmented skb */
1335 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1336 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1337
1338 /* FW gives the indices of the SGE as if the ring is an array
1339 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1340 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1341 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1342 old_rx_pg = *rx_pg;
1343
1344 /* If we fail to allocate a substitute page, we simply stop
1345 where we are and drop the whole packet */
1346 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1347 if (unlikely(err)) {
de832a55 1348 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1349 return err;
1350 }
1351
1352 /* Unmap the page as we r going to pass it to the stack */
1353 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1354 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1355
1356 /* Add one frag and update the appropriate fields in the skb */
1357 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1358
1359 skb->data_len += frag_len;
1360 skb->truesize += frag_len;
1361 skb->len += frag_len;
1362
1363 frag_size -= frag_len;
1364 }
1365
1366 return 0;
1367}
1368
1369static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1370 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1371 u16 cqe_idx)
1372{
1373 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1374 struct sk_buff *skb = rx_buf->skb;
1375 /* alloc new skb */
1376 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1377
1378 /* Unmap skb in the pool anyway, as we are going to change
1379 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1380 fails. */
1381 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1382 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1383
7a9b2557 1384 if (likely(new_skb)) {
66e855f3
YG
1385 /* fix ip xsum and give it to the stack */
1386 /* (no need to map the new skb) */
0c6671b0
EG
1387#ifdef BCM_VLAN
1388 int is_vlan_cqe =
1389 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1390 PARSING_FLAGS_VLAN);
1391 int is_not_hwaccel_vlan_cqe =
1392 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1393#endif
7a9b2557
VZ
1394
1395 prefetch(skb);
1396 prefetch(((char *)(skb)) + 128);
1397
7a9b2557
VZ
1398#ifdef BNX2X_STOP_ON_ERROR
1399 if (pad + len > bp->rx_buf_size) {
1400 BNX2X_ERR("skb_put is about to fail... "
1401 "pad %d len %d rx_buf_size %d\n",
1402 pad, len, bp->rx_buf_size);
1403 bnx2x_panic();
1404 return;
1405 }
1406#endif
1407
1408 skb_reserve(skb, pad);
1409 skb_put(skb, len);
1410
1411 skb->protocol = eth_type_trans(skb, bp->dev);
1412 skb->ip_summed = CHECKSUM_UNNECESSARY;
1413
1414 {
1415 struct iphdr *iph;
1416
1417 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1418#ifdef BCM_VLAN
1419 /* If there is no Rx VLAN offloading -
1420 take VLAN tag into an account */
1421 if (unlikely(is_not_hwaccel_vlan_cqe))
1422 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1423#endif
7a9b2557
VZ
1424 iph->check = 0;
1425 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1426 }
1427
1428 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1429 &cqe->fast_path_cqe, cqe_idx)) {
1430#ifdef BCM_VLAN
0c6671b0
EG
1431 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1432 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1433 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1434 le16_to_cpu(cqe->fast_path_cqe.
1435 vlan_tag));
1436 else
1437#endif
1438 netif_receive_skb(skb);
1439 } else {
1440 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1441 " - dropping packet!\n");
1442 dev_kfree_skb(skb);
1443 }
1444
7a9b2557
VZ
1445
1446 /* put new skb in bin */
1447 fp->tpa_pool[queue].skb = new_skb;
1448
1449 } else {
66e855f3 1450 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1451 DP(NETIF_MSG_RX_STATUS,
1452 "Failed to allocate new skb - dropping packet!\n");
de832a55 1453 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1454 }
1455
1456 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1457}
1458
1459static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1460 struct bnx2x_fastpath *fp,
1461 u16 bd_prod, u16 rx_comp_prod,
1462 u16 rx_sge_prod)
1463{
8d9c5f34 1464 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1465 int i;
1466
1467 /* Update producers */
1468 rx_prods.bd_prod = bd_prod;
1469 rx_prods.cqe_prod = rx_comp_prod;
1470 rx_prods.sge_prod = rx_sge_prod;
1471
58f4c4cf
EG
1472 /*
1473 * Make sure that the BD and SGE data is updated before updating the
1474 * producers since FW might read the BD/SGE right after the producer
1475 * is updated.
1476 * This is only applicable for weak-ordered memory model archs such
1477 * as IA-64. The following barrier is also mandatory since FW will
1478 * assumes BDs must have buffers.
1479 */
1480 wmb();
1481
8d9c5f34
EG
1482 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1483 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1484 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1485 ((u32 *)&rx_prods)[i]);
1486
58f4c4cf
EG
1487 mmiowb(); /* keep prod updates ordered */
1488
7a9b2557 1489 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1490 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1491 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1492}
1493
a2fbb9ea
ET
1494static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1495{
1496 struct bnx2x *bp = fp->bp;
34f80b04 1497 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1498 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1499 int rx_pkt = 0;
1500
1501#ifdef BNX2X_STOP_ON_ERROR
1502 if (unlikely(bp->panic))
1503 return 0;
1504#endif
1505
34f80b04
EG
1506 /* CQ "next element" is of the size of the regular element,
1507 that's why it's ok here */
a2fbb9ea
ET
1508 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1509 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1510 hw_comp_cons++;
1511
1512 bd_cons = fp->rx_bd_cons;
1513 bd_prod = fp->rx_bd_prod;
34f80b04 1514 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1515 sw_comp_cons = fp->rx_comp_cons;
1516 sw_comp_prod = fp->rx_comp_prod;
1517
1518 /* Memory barrier necessary as speculative reads of the rx
1519 * buffer can be ahead of the index in the status block
1520 */
1521 rmb();
1522
1523 DP(NETIF_MSG_RX_STATUS,
1524 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1525 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1526
1527 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1528 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1529 struct sk_buff *skb;
1530 union eth_rx_cqe *cqe;
34f80b04
EG
1531 u8 cqe_fp_flags;
1532 u16 len, pad;
a2fbb9ea
ET
1533
1534 comp_ring_cons = RCQ_BD(sw_comp_cons);
1535 bd_prod = RX_BD(bd_prod);
1536 bd_cons = RX_BD(bd_cons);
1537
619e7a66
EG
1538 /* Prefetch the page containing the BD descriptor
1539 at producer's index. It will be needed when new skb is
1540 allocated */
1541 prefetch((void *)(PAGE_ALIGN((unsigned long)
1542 (&fp->rx_desc_ring[bd_prod])) -
1543 PAGE_SIZE + 1));
1544
a2fbb9ea 1545 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1546 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1547
a2fbb9ea 1548 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1549 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1550 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1551 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1552 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1553 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1554
1555 /* is this a slowpath msg? */
34f80b04 1556 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1557 bnx2x_sp_event(fp, cqe);
1558 goto next_cqe;
1559
1560 /* this is an rx packet */
1561 } else {
1562 rx_buf = &fp->rx_buf_ring[bd_cons];
1563 skb = rx_buf->skb;
a2fbb9ea
ET
1564 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1565 pad = cqe->fast_path_cqe.placement_offset;
1566
7a9b2557
VZ
1567 /* If CQE is marked both TPA_START and TPA_END
1568 it is a non-TPA CQE */
1569 if ((!fp->disable_tpa) &&
1570 (TPA_TYPE(cqe_fp_flags) !=
1571 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1572 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1573
1574 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1575 DP(NETIF_MSG_RX_STATUS,
1576 "calling tpa_start on queue %d\n",
1577 queue);
1578
1579 bnx2x_tpa_start(fp, queue, skb,
1580 bd_cons, bd_prod);
1581 goto next_rx;
1582 }
1583
1584 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1585 DP(NETIF_MSG_RX_STATUS,
1586 "calling tpa_stop on queue %d\n",
1587 queue);
1588
1589 if (!BNX2X_RX_SUM_FIX(cqe))
1590 BNX2X_ERR("STOP on none TCP "
1591 "data\n");
1592
1593 /* This is a size of the linear data
1594 on this skb */
1595 len = le16_to_cpu(cqe->fast_path_cqe.
1596 len_on_bd);
1597 bnx2x_tpa_stop(bp, fp, queue, pad,
1598 len, cqe, comp_ring_cons);
1599#ifdef BNX2X_STOP_ON_ERROR
1600 if (bp->panic)
17cb4006 1601 return 0;
7a9b2557
VZ
1602#endif
1603
1604 bnx2x_update_sge_prod(fp,
1605 &cqe->fast_path_cqe);
1606 goto next_cqe;
1607 }
1608 }
1609
a2fbb9ea
ET
1610 pci_dma_sync_single_for_device(bp->pdev,
1611 pci_unmap_addr(rx_buf, mapping),
1612 pad + RX_COPY_THRESH,
1613 PCI_DMA_FROMDEVICE);
1614 prefetch(skb);
1615 prefetch(((char *)(skb)) + 128);
1616
1617 /* is this an error packet? */
34f80b04 1618 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1619 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1620 "ERROR flags %x rx packet %u\n",
1621 cqe_fp_flags, sw_comp_cons);
de832a55 1622 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1623 goto reuse_rx;
1624 }
1625
1626 /* Since we don't have a jumbo ring
1627 * copy small packets if mtu > 1500
1628 */
1629 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1630 (len <= RX_COPY_THRESH)) {
1631 struct sk_buff *new_skb;
1632
1633 new_skb = netdev_alloc_skb(bp->dev,
1634 len + pad);
1635 if (new_skb == NULL) {
1636 DP(NETIF_MSG_RX_ERR,
34f80b04 1637 "ERROR packet dropped "
a2fbb9ea 1638 "because of alloc failure\n");
de832a55 1639 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1640 goto reuse_rx;
1641 }
1642
1643 /* aligned copy */
1644 skb_copy_from_linear_data_offset(skb, pad,
1645 new_skb->data + pad, len);
1646 skb_reserve(new_skb, pad);
1647 skb_put(new_skb, len);
1648
1649 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1650
1651 skb = new_skb;
1652
a119a069
EG
1653 } else
1654 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
a2fbb9ea
ET
1655 pci_unmap_single(bp->pdev,
1656 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1657 bp->rx_buf_size,
a2fbb9ea
ET
1658 PCI_DMA_FROMDEVICE);
1659 skb_reserve(skb, pad);
1660 skb_put(skb, len);
1661
1662 } else {
1663 DP(NETIF_MSG_RX_ERR,
34f80b04 1664 "ERROR packet dropped because "
a2fbb9ea 1665 "of alloc failure\n");
de832a55 1666 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1667reuse_rx:
1668 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1669 goto next_rx;
1670 }
1671
1672 skb->protocol = eth_type_trans(skb, bp->dev);
1673
1674 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1675 if (bp->rx_csum) {
1adcd8be
EG
1676 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1677 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1678 else
de832a55 1679 fp->eth_q_stats.hw_csum_err++;
66e855f3 1680 }
a2fbb9ea
ET
1681 }
1682
748e5439 1683 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1684
a2fbb9ea 1685#ifdef BCM_VLAN
0c6671b0 1686 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1687 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1688 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1689 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1690 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1691 else
1692#endif
34f80b04 1693 netif_receive_skb(skb);
a2fbb9ea 1694
a2fbb9ea
ET
1695
1696next_rx:
1697 rx_buf->skb = NULL;
1698
1699 bd_cons = NEXT_RX_IDX(bd_cons);
1700 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1701 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1702 rx_pkt++;
a2fbb9ea
ET
1703next_cqe:
1704 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1705 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1706
34f80b04 1707 if (rx_pkt == budget)
a2fbb9ea
ET
1708 break;
1709 } /* while */
1710
1711 fp->rx_bd_cons = bd_cons;
34f80b04 1712 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1713 fp->rx_comp_cons = sw_comp_cons;
1714 fp->rx_comp_prod = sw_comp_prod;
1715
7a9b2557
VZ
1716 /* Update producers */
1717 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1718 fp->rx_sge_prod);
a2fbb9ea
ET
1719
1720 fp->rx_pkt += rx_pkt;
1721 fp->rx_calls++;
1722
1723 return rx_pkt;
1724}
1725
1726static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1727{
1728 struct bnx2x_fastpath *fp = fp_cookie;
1729 struct bnx2x *bp = fp->bp;
a2fbb9ea 1730
da5a662a
VZ
1731 /* Return here if interrupt is disabled */
1732 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1733 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1734 return IRQ_HANDLED;
1735 }
1736
34f80b04 1737 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1738 fp->index, fp->sb_id);
0626b899 1739 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1740
1741#ifdef BNX2X_STOP_ON_ERROR
1742 if (unlikely(bp->panic))
1743 return IRQ_HANDLED;
1744#endif
ca00392c
EG
1745 /* Handle Rx or Tx according to MSI-X vector */
1746 if (fp->is_rx_queue) {
1747 prefetch(fp->rx_cons_sb);
1748 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1749
ca00392c 1750 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1751
ca00392c
EG
1752 } else {
1753 prefetch(fp->tx_cons_sb);
1754 prefetch(&fp->status_blk->c_status_block.status_block_index);
1755
1756 bnx2x_update_fpsb_idx(fp);
1757 rmb();
1758 bnx2x_tx_int(fp);
1759
1760 /* Re-enable interrupts */
1761 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1762 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1763 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1764 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1765 }
34f80b04 1766
a2fbb9ea
ET
1767 return IRQ_HANDLED;
1768}
1769
1770static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1771{
555f6c78 1772 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1773 u16 status = bnx2x_ack_int(bp);
34f80b04 1774 u16 mask;
ca00392c 1775 int i;
a2fbb9ea 1776
34f80b04 1777 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1778 if (unlikely(status == 0)) {
1779 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1780 return IRQ_NONE;
1781 }
f5372251 1782 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1783
34f80b04 1784 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1785 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1786 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1787 return IRQ_HANDLED;
1788 }
1789
3196a88a
EG
1790#ifdef BNX2X_STOP_ON_ERROR
1791 if (unlikely(bp->panic))
1792 return IRQ_HANDLED;
1793#endif
1794
ca00392c
EG
1795 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1796 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1797
ca00392c
EG
1798 mask = 0x2 << fp->sb_id;
1799 if (status & mask) {
1800 /* Handle Rx or Tx according to SB id */
1801 if (fp->is_rx_queue) {
1802 prefetch(fp->rx_cons_sb);
1803 prefetch(&fp->status_blk->u_status_block.
1804 status_block_index);
a2fbb9ea 1805
ca00392c 1806 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1807
ca00392c
EG
1808 } else {
1809 prefetch(fp->tx_cons_sb);
1810 prefetch(&fp->status_blk->c_status_block.
1811 status_block_index);
1812
1813 bnx2x_update_fpsb_idx(fp);
1814 rmb();
1815 bnx2x_tx_int(fp);
1816
1817 /* Re-enable interrupts */
1818 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1819 le16_to_cpu(fp->fp_u_idx),
1820 IGU_INT_NOP, 1);
1821 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1822 le16_to_cpu(fp->fp_c_idx),
1823 IGU_INT_ENABLE, 1);
1824 }
1825 status &= ~mask;
1826 }
a2fbb9ea
ET
1827 }
1828
993ac7b5
MC
1829#ifdef BCM_CNIC
1830 mask = 0x2 << CNIC_SB_ID(bp);
1831 if (status & (mask | 0x1)) {
1832 struct cnic_ops *c_ops = NULL;
1833
1834 rcu_read_lock();
1835 c_ops = rcu_dereference(bp->cnic_ops);
1836 if (c_ops)
1837 c_ops->cnic_handler(bp->cnic_data, NULL);
1838 rcu_read_unlock();
1839
1840 status &= ~mask;
1841 }
1842#endif
a2fbb9ea 1843
34f80b04 1844 if (unlikely(status & 0x1)) {
1cf167f2 1845 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1846
1847 status &= ~0x1;
1848 if (!status)
1849 return IRQ_HANDLED;
1850 }
1851
34f80b04
EG
1852 if (status)
1853 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1854 status);
a2fbb9ea 1855
c18487ee 1856 return IRQ_HANDLED;
a2fbb9ea
ET
1857}
1858
c18487ee 1859/* end of fast path */
a2fbb9ea 1860
bb2a0f7a 1861static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1862
c18487ee
YR
1863/* Link */
1864
1865/*
1866 * General service functions
1867 */
a2fbb9ea 1868
4a37fb66 1869static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1870{
1871 u32 lock_status;
1872 u32 resource_bit = (1 << resource);
4a37fb66
YG
1873 int func = BP_FUNC(bp);
1874 u32 hw_lock_control_reg;
c18487ee 1875 int cnt;
a2fbb9ea 1876
c18487ee
YR
1877 /* Validating that the resource is within range */
1878 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1879 DP(NETIF_MSG_HW,
1880 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1881 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1882 return -EINVAL;
1883 }
a2fbb9ea 1884
4a37fb66
YG
1885 if (func <= 5) {
1886 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1887 } else {
1888 hw_lock_control_reg =
1889 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1890 }
1891
c18487ee 1892 /* Validating that the resource is not already taken */
4a37fb66 1893 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1894 if (lock_status & resource_bit) {
1895 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1896 lock_status, resource_bit);
1897 return -EEXIST;
1898 }
a2fbb9ea 1899
46230476
EG
1900 /* Try for 5 second every 5ms */
1901 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1902 /* Try to acquire the lock */
4a37fb66
YG
1903 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1904 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1905 if (lock_status & resource_bit)
1906 return 0;
a2fbb9ea 1907
c18487ee 1908 msleep(5);
a2fbb9ea 1909 }
c18487ee
YR
1910 DP(NETIF_MSG_HW, "Timeout\n");
1911 return -EAGAIN;
1912}
a2fbb9ea 1913
4a37fb66 1914static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1915{
1916 u32 lock_status;
1917 u32 resource_bit = (1 << resource);
4a37fb66
YG
1918 int func = BP_FUNC(bp);
1919 u32 hw_lock_control_reg;
a2fbb9ea 1920
c18487ee
YR
1921 /* Validating that the resource is within range */
1922 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1923 DP(NETIF_MSG_HW,
1924 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1925 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1926 return -EINVAL;
1927 }
1928
4a37fb66
YG
1929 if (func <= 5) {
1930 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1931 } else {
1932 hw_lock_control_reg =
1933 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1934 }
1935
c18487ee 1936 /* Validating that the resource is currently taken */
4a37fb66 1937 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1938 if (!(lock_status & resource_bit)) {
1939 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1940 lock_status, resource_bit);
1941 return -EFAULT;
a2fbb9ea
ET
1942 }
1943
4a37fb66 1944 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1945 return 0;
1946}
1947
1948/* HW Lock for shared dual port PHYs */
4a37fb66 1949static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1950{
34f80b04 1951 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1952
46c6a674
EG
1953 if (bp->port.need_hw_lock)
1954 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1955}
a2fbb9ea 1956
4a37fb66 1957static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1958{
46c6a674
EG
1959 if (bp->port.need_hw_lock)
1960 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1961
34f80b04 1962 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1963}
a2fbb9ea 1964
4acac6a5
EG
1965int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1966{
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1973 u32 gpio_reg;
1974 int value;
1975
1976 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1977 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1978 return -EINVAL;
1979 }
1980
1981 /* read GPIO value */
1982 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1983
1984 /* get the requested pin value */
1985 if ((gpio_reg & gpio_mask) == gpio_mask)
1986 value = 1;
1987 else
1988 value = 0;
1989
1990 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1991
1992 return value;
1993}
1994
17de50b7 1995int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1996{
1997 /* The GPIO should be swapped if swap register is set and active */
1998 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1999 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2000 int gpio_shift = gpio_num +
2001 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2002 u32 gpio_mask = (1 << gpio_shift);
2003 u32 gpio_reg;
a2fbb9ea 2004
c18487ee
YR
2005 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2006 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2007 return -EINVAL;
2008 }
a2fbb9ea 2009
4a37fb66 2010 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2011 /* read GPIO and mask except the float bits */
2012 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2013
c18487ee
YR
2014 switch (mode) {
2015 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2016 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2017 gpio_num, gpio_shift);
2018 /* clear FLOAT and set CLR */
2019 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2020 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2021 break;
a2fbb9ea 2022
c18487ee
YR
2023 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2024 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2025 gpio_num, gpio_shift);
2026 /* clear FLOAT and set SET */
2027 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2028 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2029 break;
a2fbb9ea 2030
17de50b7 2031 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2032 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2033 gpio_num, gpio_shift);
2034 /* set FLOAT */
2035 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2036 break;
a2fbb9ea 2037
c18487ee
YR
2038 default:
2039 break;
a2fbb9ea
ET
2040 }
2041
c18487ee 2042 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2043 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2044
c18487ee 2045 return 0;
a2fbb9ea
ET
2046}
2047
4acac6a5
EG
2048int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2049{
2050 /* The GPIO should be swapped if swap register is set and active */
2051 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2052 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2053 int gpio_shift = gpio_num +
2054 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2055 u32 gpio_mask = (1 << gpio_shift);
2056 u32 gpio_reg;
2057
2058 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2059 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2060 return -EINVAL;
2061 }
2062
2063 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2064 /* read GPIO int */
2065 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2066
2067 switch (mode) {
2068 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2069 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2070 "output low\n", gpio_num, gpio_shift);
2071 /* clear SET and set CLR */
2072 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2073 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2074 break;
2075
2076 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2077 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2078 "output high\n", gpio_num, gpio_shift);
2079 /* clear CLR and set SET */
2080 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2081 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2082 break;
2083
2084 default:
2085 break;
2086 }
2087
2088 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2089 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2090
2091 return 0;
2092}
2093
c18487ee 2094static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2095{
c18487ee
YR
2096 u32 spio_mask = (1 << spio_num);
2097 u32 spio_reg;
a2fbb9ea 2098
c18487ee
YR
2099 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2100 (spio_num > MISC_REGISTERS_SPIO_7)) {
2101 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2102 return -EINVAL;
a2fbb9ea
ET
2103 }
2104
4a37fb66 2105 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2106 /* read SPIO and mask except the float bits */
2107 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2108
c18487ee 2109 switch (mode) {
6378c025 2110 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2111 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2112 /* clear FLOAT and set CLR */
2113 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2114 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2115 break;
a2fbb9ea 2116
6378c025 2117 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2118 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2119 /* clear FLOAT and set SET */
2120 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2121 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2122 break;
a2fbb9ea 2123
c18487ee
YR
2124 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2125 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2126 /* set FLOAT */
2127 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2128 break;
a2fbb9ea 2129
c18487ee
YR
2130 default:
2131 break;
a2fbb9ea
ET
2132 }
2133
c18487ee 2134 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2135 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2136
a2fbb9ea
ET
2137 return 0;
2138}
2139
c18487ee 2140static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2141{
ad33ea3a
EG
2142 switch (bp->link_vars.ieee_fc &
2143 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2144 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2145 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2146 ADVERTISED_Pause);
2147 break;
356e2385 2148
c18487ee 2149 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2150 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2151 ADVERTISED_Pause);
2152 break;
356e2385 2153
c18487ee 2154 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2155 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2156 break;
356e2385 2157
c18487ee 2158 default:
34f80b04 2159 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2160 ADVERTISED_Pause);
2161 break;
2162 }
2163}
f1410647 2164
c18487ee
YR
2165static void bnx2x_link_report(struct bnx2x *bp)
2166{
f34d28ea 2167 if (bp->flags & MF_FUNC_DIS) {
2691d51d
EG
2168 netif_carrier_off(bp->dev);
2169 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2170 return;
2171 }
2172
c18487ee 2173 if (bp->link_vars.link_up) {
35c5f8fe
EG
2174 u16 line_speed;
2175
c18487ee
YR
2176 if (bp->state == BNX2X_STATE_OPEN)
2177 netif_carrier_on(bp->dev);
2178 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2179
35c5f8fe
EG
2180 line_speed = bp->link_vars.line_speed;
2181 if (IS_E1HMF(bp)) {
2182 u16 vn_max_rate;
2183
2184 vn_max_rate =
2185 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2186 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2187 if (vn_max_rate < line_speed)
2188 line_speed = vn_max_rate;
2189 }
2190 printk("%d Mbps ", line_speed);
f1410647 2191
c18487ee
YR
2192 if (bp->link_vars.duplex == DUPLEX_FULL)
2193 printk("full duplex");
2194 else
2195 printk("half duplex");
f1410647 2196
c0700f90
DM
2197 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2198 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2199 printk(", receive ");
356e2385
EG
2200 if (bp->link_vars.flow_ctrl &
2201 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2202 printk("& transmit ");
2203 } else {
2204 printk(", transmit ");
2205 }
2206 printk("flow control ON");
2207 }
2208 printk("\n");
f1410647 2209
c18487ee
YR
2210 } else { /* link_down */
2211 netif_carrier_off(bp->dev);
2212 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2213 }
c18487ee
YR
2214}
2215
b5bf9068 2216static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2217{
19680c48
EG
2218 if (!BP_NOMCP(bp)) {
2219 u8 rc;
a2fbb9ea 2220
19680c48 2221 /* Initialize link parameters structure variables */
8c99e7b0
YR
2222 /* It is recommended to turn off RX FC for jumbo frames
2223 for better performance */
0c593270 2224 if (bp->dev->mtu > 5000)
c0700f90 2225 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2226 else
c0700f90 2227 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2228
4a37fb66 2229 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2230
2231 if (load_mode == LOAD_DIAG)
2232 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2233
19680c48 2234 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2235
4a37fb66 2236 bnx2x_release_phy_lock(bp);
a2fbb9ea 2237
3c96c68b
EG
2238 bnx2x_calc_fc_adv(bp);
2239
b5bf9068
EG
2240 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2241 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2242 bnx2x_link_report(bp);
b5bf9068 2243 }
34f80b04 2244
19680c48
EG
2245 return rc;
2246 }
f5372251 2247 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2248 return -EINVAL;
a2fbb9ea
ET
2249}
2250
c18487ee 2251static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2252{
19680c48 2253 if (!BP_NOMCP(bp)) {
4a37fb66 2254 bnx2x_acquire_phy_lock(bp);
19680c48 2255 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2256 bnx2x_release_phy_lock(bp);
a2fbb9ea 2257
19680c48
EG
2258 bnx2x_calc_fc_adv(bp);
2259 } else
f5372251 2260 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2261}
a2fbb9ea 2262
c18487ee
YR
2263static void bnx2x__link_reset(struct bnx2x *bp)
2264{
19680c48 2265 if (!BP_NOMCP(bp)) {
4a37fb66 2266 bnx2x_acquire_phy_lock(bp);
589abe3a 2267 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2268 bnx2x_release_phy_lock(bp);
19680c48 2269 } else
f5372251 2270 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2271}
a2fbb9ea 2272
c18487ee
YR
2273static u8 bnx2x_link_test(struct bnx2x *bp)
2274{
2275 u8 rc;
a2fbb9ea 2276
4a37fb66 2277 bnx2x_acquire_phy_lock(bp);
c18487ee 2278 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2279 bnx2x_release_phy_lock(bp);
a2fbb9ea 2280
c18487ee
YR
2281 return rc;
2282}
a2fbb9ea 2283
8a1c38d1 2284static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2285{
8a1c38d1
EG
2286 u32 r_param = bp->link_vars.line_speed / 8;
2287 u32 fair_periodic_timeout_usec;
2288 u32 t_fair;
34f80b04 2289
8a1c38d1
EG
2290 memset(&(bp->cmng.rs_vars), 0,
2291 sizeof(struct rate_shaping_vars_per_port));
2292 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2293
8a1c38d1
EG
2294 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2295 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2296
8a1c38d1
EG
2297 /* this is the threshold below which no timer arming will occur
2298 1.25 coefficient is for the threshold to be a little bigger
2299 than the real time, to compensate for timer in-accuracy */
2300 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2301 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2302
8a1c38d1
EG
2303 /* resolution of fairness timer */
2304 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2305 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2306 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2307
8a1c38d1
EG
2308 /* this is the threshold below which we won't arm the timer anymore */
2309 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2310
8a1c38d1
EG
2311 /* we multiply by 1e3/8 to get bytes/msec.
2312 We don't want the credits to pass a credit
2313 of the t_fair*FAIR_MEM (algorithm resolution) */
2314 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2315 /* since each tick is 4 usec */
2316 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2317}
2318
2691d51d
EG
2319/* Calculates the sum of vn_min_rates.
2320 It's needed for further normalizing of the min_rates.
2321 Returns:
2322 sum of vn_min_rates.
2323 or
2324 0 - if all the min_rates are 0.
2325 In the later case fainess algorithm should be deactivated.
2326 If not all min_rates are zero then those that are zeroes will be set to 1.
2327 */
2328static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2329{
2330 int all_zero = 1;
2331 int port = BP_PORT(bp);
2332 int vn;
2333
2334 bp->vn_weight_sum = 0;
2335 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2336 int func = 2*vn + port;
2337 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2338 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2339 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2340
2341 /* Skip hidden vns */
2342 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2343 continue;
2344
2345 /* If min rate is zero - set it to 1 */
2346 if (!vn_min_rate)
2347 vn_min_rate = DEF_MIN_RATE;
2348 else
2349 all_zero = 0;
2350
2351 bp->vn_weight_sum += vn_min_rate;
2352 }
2353
2354 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2355 if (all_zero) {
2356 bp->cmng.flags.cmng_enables &=
2357 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2358 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2359 " fairness will be disabled\n");
2360 } else
2361 bp->cmng.flags.cmng_enables |=
2362 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2363}
2364
8a1c38d1 2365static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2366{
2367 struct rate_shaping_vars_per_vn m_rs_vn;
2368 struct fairness_vars_per_vn m_fair_vn;
2369 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2370 u16 vn_min_rate, vn_max_rate;
2371 int i;
2372
2373 /* If function is hidden - set min and max to zeroes */
2374 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2375 vn_min_rate = 0;
2376 vn_max_rate = 0;
2377
2378 } else {
2379 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2380 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2381 /* If min rate is zero - set it to 1 */
2382 if (!vn_min_rate)
34f80b04
EG
2383 vn_min_rate = DEF_MIN_RATE;
2384 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2385 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2386 }
8a1c38d1 2387 DP(NETIF_MSG_IFUP,
b015e3d1 2388 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2389 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2390
2391 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2392 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2393
2394 /* global vn counter - maximal Mbps for this vn */
2395 m_rs_vn.vn_counter.rate = vn_max_rate;
2396
2397 /* quota - number of bytes transmitted in this period */
2398 m_rs_vn.vn_counter.quota =
2399 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2400
8a1c38d1 2401 if (bp->vn_weight_sum) {
34f80b04
EG
2402 /* credit for each period of the fairness algorithm:
2403 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2404 vn_weight_sum should not be larger than 10000, thus
2405 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2406 than zero */
34f80b04 2407 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2408 max((u32)(vn_min_rate * (T_FAIR_COEF /
2409 (8 * bp->vn_weight_sum))),
2410 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2411 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2412 m_fair_vn.vn_credit_delta);
2413 }
2414
34f80b04
EG
2415 /* Store it to internal memory */
2416 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2417 REG_WR(bp, BAR_XSTRORM_INTMEM +
2418 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2419 ((u32 *)(&m_rs_vn))[i]);
2420
2421 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2422 REG_WR(bp, BAR_XSTRORM_INTMEM +
2423 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2424 ((u32 *)(&m_fair_vn))[i]);
2425}
2426
8a1c38d1 2427
c18487ee
YR
2428/* This function is called upon link interrupt */
2429static void bnx2x_link_attn(struct bnx2x *bp)
2430{
bb2a0f7a
YG
2431 /* Make sure that we are synced with the current statistics */
2432 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2433
c18487ee 2434 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2435
bb2a0f7a
YG
2436 if (bp->link_vars.link_up) {
2437
1c06328c 2438 /* dropless flow control */
a18f5128 2439 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2440 int port = BP_PORT(bp);
2441 u32 pause_enabled = 0;
2442
2443 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2444 pause_enabled = 1;
2445
2446 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2447 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2448 pause_enabled);
2449 }
2450
bb2a0f7a
YG
2451 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2452 struct host_port_stats *pstats;
2453
2454 pstats = bnx2x_sp(bp, port_stats);
2455 /* reset old bmac stats */
2456 memset(&(pstats->mac_stx[0]), 0,
2457 sizeof(struct mac_stx));
2458 }
f34d28ea 2459 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2460 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2461 }
2462
c18487ee
YR
2463 /* indicate link status */
2464 bnx2x_link_report(bp);
34f80b04
EG
2465
2466 if (IS_E1HMF(bp)) {
8a1c38d1 2467 int port = BP_PORT(bp);
34f80b04 2468 int func;
8a1c38d1 2469 int vn;
34f80b04 2470
ab6ad5a4 2471 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2472 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2473 if (vn == BP_E1HVN(bp))
2474 continue;
2475
8a1c38d1 2476 func = ((vn << 1) | port);
34f80b04
EG
2477 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2478 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2479 }
34f80b04 2480
8a1c38d1
EG
2481 if (bp->link_vars.link_up) {
2482 int i;
2483
2484 /* Init rate shaping and fairness contexts */
2485 bnx2x_init_port_minmax(bp);
34f80b04 2486
34f80b04 2487 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2488 bnx2x_init_vn_minmax(bp, 2*vn + port);
2489
2490 /* Store it to internal memory */
2491 for (i = 0;
2492 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2493 REG_WR(bp, BAR_XSTRORM_INTMEM +
2494 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2495 ((u32 *)(&bp->cmng))[i]);
2496 }
34f80b04 2497 }
c18487ee 2498}
a2fbb9ea 2499
c18487ee
YR
2500static void bnx2x__link_status_update(struct bnx2x *bp)
2501{
f34d28ea 2502 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2503 return;
a2fbb9ea 2504
c18487ee 2505 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2506
bb2a0f7a
YG
2507 if (bp->link_vars.link_up)
2508 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2509 else
2510 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2511
2691d51d
EG
2512 bnx2x_calc_vn_weight_sum(bp);
2513
c18487ee
YR
2514 /* indicate link status */
2515 bnx2x_link_report(bp);
a2fbb9ea 2516}
a2fbb9ea 2517
34f80b04
EG
2518static void bnx2x_pmf_update(struct bnx2x *bp)
2519{
2520 int port = BP_PORT(bp);
2521 u32 val;
2522
2523 bp->port.pmf = 1;
2524 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2525
2526 /* enable nig attention */
2527 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2528 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2529 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2530
2531 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2532}
2533
c18487ee 2534/* end of Link */
a2fbb9ea
ET
2535
2536/* slow path */
2537
2538/*
2539 * General service functions
2540 */
2541
2691d51d
EG
2542/* send the MCP a request, block until there is a reply */
2543u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2544{
2545 int func = BP_FUNC(bp);
2546 u32 seq = ++bp->fw_seq;
2547 u32 rc = 0;
2548 u32 cnt = 1;
2549 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2550
c4ff7cbf 2551 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2552 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2553 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2554
2555 do {
2556 /* let the FW do it's magic ... */
2557 msleep(delay);
2558
2559 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2560
c4ff7cbf
EG
2561 /* Give the FW up to 5 second (500*10ms) */
2562 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2563
2564 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2565 cnt*delay, rc, seq);
2566
2567 /* is this a reply to our command? */
2568 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2569 rc &= FW_MSG_CODE_MASK;
2570 else {
2571 /* FW BUG! */
2572 BNX2X_ERR("FW failed to respond!\n");
2573 bnx2x_fw_dump(bp);
2574 rc = 0;
2575 }
c4ff7cbf 2576 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2577
2578 return rc;
2579}
2580
2581static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2582static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2583static void bnx2x_set_rx_mode(struct net_device *dev);
2584
2585static void bnx2x_e1h_disable(struct bnx2x *bp)
2586{
2587 int port = BP_PORT(bp);
2691d51d
EG
2588
2589 netif_tx_disable(bp->dev);
2590 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2591
2592 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2593
2691d51d
EG
2594 netif_carrier_off(bp->dev);
2595}
2596
2597static void bnx2x_e1h_enable(struct bnx2x *bp)
2598{
2599 int port = BP_PORT(bp);
2600
2601 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2602
2691d51d
EG
2603 /* Tx queue should be only reenabled */
2604 netif_tx_wake_all_queues(bp->dev);
2605
061bc702
EG
2606 /*
2607 * Should not call netif_carrier_on since it will be called if the link
2608 * is up when checking for link state
2609 */
2691d51d
EG
2610}
2611
2612static void bnx2x_update_min_max(struct bnx2x *bp)
2613{
2614 int port = BP_PORT(bp);
2615 int vn, i;
2616
2617 /* Init rate shaping and fairness contexts */
2618 bnx2x_init_port_minmax(bp);
2619
2620 bnx2x_calc_vn_weight_sum(bp);
2621
2622 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2623 bnx2x_init_vn_minmax(bp, 2*vn + port);
2624
2625 if (bp->port.pmf) {
2626 int func;
2627
2628 /* Set the attention towards other drivers on the same port */
2629 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2630 if (vn == BP_E1HVN(bp))
2631 continue;
2632
2633 func = ((vn << 1) | port);
2634 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2635 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2636 }
2637
2638 /* Store it to internal memory */
2639 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2640 REG_WR(bp, BAR_XSTRORM_INTMEM +
2641 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2642 ((u32 *)(&bp->cmng))[i]);
2643 }
2644}
2645
2646static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2647{
2691d51d 2648 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2649
2650 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2651
f34d28ea
EG
2652 /*
2653 * This is the only place besides the function initialization
2654 * where the bp->flags can change so it is done without any
2655 * locks
2656 */
2691d51d
EG
2657 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2658 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2659 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2660
2661 bnx2x_e1h_disable(bp);
2662 } else {
2663 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2664 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2665
2666 bnx2x_e1h_enable(bp);
2667 }
2668 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2669 }
2670 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2671
2672 bnx2x_update_min_max(bp);
2673 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2674 }
2675
2676 /* Report results to MCP */
2677 if (dcc_event)
2678 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2679 else
2680 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2681}
2682
28912902
MC
2683/* must be called under the spq lock */
2684static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2685{
2686 struct eth_spe *next_spe = bp->spq_prod_bd;
2687
2688 if (bp->spq_prod_bd == bp->spq_last_bd) {
2689 bp->spq_prod_bd = bp->spq;
2690 bp->spq_prod_idx = 0;
2691 DP(NETIF_MSG_TIMER, "end of spq\n");
2692 } else {
2693 bp->spq_prod_bd++;
2694 bp->spq_prod_idx++;
2695 }
2696 return next_spe;
2697}
2698
2699/* must be called under the spq lock */
2700static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2701{
2702 int func = BP_FUNC(bp);
2703
2704 /* Make sure that BD data is updated before writing the producer */
2705 wmb();
2706
2707 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2708 bp->spq_prod_idx);
2709 mmiowb();
2710}
2711
a2fbb9ea
ET
2712/* the slow path queue is odd since completions arrive on the fastpath ring */
2713static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2714 u32 data_hi, u32 data_lo, int common)
2715{
28912902 2716 struct eth_spe *spe;
a2fbb9ea 2717
34f80b04
EG
2718 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2719 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2720 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2721 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2722 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2723
2724#ifdef BNX2X_STOP_ON_ERROR
2725 if (unlikely(bp->panic))
2726 return -EIO;
2727#endif
2728
34f80b04 2729 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2730
2731 if (!bp->spq_left) {
2732 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2733 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2734 bnx2x_panic();
2735 return -EBUSY;
2736 }
f1410647 2737
28912902
MC
2738 spe = bnx2x_sp_get_next(bp);
2739
a2fbb9ea 2740 /* CID needs port number to be encoded int it */
28912902 2741 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2742 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2743 HW_CID(bp, cid)));
28912902 2744 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2745 if (common)
28912902 2746 spe->hdr.type |=
a2fbb9ea
ET
2747 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2748
28912902
MC
2749 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2750 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2751
2752 bp->spq_left--;
2753
28912902 2754 bnx2x_sp_prod_update(bp);
34f80b04 2755 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2756 return 0;
2757}
2758
2759/* acquire split MCP access lock register */
4a37fb66 2760static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2761{
a2fbb9ea 2762 u32 i, j, val;
34f80b04 2763 int rc = 0;
a2fbb9ea
ET
2764
2765 might_sleep();
2766 i = 100;
2767 for (j = 0; j < i*10; j++) {
2768 val = (1UL << 31);
2769 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2770 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2771 if (val & (1L << 31))
2772 break;
2773
2774 msleep(5);
2775 }
a2fbb9ea 2776 if (!(val & (1L << 31))) {
19680c48 2777 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2778 rc = -EBUSY;
2779 }
2780
2781 return rc;
2782}
2783
4a37fb66
YG
2784/* release split MCP access lock register */
2785static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2786{
2787 u32 val = 0;
2788
2789 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2790}
2791
2792static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2793{
2794 struct host_def_status_block *def_sb = bp->def_status_blk;
2795 u16 rc = 0;
2796
2797 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2798 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2799 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2800 rc |= 1;
2801 }
2802 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2803 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2804 rc |= 2;
2805 }
2806 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2807 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2808 rc |= 4;
2809 }
2810 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2811 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2812 rc |= 8;
2813 }
2814 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2815 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2816 rc |= 16;
2817 }
2818 return rc;
2819}
2820
2821/*
2822 * slow path service functions
2823 */
2824
2825static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2826{
34f80b04 2827 int port = BP_PORT(bp);
5c862848
EG
2828 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2829 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2830 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2831 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2832 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2833 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2834 u32 aeu_mask;
87942b46 2835 u32 nig_mask = 0;
a2fbb9ea 2836
a2fbb9ea
ET
2837 if (bp->attn_state & asserted)
2838 BNX2X_ERR("IGU ERROR\n");
2839
3fcaf2e5
EG
2840 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2841 aeu_mask = REG_RD(bp, aeu_addr);
2842
a2fbb9ea 2843 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2844 aeu_mask, asserted);
2845 aeu_mask &= ~(asserted & 0xff);
2846 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2847
3fcaf2e5
EG
2848 REG_WR(bp, aeu_addr, aeu_mask);
2849 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2850
3fcaf2e5 2851 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2852 bp->attn_state |= asserted;
3fcaf2e5 2853 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2854
2855 if (asserted & ATTN_HARD_WIRED_MASK) {
2856 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2857
a5e9a7cf
EG
2858 bnx2x_acquire_phy_lock(bp);
2859
877e9aa4 2860 /* save nig interrupt mask */
87942b46 2861 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2862 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2863
c18487ee 2864 bnx2x_link_attn(bp);
a2fbb9ea
ET
2865
2866 /* handle unicore attn? */
2867 }
2868 if (asserted & ATTN_SW_TIMER_4_FUNC)
2869 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2870
2871 if (asserted & GPIO_2_FUNC)
2872 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2873
2874 if (asserted & GPIO_3_FUNC)
2875 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2876
2877 if (asserted & GPIO_4_FUNC)
2878 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2879
2880 if (port == 0) {
2881 if (asserted & ATTN_GENERAL_ATTN_1) {
2882 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2883 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2884 }
2885 if (asserted & ATTN_GENERAL_ATTN_2) {
2886 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2887 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2888 }
2889 if (asserted & ATTN_GENERAL_ATTN_3) {
2890 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2891 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2892 }
2893 } else {
2894 if (asserted & ATTN_GENERAL_ATTN_4) {
2895 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2896 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2897 }
2898 if (asserted & ATTN_GENERAL_ATTN_5) {
2899 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2900 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2901 }
2902 if (asserted & ATTN_GENERAL_ATTN_6) {
2903 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2904 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2905 }
2906 }
2907
2908 } /* if hardwired */
2909
5c862848
EG
2910 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2911 asserted, hc_addr);
2912 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2913
2914 /* now set back the mask */
a5e9a7cf 2915 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2916 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2917 bnx2x_release_phy_lock(bp);
2918 }
a2fbb9ea
ET
2919}
2920
fd4ef40d
EG
2921static inline void bnx2x_fan_failure(struct bnx2x *bp)
2922{
2923 int port = BP_PORT(bp);
2924
2925 /* mark the failure */
2926 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2927 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2928 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2929 bp->link_params.ext_phy_config);
2930
2931 /* log the failure */
2932 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2933 " the driver to shutdown the card to prevent permanent"
2934 " damage. Please contact Dell Support for assistance\n",
2935 bp->dev->name);
2936}
ab6ad5a4 2937
877e9aa4 2938static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2939{
34f80b04 2940 int port = BP_PORT(bp);
877e9aa4 2941 int reg_offset;
4d295db0 2942 u32 val, swap_val, swap_override;
877e9aa4 2943
34f80b04
EG
2944 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2945 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2946
34f80b04 2947 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2948
2949 val = REG_RD(bp, reg_offset);
2950 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2951 REG_WR(bp, reg_offset, val);
2952
2953 BNX2X_ERR("SPIO5 hw attention\n");
2954
fd4ef40d 2955 /* Fan failure attention */
35b19ba5
EG
2956 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2958 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2959 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2960 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2961 /* The PHY reset is controlled by GPIO 1 */
2962 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2963 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2964 break;
2965
4d295db0
EG
2966 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2967 /* The PHY reset is controlled by GPIO 1 */
2968 /* fake the port number to cancel the swap done in
2969 set_gpio() */
2970 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2971 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2972 port = (swap_val && swap_override) ^ 1;
2973 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2974 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2975 break;
2976
877e9aa4
ET
2977 default:
2978 break;
2979 }
fd4ef40d 2980 bnx2x_fan_failure(bp);
877e9aa4 2981 }
34f80b04 2982
589abe3a
EG
2983 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2984 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2985 bnx2x_acquire_phy_lock(bp);
2986 bnx2x_handle_module_detect_int(&bp->link_params);
2987 bnx2x_release_phy_lock(bp);
2988 }
2989
34f80b04
EG
2990 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2991
2992 val = REG_RD(bp, reg_offset);
2993 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2994 REG_WR(bp, reg_offset, val);
2995
2996 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2997 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2998 bnx2x_panic();
2999 }
877e9aa4
ET
3000}
3001
3002static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3003{
3004 u32 val;
3005
0626b899 3006 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3007
3008 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3009 BNX2X_ERR("DB hw attention 0x%x\n", val);
3010 /* DORQ discard attention */
3011 if (val & 0x2)
3012 BNX2X_ERR("FATAL error from DORQ\n");
3013 }
34f80b04
EG
3014
3015 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3016
3017 int port = BP_PORT(bp);
3018 int reg_offset;
3019
3020 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3021 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3022
3023 val = REG_RD(bp, reg_offset);
3024 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3025 REG_WR(bp, reg_offset, val);
3026
3027 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3028 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3029 bnx2x_panic();
3030 }
877e9aa4
ET
3031}
3032
3033static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3034{
3035 u32 val;
3036
3037 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3038
3039 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3040 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3041 /* CFC error attention */
3042 if (val & 0x2)
3043 BNX2X_ERR("FATAL error from CFC\n");
3044 }
3045
3046 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3047
3048 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3049 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3050 /* RQ_USDMDP_FIFO_OVERFLOW */
3051 if (val & 0x18000)
3052 BNX2X_ERR("FATAL error from PXP\n");
3053 }
34f80b04
EG
3054
3055 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3056
3057 int port = BP_PORT(bp);
3058 int reg_offset;
3059
3060 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3061 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3062
3063 val = REG_RD(bp, reg_offset);
3064 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3065 REG_WR(bp, reg_offset, val);
3066
3067 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3068 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3069 bnx2x_panic();
3070 }
877e9aa4
ET
3071}
3072
3073static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3074{
34f80b04
EG
3075 u32 val;
3076
877e9aa4
ET
3077 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3078
34f80b04
EG
3079 if (attn & BNX2X_PMF_LINK_ASSERT) {
3080 int func = BP_FUNC(bp);
3081
3082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3083 bp->mf_config = SHMEM_RD(bp,
3084 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3085 val = SHMEM_RD(bp, func_mb[func].drv_status);
3086 if (val & DRV_STATUS_DCC_EVENT_MASK)
3087 bnx2x_dcc_event(bp,
3088 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3089 bnx2x__link_status_update(bp);
2691d51d 3090 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3091 bnx2x_pmf_update(bp);
3092
3093 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3094
3095 BNX2X_ERR("MC assert!\n");
3096 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3097 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3098 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3099 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3100 bnx2x_panic();
3101
3102 } else if (attn & BNX2X_MCP_ASSERT) {
3103
3104 BNX2X_ERR("MCP assert!\n");
3105 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3106 bnx2x_fw_dump(bp);
877e9aa4
ET
3107
3108 } else
3109 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3110 }
3111
3112 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3113 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3114 if (attn & BNX2X_GRC_TIMEOUT) {
3115 val = CHIP_IS_E1H(bp) ?
3116 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3117 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3118 }
3119 if (attn & BNX2X_GRC_RSV) {
3120 val = CHIP_IS_E1H(bp) ?
3121 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3122 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3123 }
877e9aa4 3124 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3125 }
3126}
3127
3128static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3129{
a2fbb9ea
ET
3130 struct attn_route attn;
3131 struct attn_route group_mask;
34f80b04 3132 int port = BP_PORT(bp);
877e9aa4 3133 int index;
a2fbb9ea
ET
3134 u32 reg_addr;
3135 u32 val;
3fcaf2e5 3136 u32 aeu_mask;
a2fbb9ea
ET
3137
3138 /* need to take HW lock because MCP or other port might also
3139 try to handle this event */
4a37fb66 3140 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3141
3142 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3143 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3144 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3145 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3146 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3147 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3148
3149 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3150 if (deasserted & (1 << index)) {
3151 group_mask = bp->attn_group[index];
3152
34f80b04
EG
3153 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3154 index, group_mask.sig[0], group_mask.sig[1],
3155 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3156
877e9aa4
ET
3157 bnx2x_attn_int_deasserted3(bp,
3158 attn.sig[3] & group_mask.sig[3]);
3159 bnx2x_attn_int_deasserted1(bp,
3160 attn.sig[1] & group_mask.sig[1]);
3161 bnx2x_attn_int_deasserted2(bp,
3162 attn.sig[2] & group_mask.sig[2]);
3163 bnx2x_attn_int_deasserted0(bp,
3164 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3165
a2fbb9ea
ET
3166 if ((attn.sig[0] & group_mask.sig[0] &
3167 HW_PRTY_ASSERT_SET_0) ||
3168 (attn.sig[1] & group_mask.sig[1] &
3169 HW_PRTY_ASSERT_SET_1) ||
3170 (attn.sig[2] & group_mask.sig[2] &
3171 HW_PRTY_ASSERT_SET_2))
6378c025 3172 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3173 }
3174 }
3175
4a37fb66 3176 bnx2x_release_alr(bp);
a2fbb9ea 3177
5c862848 3178 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3179
3180 val = ~deasserted;
3fcaf2e5
EG
3181 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3182 val, reg_addr);
5c862848 3183 REG_WR(bp, reg_addr, val);
a2fbb9ea 3184
a2fbb9ea 3185 if (~bp->attn_state & deasserted)
3fcaf2e5 3186 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3187
3188 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3189 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3190
3fcaf2e5
EG
3191 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3192 aeu_mask = REG_RD(bp, reg_addr);
3193
3194 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3195 aeu_mask, deasserted);
3196 aeu_mask |= (deasserted & 0xff);
3197 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3198
3fcaf2e5
EG
3199 REG_WR(bp, reg_addr, aeu_mask);
3200 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3201
3202 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3203 bp->attn_state &= ~deasserted;
3204 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3205}
3206
3207static void bnx2x_attn_int(struct bnx2x *bp)
3208{
3209 /* read local copy of bits */
68d59484
EG
3210 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3211 attn_bits);
3212 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3213 attn_bits_ack);
a2fbb9ea
ET
3214 u32 attn_state = bp->attn_state;
3215
3216 /* look for changed bits */
3217 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3218 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3219
3220 DP(NETIF_MSG_HW,
3221 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3222 attn_bits, attn_ack, asserted, deasserted);
3223
3224 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3225 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3226
3227 /* handle bits that were raised */
3228 if (asserted)
3229 bnx2x_attn_int_asserted(bp, asserted);
3230
3231 if (deasserted)
3232 bnx2x_attn_int_deasserted(bp, deasserted);
3233}
3234
3235static void bnx2x_sp_task(struct work_struct *work)
3236{
1cf167f2 3237 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3238 u16 status;
3239
34f80b04 3240
a2fbb9ea
ET
3241 /* Return here if interrupt is disabled */
3242 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3243 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3244 return;
3245 }
3246
3247 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3248/* if (status == 0) */
3249/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3250
3196a88a 3251 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3252
877e9aa4
ET
3253 /* HW attentions */
3254 if (status & 0x1)
a2fbb9ea 3255 bnx2x_attn_int(bp);
a2fbb9ea 3256
68d59484 3257 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3258 IGU_INT_NOP, 1);
3259 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3260 IGU_INT_NOP, 1);
3261 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3262 IGU_INT_NOP, 1);
3263 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3264 IGU_INT_NOP, 1);
3265 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3266 IGU_INT_ENABLE, 1);
877e9aa4 3267
a2fbb9ea
ET
3268}
3269
3270static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3271{
3272 struct net_device *dev = dev_instance;
3273 struct bnx2x *bp = netdev_priv(dev);
3274
3275 /* Return here if interrupt is disabled */
3276 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3277 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3278 return IRQ_HANDLED;
3279 }
3280
8d9c5f34 3281 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3282
3283#ifdef BNX2X_STOP_ON_ERROR
3284 if (unlikely(bp->panic))
3285 return IRQ_HANDLED;
3286#endif
3287
993ac7b5
MC
3288#ifdef BCM_CNIC
3289 {
3290 struct cnic_ops *c_ops;
3291
3292 rcu_read_lock();
3293 c_ops = rcu_dereference(bp->cnic_ops);
3294 if (c_ops)
3295 c_ops->cnic_handler(bp->cnic_data, NULL);
3296 rcu_read_unlock();
3297 }
3298#endif
1cf167f2 3299 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3300
3301 return IRQ_HANDLED;
3302}
3303
3304/* end of slow path */
3305
3306/* Statistics */
3307
3308/****************************************************************************
3309* Macros
3310****************************************************************************/
3311
a2fbb9ea
ET
3312/* sum[hi:lo] += add[hi:lo] */
3313#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3314 do { \
3315 s_lo += a_lo; \
f5ba6772 3316 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3317 } while (0)
3318
3319/* difference = minuend - subtrahend */
3320#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3321 do { \
bb2a0f7a
YG
3322 if (m_lo < s_lo) { \
3323 /* underflow */ \
a2fbb9ea 3324 d_hi = m_hi - s_hi; \
bb2a0f7a 3325 if (d_hi > 0) { \
6378c025 3326 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3327 d_hi--; \
3328 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3329 } else { \
6378c025 3330 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3331 d_hi = 0; \
3332 d_lo = 0; \
3333 } \
bb2a0f7a
YG
3334 } else { \
3335 /* m_lo >= s_lo */ \
a2fbb9ea 3336 if (m_hi < s_hi) { \
bb2a0f7a
YG
3337 d_hi = 0; \
3338 d_lo = 0; \
3339 } else { \
6378c025 3340 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3341 d_hi = m_hi - s_hi; \
3342 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3343 } \
3344 } \
3345 } while (0)
3346
bb2a0f7a 3347#define UPDATE_STAT64(s, t) \
a2fbb9ea 3348 do { \
bb2a0f7a
YG
3349 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3350 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3351 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3352 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3353 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3354 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3355 } while (0)
3356
bb2a0f7a 3357#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3358 do { \
bb2a0f7a
YG
3359 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3360 diff.lo, new->s##_lo, old->s##_lo); \
3361 ADD_64(estats->t##_hi, diff.hi, \
3362 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3363 } while (0)
3364
3365/* sum[hi:lo] += add */
3366#define ADD_EXTEND_64(s_hi, s_lo, a) \
3367 do { \
3368 s_lo += a; \
3369 s_hi += (s_lo < a) ? 1 : 0; \
3370 } while (0)
3371
bb2a0f7a 3372#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3373 do { \
bb2a0f7a
YG
3374 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3375 pstats->mac_stx[1].s##_lo, \
3376 new->s); \
a2fbb9ea
ET
3377 } while (0)
3378
bb2a0f7a 3379#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3380 do { \
4781bfad
EG
3381 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3382 old_tclient->s = tclient->s; \
de832a55
EG
3383 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3384 } while (0)
3385
3386#define UPDATE_EXTEND_USTAT(s, t) \
3387 do { \
3388 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3389 old_uclient->s = uclient->s; \
3390 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3391 } while (0)
3392
3393#define UPDATE_EXTEND_XSTAT(s, t) \
3394 do { \
4781bfad
EG
3395 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3396 old_xclient->s = xclient->s; \
de832a55
EG
3397 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3398 } while (0)
3399
3400/* minuend -= subtrahend */
3401#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3402 do { \
3403 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3404 } while (0)
3405
3406/* minuend[hi:lo] -= subtrahend */
3407#define SUB_EXTEND_64(m_hi, m_lo, s) \
3408 do { \
3409 SUB_64(m_hi, 0, m_lo, s); \
3410 } while (0)
3411
3412#define SUB_EXTEND_USTAT(s, t) \
3413 do { \
3414 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3415 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3416 } while (0)
3417
3418/*
3419 * General service functions
3420 */
3421
3422static inline long bnx2x_hilo(u32 *hiref)
3423{
3424 u32 lo = *(hiref + 1);
3425#if (BITS_PER_LONG == 64)
3426 u32 hi = *hiref;
3427
3428 return HILO_U64(hi, lo);
3429#else
3430 return lo;
3431#endif
3432}
3433
3434/*
3435 * Init service functions
3436 */
3437
bb2a0f7a
YG
3438static void bnx2x_storm_stats_post(struct bnx2x *bp)
3439{
3440 if (!bp->stats_pending) {
3441 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3442 int i, rc;
bb2a0f7a
YG
3443
3444 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3445 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3446 for_each_queue(bp, i)
3447 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3448
3449 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3450 ((u32 *)&ramrod_data)[1],
3451 ((u32 *)&ramrod_data)[0], 0);
3452 if (rc == 0) {
3453 /* stats ramrod has it's own slot on the spq */
3454 bp->spq_left++;
3455 bp->stats_pending = 1;
3456 }
3457 }
3458}
3459
bb2a0f7a
YG
3460static void bnx2x_hw_stats_post(struct bnx2x *bp)
3461{
3462 struct dmae_command *dmae = &bp->stats_dmae;
3463 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3464
3465 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3466 if (CHIP_REV_IS_SLOW(bp))
3467 return;
bb2a0f7a
YG
3468
3469 /* loader */
3470 if (bp->executer_idx) {
3471 int loader_idx = PMF_DMAE_C(bp);
3472
3473 memset(dmae, 0, sizeof(struct dmae_command));
3474
3475 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3476 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3477 DMAE_CMD_DST_RESET |
3478#ifdef __BIG_ENDIAN
3479 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3480#else
3481 DMAE_CMD_ENDIANITY_DW_SWAP |
3482#endif
3483 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3484 DMAE_CMD_PORT_0) |
3485 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3486 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3487 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3488 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3489 sizeof(struct dmae_command) *
3490 (loader_idx + 1)) >> 2;
3491 dmae->dst_addr_hi = 0;
3492 dmae->len = sizeof(struct dmae_command) >> 2;
3493 if (CHIP_IS_E1(bp))
3494 dmae->len--;
3495 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3496 dmae->comp_addr_hi = 0;
3497 dmae->comp_val = 1;
3498
3499 *stats_comp = 0;
3500 bnx2x_post_dmae(bp, dmae, loader_idx);
3501
3502 } else if (bp->func_stx) {
3503 *stats_comp = 0;
3504 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3505 }
3506}
3507
3508static int bnx2x_stats_comp(struct bnx2x *bp)
3509{
3510 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3511 int cnt = 10;
3512
3513 might_sleep();
3514 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3515 if (!cnt) {
3516 BNX2X_ERR("timeout waiting for stats finished\n");
3517 break;
3518 }
3519 cnt--;
12469401 3520 msleep(1);
bb2a0f7a
YG
3521 }
3522 return 1;
3523}
3524
3525/*
3526 * Statistics service functions
3527 */
3528
3529static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3530{
3531 struct dmae_command *dmae;
3532 u32 opcode;
3533 int loader_idx = PMF_DMAE_C(bp);
3534 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3535
3536 /* sanity */
3537 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3538 BNX2X_ERR("BUG!\n");
3539 return;
3540 }
3541
3542 bp->executer_idx = 0;
3543
3544 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3545 DMAE_CMD_C_ENABLE |
3546 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3547#ifdef __BIG_ENDIAN
3548 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3549#else
3550 DMAE_CMD_ENDIANITY_DW_SWAP |
3551#endif
3552 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3553 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3554
3555 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3556 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3557 dmae->src_addr_lo = bp->port.port_stx >> 2;
3558 dmae->src_addr_hi = 0;
3559 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3560 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3561 dmae->len = DMAE_LEN32_RD_MAX;
3562 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3563 dmae->comp_addr_hi = 0;
3564 dmae->comp_val = 1;
3565
3566 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3567 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3568 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3569 dmae->src_addr_hi = 0;
7a9b2557
VZ
3570 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3571 DMAE_LEN32_RD_MAX * 4);
3572 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3573 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3574 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3575 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3576 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3577 dmae->comp_val = DMAE_COMP_VAL;
3578
3579 *stats_comp = 0;
3580 bnx2x_hw_stats_post(bp);
3581 bnx2x_stats_comp(bp);
3582}
3583
3584static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3585{
3586 struct dmae_command *dmae;
34f80b04 3587 int port = BP_PORT(bp);
bb2a0f7a 3588 int vn = BP_E1HVN(bp);
a2fbb9ea 3589 u32 opcode;
bb2a0f7a 3590 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3591 u32 mac_addr;
bb2a0f7a
YG
3592 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3593
3594 /* sanity */
3595 if (!bp->link_vars.link_up || !bp->port.pmf) {
3596 BNX2X_ERR("BUG!\n");
3597 return;
3598 }
a2fbb9ea
ET
3599
3600 bp->executer_idx = 0;
bb2a0f7a
YG
3601
3602 /* MCP */
3603 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3604 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3605 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3606#ifdef __BIG_ENDIAN
bb2a0f7a 3607 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3608#else
bb2a0f7a 3609 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3610#endif
bb2a0f7a
YG
3611 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3612 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3613
bb2a0f7a 3614 if (bp->port.port_stx) {
a2fbb9ea
ET
3615
3616 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617 dmae->opcode = opcode;
bb2a0f7a
YG
3618 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3619 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3620 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3621 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3622 dmae->len = sizeof(struct host_port_stats) >> 2;
3623 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3624 dmae->comp_addr_hi = 0;
3625 dmae->comp_val = 1;
a2fbb9ea
ET
3626 }
3627
bb2a0f7a
YG
3628 if (bp->func_stx) {
3629
3630 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631 dmae->opcode = opcode;
3632 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3633 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3634 dmae->dst_addr_lo = bp->func_stx >> 2;
3635 dmae->dst_addr_hi = 0;
3636 dmae->len = sizeof(struct host_func_stats) >> 2;
3637 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3638 dmae->comp_addr_hi = 0;
3639 dmae->comp_val = 1;
a2fbb9ea
ET
3640 }
3641
bb2a0f7a 3642 /* MAC */
a2fbb9ea
ET
3643 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3644 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3645 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3646#ifdef __BIG_ENDIAN
3647 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3648#else
3649 DMAE_CMD_ENDIANITY_DW_SWAP |
3650#endif
bb2a0f7a
YG
3651 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3652 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3653
c18487ee 3654 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3655
3656 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3657 NIG_REG_INGRESS_BMAC0_MEM);
3658
3659 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3660 BIGMAC_REGISTER_TX_STAT_GTBYT */
3661 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3662 dmae->opcode = opcode;
3663 dmae->src_addr_lo = (mac_addr +
3664 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3665 dmae->src_addr_hi = 0;
3666 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3667 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3668 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3669 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3670 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3671 dmae->comp_addr_hi = 0;
3672 dmae->comp_val = 1;
3673
3674 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3675 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3676 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3677 dmae->opcode = opcode;
3678 dmae->src_addr_lo = (mac_addr +
3679 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3680 dmae->src_addr_hi = 0;
3681 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3682 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3683 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3684 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3685 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3686 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3687 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3688 dmae->comp_addr_hi = 0;
3689 dmae->comp_val = 1;
3690
c18487ee 3691 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3692
3693 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3694
3695 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3696 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3697 dmae->opcode = opcode;
3698 dmae->src_addr_lo = (mac_addr +
3699 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3700 dmae->src_addr_hi = 0;
3701 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3702 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3703 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3704 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3705 dmae->comp_addr_hi = 0;
3706 dmae->comp_val = 1;
3707
3708 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (mac_addr +
3712 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3715 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3716 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3717 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3718 dmae->len = 1;
3719 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3720 dmae->comp_addr_hi = 0;
3721 dmae->comp_val = 1;
3722
3723 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3724 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3725 dmae->opcode = opcode;
3726 dmae->src_addr_lo = (mac_addr +
3727 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3728 dmae->src_addr_hi = 0;
3729 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3730 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3731 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3732 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3733 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3734 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3735 dmae->comp_addr_hi = 0;
3736 dmae->comp_val = 1;
3737 }
3738
3739 /* NIG */
bb2a0f7a
YG
3740 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3741 dmae->opcode = opcode;
3742 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3743 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3744 dmae->src_addr_hi = 0;
3745 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3746 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3747 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3748 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3749 dmae->comp_addr_hi = 0;
3750 dmae->comp_val = 1;
3751
3752 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3753 dmae->opcode = opcode;
3754 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3755 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3756 dmae->src_addr_hi = 0;
3757 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3758 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3759 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3760 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3761 dmae->len = (2*sizeof(u32)) >> 2;
3762 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3763 dmae->comp_addr_hi = 0;
3764 dmae->comp_val = 1;
3765
a2fbb9ea
ET
3766 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3767 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3768 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3769 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3770#ifdef __BIG_ENDIAN
3771 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3772#else
3773 DMAE_CMD_ENDIANITY_DW_SWAP |
3774#endif
bb2a0f7a
YG
3775 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3776 (vn << DMAE_CMD_E1HVN_SHIFT));
3777 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3778 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3779 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3780 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3781 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3782 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3783 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3784 dmae->len = (2*sizeof(u32)) >> 2;
3785 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3786 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3787 dmae->comp_val = DMAE_COMP_VAL;
3788
3789 *stats_comp = 0;
a2fbb9ea
ET
3790}
3791
bb2a0f7a 3792static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3793{
bb2a0f7a
YG
3794 struct dmae_command *dmae = &bp->stats_dmae;
3795 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3796
bb2a0f7a
YG
3797 /* sanity */
3798 if (!bp->func_stx) {
3799 BNX2X_ERR("BUG!\n");
3800 return;
3801 }
a2fbb9ea 3802
bb2a0f7a
YG
3803 bp->executer_idx = 0;
3804 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3805
bb2a0f7a
YG
3806 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3807 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3808 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3809#ifdef __BIG_ENDIAN
3810 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3811#else
3812 DMAE_CMD_ENDIANITY_DW_SWAP |
3813#endif
3814 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3815 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3816 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3817 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3818 dmae->dst_addr_lo = bp->func_stx >> 2;
3819 dmae->dst_addr_hi = 0;
3820 dmae->len = sizeof(struct host_func_stats) >> 2;
3821 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3822 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3823 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3824
bb2a0f7a
YG
3825 *stats_comp = 0;
3826}
a2fbb9ea 3827
bb2a0f7a
YG
3828static void bnx2x_stats_start(struct bnx2x *bp)
3829{
3830 if (bp->port.pmf)
3831 bnx2x_port_stats_init(bp);
3832
3833 else if (bp->func_stx)
3834 bnx2x_func_stats_init(bp);
3835
3836 bnx2x_hw_stats_post(bp);
3837 bnx2x_storm_stats_post(bp);
3838}
3839
3840static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3841{
3842 bnx2x_stats_comp(bp);
3843 bnx2x_stats_pmf_update(bp);
3844 bnx2x_stats_start(bp);
3845}
3846
3847static void bnx2x_stats_restart(struct bnx2x *bp)
3848{
3849 bnx2x_stats_comp(bp);
3850 bnx2x_stats_start(bp);
3851}
3852
3853static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3854{
3855 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3856 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3857 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3858 struct {
3859 u32 lo;
3860 u32 hi;
3861 } diff;
bb2a0f7a
YG
3862
3863 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3864 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3865 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3866 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3867 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3868 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3869 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3870 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3871 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3872 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3873 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3874 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3875 UPDATE_STAT64(tx_stat_gt127,
3876 tx_stat_etherstatspkts65octetsto127octets);
3877 UPDATE_STAT64(tx_stat_gt255,
3878 tx_stat_etherstatspkts128octetsto255octets);
3879 UPDATE_STAT64(tx_stat_gt511,
3880 tx_stat_etherstatspkts256octetsto511octets);
3881 UPDATE_STAT64(tx_stat_gt1023,
3882 tx_stat_etherstatspkts512octetsto1023octets);
3883 UPDATE_STAT64(tx_stat_gt1518,
3884 tx_stat_etherstatspkts1024octetsto1522octets);
3885 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3886 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3887 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3888 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3889 UPDATE_STAT64(tx_stat_gterr,
3890 tx_stat_dot3statsinternalmactransmiterrors);
3891 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3892
3893 estats->pause_frames_received_hi =
3894 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3895 estats->pause_frames_received_lo =
3896 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3897
3898 estats->pause_frames_sent_hi =
3899 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3900 estats->pause_frames_sent_lo =
3901 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3902}
3903
3904static void bnx2x_emac_stats_update(struct bnx2x *bp)
3905{
3906 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3907 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3908 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3909
3910 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3911 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3912 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3913 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3914 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3915 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3916 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3917 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3918 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3919 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3920 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3921 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3922 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3923 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3924 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3925 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3926 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3927 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3928 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3929 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3930 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3931 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3932 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3933 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3934 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3935 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3936 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3937 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3938 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3939 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3940 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3941
3942 estats->pause_frames_received_hi =
3943 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3944 estats->pause_frames_received_lo =
3945 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3946 ADD_64(estats->pause_frames_received_hi,
3947 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3948 estats->pause_frames_received_lo,
3949 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3950
3951 estats->pause_frames_sent_hi =
3952 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3953 estats->pause_frames_sent_lo =
3954 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3955 ADD_64(estats->pause_frames_sent_hi,
3956 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3957 estats->pause_frames_sent_lo,
3958 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3959}
3960
3961static int bnx2x_hw_stats_update(struct bnx2x *bp)
3962{
3963 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3964 struct nig_stats *old = &(bp->port.old_nig_stats);
3965 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3966 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3967 struct {
3968 u32 lo;
3969 u32 hi;
3970 } diff;
de832a55 3971 u32 nig_timer_max;
bb2a0f7a
YG
3972
3973 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3974 bnx2x_bmac_stats_update(bp);
3975
3976 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3977 bnx2x_emac_stats_update(bp);
3978
3979 else { /* unreached */
c3eefaf6 3980 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
3981 return -1;
3982 }
a2fbb9ea 3983
bb2a0f7a
YG
3984 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3985 new->brb_discard - old->brb_discard);
66e855f3
YG
3986 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3987 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3988
bb2a0f7a
YG
3989 UPDATE_STAT64_NIG(egress_mac_pkt0,
3990 etherstatspkts1024octetsto1522octets);
3991 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3992
bb2a0f7a 3993 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3994
bb2a0f7a
YG
3995 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3996 sizeof(struct mac_stx));
3997 estats->brb_drop_hi = pstats->brb_drop_hi;
3998 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3999
bb2a0f7a 4000 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 4001
de832a55
EG
4002 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4003 if (nig_timer_max != estats->nig_timer_max) {
4004 estats->nig_timer_max = nig_timer_max;
4005 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
4006 }
4007
bb2a0f7a 4008 return 0;
a2fbb9ea
ET
4009}
4010
bb2a0f7a 4011static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
4012{
4013 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 4014 struct tstorm_per_port_stats *tport =
de832a55 4015 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
4016 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4017 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
4018 int i;
4019
6fe49bb9
EG
4020 memcpy(&(fstats->total_bytes_received_hi),
4021 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
de832a55
EG
4022 sizeof(struct host_func_stats) - 2*sizeof(u32));
4023 estats->error_bytes_received_hi = 0;
4024 estats->error_bytes_received_lo = 0;
4025 estats->etherstatsoverrsizepkts_hi = 0;
4026 estats->etherstatsoverrsizepkts_lo = 0;
4027 estats->no_buff_discard_hi = 0;
4028 estats->no_buff_discard_lo = 0;
a2fbb9ea 4029
ca00392c 4030 for_each_rx_queue(bp, i) {
de832a55
EG
4031 struct bnx2x_fastpath *fp = &bp->fp[i];
4032 int cl_id = fp->cl_id;
4033 struct tstorm_per_client_stats *tclient =
4034 &stats->tstorm_common.client_statistics[cl_id];
4035 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4036 struct ustorm_per_client_stats *uclient =
4037 &stats->ustorm_common.client_statistics[cl_id];
4038 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4039 struct xstorm_per_client_stats *xclient =
4040 &stats->xstorm_common.client_statistics[cl_id];
4041 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4042 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4043 u32 diff;
4044
4045 /* are storm stats valid? */
4046 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4047 bp->stats_counter) {
de832a55
EG
4048 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4049 " xstorm counter (%d) != stats_counter (%d)\n",
4050 i, xclient->stats_counter, bp->stats_counter);
4051 return -1;
4052 }
4053 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4054 bp->stats_counter) {
de832a55
EG
4055 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4056 " tstorm counter (%d) != stats_counter (%d)\n",
4057 i, tclient->stats_counter, bp->stats_counter);
4058 return -2;
4059 }
4060 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4061 bp->stats_counter) {
4062 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4063 " ustorm counter (%d) != stats_counter (%d)\n",
4064 i, uclient->stats_counter, bp->stats_counter);
4065 return -4;
4066 }
a2fbb9ea 4067
de832a55 4068 qstats->total_bytes_received_hi =
ca00392c 4069 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4070 qstats->total_bytes_received_lo =
ca00392c
EG
4071 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4072
4073 ADD_64(qstats->total_bytes_received_hi,
4074 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4075 qstats->total_bytes_received_lo,
4076 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4077
4078 ADD_64(qstats->total_bytes_received_hi,
4079 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4080 qstats->total_bytes_received_lo,
4081 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4082
4083 qstats->valid_bytes_received_hi =
4084 qstats->total_bytes_received_hi;
de832a55 4085 qstats->valid_bytes_received_lo =
ca00392c 4086 qstats->total_bytes_received_lo;
bb2a0f7a 4087
de832a55 4088 qstats->error_bytes_received_hi =
bb2a0f7a 4089 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4090 qstats->error_bytes_received_lo =
bb2a0f7a 4091 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4092
de832a55
EG
4093 ADD_64(qstats->total_bytes_received_hi,
4094 qstats->error_bytes_received_hi,
4095 qstats->total_bytes_received_lo,
4096 qstats->error_bytes_received_lo);
4097
4098 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4099 total_unicast_packets_received);
4100 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4101 total_multicast_packets_received);
4102 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4103 total_broadcast_packets_received);
4104 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4105 etherstatsoverrsizepkts);
4106 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4107
4108 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4109 total_unicast_packets_received);
4110 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4111 total_multicast_packets_received);
4112 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4113 total_broadcast_packets_received);
4114 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4115 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4116 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4117
4118 qstats->total_bytes_transmitted_hi =
ca00392c 4119 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4120 qstats->total_bytes_transmitted_lo =
ca00392c
EG
4121 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4122
4123 ADD_64(qstats->total_bytes_transmitted_hi,
4124 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4125 qstats->total_bytes_transmitted_lo,
4126 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4127
4128 ADD_64(qstats->total_bytes_transmitted_hi,
4129 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4130 qstats->total_bytes_transmitted_lo,
4131 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4132
de832a55
EG
4133 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4134 total_unicast_packets_transmitted);
4135 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4136 total_multicast_packets_transmitted);
4137 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4138 total_broadcast_packets_transmitted);
4139
4140 old_tclient->checksum_discard = tclient->checksum_discard;
4141 old_tclient->ttl0_discard = tclient->ttl0_discard;
4142
4143 ADD_64(fstats->total_bytes_received_hi,
4144 qstats->total_bytes_received_hi,
4145 fstats->total_bytes_received_lo,
4146 qstats->total_bytes_received_lo);
4147 ADD_64(fstats->total_bytes_transmitted_hi,
4148 qstats->total_bytes_transmitted_hi,
4149 fstats->total_bytes_transmitted_lo,
4150 qstats->total_bytes_transmitted_lo);
4151 ADD_64(fstats->total_unicast_packets_received_hi,
4152 qstats->total_unicast_packets_received_hi,
4153 fstats->total_unicast_packets_received_lo,
4154 qstats->total_unicast_packets_received_lo);
4155 ADD_64(fstats->total_multicast_packets_received_hi,
4156 qstats->total_multicast_packets_received_hi,
4157 fstats->total_multicast_packets_received_lo,
4158 qstats->total_multicast_packets_received_lo);
4159 ADD_64(fstats->total_broadcast_packets_received_hi,
4160 qstats->total_broadcast_packets_received_hi,
4161 fstats->total_broadcast_packets_received_lo,
4162 qstats->total_broadcast_packets_received_lo);
4163 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4164 qstats->total_unicast_packets_transmitted_hi,
4165 fstats->total_unicast_packets_transmitted_lo,
4166 qstats->total_unicast_packets_transmitted_lo);
4167 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4168 qstats->total_multicast_packets_transmitted_hi,
4169 fstats->total_multicast_packets_transmitted_lo,
4170 qstats->total_multicast_packets_transmitted_lo);
4171 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4172 qstats->total_broadcast_packets_transmitted_hi,
4173 fstats->total_broadcast_packets_transmitted_lo,
4174 qstats->total_broadcast_packets_transmitted_lo);
4175 ADD_64(fstats->valid_bytes_received_hi,
4176 qstats->valid_bytes_received_hi,
4177 fstats->valid_bytes_received_lo,
4178 qstats->valid_bytes_received_lo);
4179
4180 ADD_64(estats->error_bytes_received_hi,
4181 qstats->error_bytes_received_hi,
4182 estats->error_bytes_received_lo,
4183 qstats->error_bytes_received_lo);
4184 ADD_64(estats->etherstatsoverrsizepkts_hi,
4185 qstats->etherstatsoverrsizepkts_hi,
4186 estats->etherstatsoverrsizepkts_lo,
4187 qstats->etherstatsoverrsizepkts_lo);
4188 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4189 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4190 }
4191
4192 ADD_64(fstats->total_bytes_received_hi,
4193 estats->rx_stat_ifhcinbadoctets_hi,
4194 fstats->total_bytes_received_lo,
4195 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
4196
4197 memcpy(estats, &(fstats->total_bytes_received_hi),
4198 sizeof(struct host_func_stats) - 2*sizeof(u32));
4199
de832a55
EG
4200 ADD_64(estats->etherstatsoverrsizepkts_hi,
4201 estats->rx_stat_dot3statsframestoolong_hi,
4202 estats->etherstatsoverrsizepkts_lo,
4203 estats->rx_stat_dot3statsframestoolong_lo);
4204 ADD_64(estats->error_bytes_received_hi,
4205 estats->rx_stat_ifhcinbadoctets_hi,
4206 estats->error_bytes_received_lo,
4207 estats->rx_stat_ifhcinbadoctets_lo);
4208
4209 if (bp->port.pmf) {
4210 estats->mac_filter_discard =
4211 le32_to_cpu(tport->mac_filter_discard);
4212 estats->xxoverflow_discard =
4213 le32_to_cpu(tport->xxoverflow_discard);
4214 estats->brb_truncate_discard =
bb2a0f7a 4215 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
4216 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4217 }
bb2a0f7a
YG
4218
4219 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4220
de832a55
EG
4221 bp->stats_pending = 0;
4222
a2fbb9ea
ET
4223 return 0;
4224}
4225
bb2a0f7a 4226static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4227{
bb2a0f7a 4228 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4229 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4230 int i;
a2fbb9ea
ET
4231
4232 nstats->rx_packets =
4233 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4234 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4235 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4236
4237 nstats->tx_packets =
4238 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4239 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4240 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4241
de832a55 4242 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4243
0e39e645 4244 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4245
de832a55 4246 nstats->rx_dropped = estats->mac_discard;
ca00392c 4247 for_each_rx_queue(bp, i)
de832a55
EG
4248 nstats->rx_dropped +=
4249 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4250
a2fbb9ea
ET
4251 nstats->tx_dropped = 0;
4252
4253 nstats->multicast =
de832a55 4254 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4255
bb2a0f7a 4256 nstats->collisions =
de832a55 4257 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
4258
4259 nstats->rx_length_errors =
de832a55
EG
4260 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4261 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4262 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4263 bnx2x_hilo(&estats->brb_truncate_hi);
4264 nstats->rx_crc_errors =
4265 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4266 nstats->rx_frame_errors =
4267 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4268 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
4269 nstats->rx_missed_errors = estats->xxoverflow_discard;
4270
4271 nstats->rx_errors = nstats->rx_length_errors +
4272 nstats->rx_over_errors +
4273 nstats->rx_crc_errors +
4274 nstats->rx_frame_errors +
0e39e645
ET
4275 nstats->rx_fifo_errors +
4276 nstats->rx_missed_errors;
a2fbb9ea 4277
bb2a0f7a 4278 nstats->tx_aborted_errors =
de832a55
EG
4279 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4280 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4281 nstats->tx_carrier_errors =
4282 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
4283 nstats->tx_fifo_errors = 0;
4284 nstats->tx_heartbeat_errors = 0;
4285 nstats->tx_window_errors = 0;
4286
4287 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
4288 nstats->tx_carrier_errors +
4289 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4290}
4291
4292static void bnx2x_drv_stats_update(struct bnx2x *bp)
4293{
4294 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4295 int i;
4296
4297 estats->driver_xoff = 0;
4298 estats->rx_err_discard_pkt = 0;
4299 estats->rx_skb_alloc_failed = 0;
4300 estats->hw_csum_err = 0;
ca00392c 4301 for_each_rx_queue(bp, i) {
de832a55
EG
4302 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4303
4304 estats->driver_xoff += qstats->driver_xoff;
4305 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4306 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4307 estats->hw_csum_err += qstats->hw_csum_err;
4308 }
a2fbb9ea
ET
4309}
4310
bb2a0f7a 4311static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4312{
bb2a0f7a 4313 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4314
bb2a0f7a
YG
4315 if (*stats_comp != DMAE_COMP_VAL)
4316 return;
4317
4318 if (bp->port.pmf)
de832a55 4319 bnx2x_hw_stats_update(bp);
a2fbb9ea 4320
de832a55
EG
4321 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4322 BNX2X_ERR("storm stats were not updated for 3 times\n");
4323 bnx2x_panic();
4324 return;
a2fbb9ea
ET
4325 }
4326
de832a55
EG
4327 bnx2x_net_stats_update(bp);
4328 bnx2x_drv_stats_update(bp);
4329
a2fbb9ea 4330 if (bp->msglevel & NETIF_MSG_TIMER) {
ca00392c
EG
4331 struct bnx2x_fastpath *fp0_rx = bp->fp;
4332 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
de832a55
EG
4333 struct tstorm_per_client_stats *old_tclient =
4334 &bp->fp->old_tclient;
4335 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4336 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4337 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4338 int i;
a2fbb9ea
ET
4339
4340 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4341 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4342 " tx pkt (%lx)\n",
ca00392c
EG
4343 bnx2x_tx_avail(fp0_tx),
4344 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4345 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4346 " rx pkt (%lx)\n",
ca00392c
EG
4347 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4348 fp0_rx->rx_comp_cons),
4349 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4350 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4351 "brb truncate %u\n",
4352 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4353 qstats->driver_xoff,
4354 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4355 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4356 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4357 "mac_discard %u mac_filter_discard %u "
4358 "xxovrflow_discard %u brb_truncate_discard %u "
4359 "ttl0_discard %u\n",
4781bfad 4360 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4361 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4362 bnx2x_hilo(&qstats->no_buff_discard_hi),
4363 estats->mac_discard, estats->mac_filter_discard,
4364 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4365 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4366
4367 for_each_queue(bp, i) {
4368 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4369 bnx2x_fp(bp, i, tx_pkt),
4370 bnx2x_fp(bp, i, rx_pkt),
4371 bnx2x_fp(bp, i, rx_calls));
4372 }
4373 }
4374
bb2a0f7a
YG
4375 bnx2x_hw_stats_post(bp);
4376 bnx2x_storm_stats_post(bp);
4377}
a2fbb9ea 4378
bb2a0f7a
YG
4379static void bnx2x_port_stats_stop(struct bnx2x *bp)
4380{
4381 struct dmae_command *dmae;
4382 u32 opcode;
4383 int loader_idx = PMF_DMAE_C(bp);
4384 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4385
bb2a0f7a 4386 bp->executer_idx = 0;
a2fbb9ea 4387
bb2a0f7a
YG
4388 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4389 DMAE_CMD_C_ENABLE |
4390 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4391#ifdef __BIG_ENDIAN
bb2a0f7a 4392 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4393#else
bb2a0f7a 4394 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4395#endif
bb2a0f7a
YG
4396 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4397 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4398
4399 if (bp->port.port_stx) {
4400
4401 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4402 if (bp->func_stx)
4403 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4404 else
4405 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4406 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4407 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4408 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4409 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4410 dmae->len = sizeof(struct host_port_stats) >> 2;
4411 if (bp->func_stx) {
4412 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4413 dmae->comp_addr_hi = 0;
4414 dmae->comp_val = 1;
4415 } else {
4416 dmae->comp_addr_lo =
4417 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4418 dmae->comp_addr_hi =
4419 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4420 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4421
bb2a0f7a
YG
4422 *stats_comp = 0;
4423 }
a2fbb9ea
ET
4424 }
4425
bb2a0f7a
YG
4426 if (bp->func_stx) {
4427
4428 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4429 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4430 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4431 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4432 dmae->dst_addr_lo = bp->func_stx >> 2;
4433 dmae->dst_addr_hi = 0;
4434 dmae->len = sizeof(struct host_func_stats) >> 2;
4435 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4436 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4437 dmae->comp_val = DMAE_COMP_VAL;
4438
4439 *stats_comp = 0;
a2fbb9ea 4440 }
bb2a0f7a
YG
4441}
4442
4443static void bnx2x_stats_stop(struct bnx2x *bp)
4444{
4445 int update = 0;
4446
4447 bnx2x_stats_comp(bp);
4448
4449 if (bp->port.pmf)
4450 update = (bnx2x_hw_stats_update(bp) == 0);
4451
4452 update |= (bnx2x_storm_stats_update(bp) == 0);
4453
4454 if (update) {
4455 bnx2x_net_stats_update(bp);
a2fbb9ea 4456
bb2a0f7a
YG
4457 if (bp->port.pmf)
4458 bnx2x_port_stats_stop(bp);
4459
4460 bnx2x_hw_stats_post(bp);
4461 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4462 }
4463}
4464
bb2a0f7a
YG
4465static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4466{
4467}
4468
4469static const struct {
4470 void (*action)(struct bnx2x *bp);
4471 enum bnx2x_stats_state next_state;
4472} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4473/* state event */
4474{
4475/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4476/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4477/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4478/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4479},
4480{
4481/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4482/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4483/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4484/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4485}
4486};
4487
4488static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4489{
4490 enum bnx2x_stats_state state = bp->stats_state;
4491
4492 bnx2x_stats_stm[state][event].action(bp);
4493 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4494
8924665a
EG
4495 /* Make sure the state has been "changed" */
4496 smp_wmb();
4497
bb2a0f7a
YG
4498 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4499 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4500 state, event, bp->stats_state);
4501}
4502
6fe49bb9
EG
4503static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4504{
4505 struct dmae_command *dmae;
4506 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4507
4508 /* sanity */
4509 if (!bp->port.pmf || !bp->port.port_stx) {
4510 BNX2X_ERR("BUG!\n");
4511 return;
4512 }
4513
4514 bp->executer_idx = 0;
4515
4516 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4517 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4518 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4519 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4520#ifdef __BIG_ENDIAN
4521 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4522#else
4523 DMAE_CMD_ENDIANITY_DW_SWAP |
4524#endif
4525 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4526 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4527 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4528 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4529 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4530 dmae->dst_addr_hi = 0;
4531 dmae->len = sizeof(struct host_port_stats) >> 2;
4532 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4533 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4534 dmae->comp_val = DMAE_COMP_VAL;
4535
4536 *stats_comp = 0;
4537 bnx2x_hw_stats_post(bp);
4538 bnx2x_stats_comp(bp);
4539}
4540
4541static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4542{
4543 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4544 int port = BP_PORT(bp);
4545 int func;
4546 u32 func_stx;
4547
4548 /* sanity */
4549 if (!bp->port.pmf || !bp->func_stx) {
4550 BNX2X_ERR("BUG!\n");
4551 return;
4552 }
4553
4554 /* save our func_stx */
4555 func_stx = bp->func_stx;
4556
4557 for (vn = VN_0; vn < vn_max; vn++) {
4558 func = 2*vn + port;
4559
4560 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4561 bnx2x_func_stats_init(bp);
4562 bnx2x_hw_stats_post(bp);
4563 bnx2x_stats_comp(bp);
4564 }
4565
4566 /* restore our func_stx */
4567 bp->func_stx = func_stx;
4568}
4569
4570static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4571{
4572 struct dmae_command *dmae = &bp->stats_dmae;
4573 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4574
4575 /* sanity */
4576 if (!bp->func_stx) {
4577 BNX2X_ERR("BUG!\n");
4578 return;
4579 }
4580
4581 bp->executer_idx = 0;
4582 memset(dmae, 0, sizeof(struct dmae_command));
4583
4584 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4585 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4586 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4587#ifdef __BIG_ENDIAN
4588 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4589#else
4590 DMAE_CMD_ENDIANITY_DW_SWAP |
4591#endif
4592 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4593 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4594 dmae->src_addr_lo = bp->func_stx >> 2;
4595 dmae->src_addr_hi = 0;
4596 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4597 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4598 dmae->len = sizeof(struct host_func_stats) >> 2;
4599 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4600 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4601 dmae->comp_val = DMAE_COMP_VAL;
4602
4603 *stats_comp = 0;
4604 bnx2x_hw_stats_post(bp);
4605 bnx2x_stats_comp(bp);
4606}
4607
4608static void bnx2x_stats_init(struct bnx2x *bp)
4609{
4610 int port = BP_PORT(bp);
4611 int func = BP_FUNC(bp);
4612 int i;
4613
4614 bp->stats_pending = 0;
4615 bp->executer_idx = 0;
4616 bp->stats_counter = 0;
4617
4618 /* port and func stats for management */
4619 if (!BP_NOMCP(bp)) {
4620 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4621 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4622
4623 } else {
4624 bp->port.port_stx = 0;
4625 bp->func_stx = 0;
4626 }
4627 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4628 bp->port.port_stx, bp->func_stx);
4629
4630 /* port stats */
4631 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4632 bp->port.old_nig_stats.brb_discard =
4633 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4634 bp->port.old_nig_stats.brb_truncate =
4635 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4636 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4637 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4638 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4639 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4640
4641 /* function stats */
4642 for_each_queue(bp, i) {
4643 struct bnx2x_fastpath *fp = &bp->fp[i];
4644
4645 memset(&fp->old_tclient, 0,
4646 sizeof(struct tstorm_per_client_stats));
4647 memset(&fp->old_uclient, 0,
4648 sizeof(struct ustorm_per_client_stats));
4649 memset(&fp->old_xclient, 0,
4650 sizeof(struct xstorm_per_client_stats));
4651 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4652 }
4653
4654 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4655 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4656
4657 bp->stats_state = STATS_STATE_DISABLED;
4658
4659 if (bp->port.pmf) {
4660 if (bp->port.port_stx)
4661 bnx2x_port_stats_base_init(bp);
4662
4663 if (bp->func_stx)
4664 bnx2x_func_stats_base_init(bp);
4665
4666 } else if (bp->func_stx)
4667 bnx2x_func_stats_base_update(bp);
4668}
4669
a2fbb9ea
ET
4670static void bnx2x_timer(unsigned long data)
4671{
4672 struct bnx2x *bp = (struct bnx2x *) data;
4673
4674 if (!netif_running(bp->dev))
4675 return;
4676
4677 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4678 goto timer_restart;
a2fbb9ea
ET
4679
4680 if (poll) {
4681 struct bnx2x_fastpath *fp = &bp->fp[0];
4682 int rc;
4683
7961f791 4684 bnx2x_tx_int(fp);
a2fbb9ea
ET
4685 rc = bnx2x_rx_int(fp, 1000);
4686 }
4687
34f80b04
EG
4688 if (!BP_NOMCP(bp)) {
4689 int func = BP_FUNC(bp);
a2fbb9ea
ET
4690 u32 drv_pulse;
4691 u32 mcp_pulse;
4692
4693 ++bp->fw_drv_pulse_wr_seq;
4694 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4695 /* TBD - add SYSTEM_TIME */
4696 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4697 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4698
34f80b04 4699 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4700 MCP_PULSE_SEQ_MASK);
4701 /* The delta between driver pulse and mcp response
4702 * should be 1 (before mcp response) or 0 (after mcp response)
4703 */
4704 if ((drv_pulse != mcp_pulse) &&
4705 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4706 /* someone lost a heartbeat... */
4707 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4708 drv_pulse, mcp_pulse);
4709 }
4710 }
4711
f34d28ea 4712 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 4713 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4714
f1410647 4715timer_restart:
a2fbb9ea
ET
4716 mod_timer(&bp->timer, jiffies + bp->current_interval);
4717}
4718
4719/* end of Statistics */
4720
4721/* nic init */
4722
4723/*
4724 * nic init service functions
4725 */
4726
34f80b04 4727static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4728{
34f80b04
EG
4729 int port = BP_PORT(bp);
4730
ca00392c
EG
4731 /* "CSTORM" */
4732 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4733 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4734 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4735 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4736 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4737 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
4738}
4739
5c862848
EG
4740static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4741 dma_addr_t mapping, int sb_id)
34f80b04
EG
4742{
4743 int port = BP_PORT(bp);
bb2a0f7a 4744 int func = BP_FUNC(bp);
a2fbb9ea 4745 int index;
34f80b04 4746 u64 section;
a2fbb9ea
ET
4747
4748 /* USTORM */
4749 section = ((u64)mapping) + offsetof(struct host_status_block,
4750 u_status_block);
34f80b04 4751 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4752
ca00392c
EG
4753 REG_WR(bp, BAR_CSTRORM_INTMEM +
4754 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4755 REG_WR(bp, BAR_CSTRORM_INTMEM +
4756 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4757 U64_HI(section));
ca00392c
EG
4758 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4759 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4760
4761 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
4762 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4763 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4764
4765 /* CSTORM */
4766 section = ((u64)mapping) + offsetof(struct host_status_block,
4767 c_status_block);
34f80b04 4768 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4769
4770 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4771 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4772 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4773 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4774 U64_HI(section));
7a9b2557 4775 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4776 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4777
4778 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4779 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4780 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
4781
4782 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4783}
4784
4785static void bnx2x_zero_def_sb(struct bnx2x *bp)
4786{
4787 int func = BP_FUNC(bp);
a2fbb9ea 4788
ca00392c 4789 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
4790 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4791 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
4792 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4793 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4794 sizeof(struct cstorm_def_status_block_u)/4);
4795 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4796 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4797 sizeof(struct cstorm_def_status_block_c)/4);
4798 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
4799 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4800 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
4801}
4802
4803static void bnx2x_init_def_sb(struct bnx2x *bp,
4804 struct host_def_status_block *def_sb,
34f80b04 4805 dma_addr_t mapping, int sb_id)
a2fbb9ea 4806{
34f80b04
EG
4807 int port = BP_PORT(bp);
4808 int func = BP_FUNC(bp);
a2fbb9ea
ET
4809 int index, val, reg_offset;
4810 u64 section;
4811
4812 /* ATTN */
4813 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4814 atten_status_block);
34f80b04 4815 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4816
49d66772
ET
4817 bp->attn_state = 0;
4818
a2fbb9ea
ET
4819 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4820 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4821
34f80b04 4822 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4823 bp->attn_group[index].sig[0] = REG_RD(bp,
4824 reg_offset + 0x10*index);
4825 bp->attn_group[index].sig[1] = REG_RD(bp,
4826 reg_offset + 0x4 + 0x10*index);
4827 bp->attn_group[index].sig[2] = REG_RD(bp,
4828 reg_offset + 0x8 + 0x10*index);
4829 bp->attn_group[index].sig[3] = REG_RD(bp,
4830 reg_offset + 0xc + 0x10*index);
4831 }
4832
a2fbb9ea
ET
4833 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4834 HC_REG_ATTN_MSG0_ADDR_L);
4835
4836 REG_WR(bp, reg_offset, U64_LO(section));
4837 REG_WR(bp, reg_offset + 4, U64_HI(section));
4838
4839 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4840
4841 val = REG_RD(bp, reg_offset);
34f80b04 4842 val |= sb_id;
a2fbb9ea
ET
4843 REG_WR(bp, reg_offset, val);
4844
4845 /* USTORM */
4846 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4847 u_def_status_block);
34f80b04 4848 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4849
ca00392c
EG
4850 REG_WR(bp, BAR_CSTRORM_INTMEM +
4851 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4852 REG_WR(bp, BAR_CSTRORM_INTMEM +
4853 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4854 U64_HI(section));
ca00392c
EG
4855 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4856 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
4857
4858 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
4859 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4860 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
4861
4862 /* CSTORM */
4863 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4864 c_def_status_block);
34f80b04 4865 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4866
4867 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4868 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4869 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4870 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4871 U64_HI(section));
5c862848 4872 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4873 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
4874
4875 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4876 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4877 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
4878
4879 /* TSTORM */
4880 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4881 t_def_status_block);
34f80b04 4882 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4883
4884 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4885 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4886 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4887 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4888 U64_HI(section));
5c862848 4889 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4890 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4891
4892 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4893 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4894 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4895
4896 /* XSTORM */
4897 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4898 x_def_status_block);
34f80b04 4899 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4900
4901 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4902 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4903 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4904 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4905 U64_HI(section));
5c862848 4906 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4907 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4908
4909 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4910 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4911 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4912
bb2a0f7a 4913 bp->stats_pending = 0;
66e855f3 4914 bp->set_mac_pending = 0;
bb2a0f7a 4915
34f80b04 4916 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4917}
4918
4919static void bnx2x_update_coalesce(struct bnx2x *bp)
4920{
34f80b04 4921 int port = BP_PORT(bp);
a2fbb9ea
ET
4922 int i;
4923
4924 for_each_queue(bp, i) {
34f80b04 4925 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4926
4927 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
4928 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4929 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4930 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 4931 bp->rx_ticks/(4 * BNX2X_BTR));
ca00392c
EG
4932 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4933 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4934 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 4935 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
4936
4937 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4938 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4939 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4940 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 4941 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 4942 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4943 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4944 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 4945 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
4946 }
4947}
4948
7a9b2557
VZ
4949static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4950 struct bnx2x_fastpath *fp, int last)
4951{
4952 int i;
4953
4954 for (i = 0; i < last; i++) {
4955 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4956 struct sk_buff *skb = rx_buf->skb;
4957
4958 if (skb == NULL) {
4959 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4960 continue;
4961 }
4962
4963 if (fp->tpa_state[i] == BNX2X_TPA_START)
4964 pci_unmap_single(bp->pdev,
4965 pci_unmap_addr(rx_buf, mapping),
356e2385 4966 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
4967
4968 dev_kfree_skb(skb);
4969 rx_buf->skb = NULL;
4970 }
4971}
4972
a2fbb9ea
ET
4973static void bnx2x_init_rx_rings(struct bnx2x *bp)
4974{
7a9b2557 4975 int func = BP_FUNC(bp);
32626230
EG
4976 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4977 ETH_MAX_AGGREGATION_QUEUES_E1H;
4978 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4979 int i, j;
a2fbb9ea 4980
87942b46 4981 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4982 DP(NETIF_MSG_IFUP,
4983 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4984
7a9b2557 4985 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4986
555f6c78 4987 for_each_rx_queue(bp, j) {
32626230 4988 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4989
32626230 4990 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4991 fp->tpa_pool[i].skb =
4992 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4993 if (!fp->tpa_pool[i].skb) {
4994 BNX2X_ERR("Failed to allocate TPA "
4995 "skb pool for queue[%d] - "
4996 "disabling TPA on this "
4997 "queue!\n", j);
4998 bnx2x_free_tpa_pool(bp, fp, i);
4999 fp->disable_tpa = 1;
5000 break;
5001 }
5002 pci_unmap_addr_set((struct sw_rx_bd *)
5003 &bp->fp->tpa_pool[i],
5004 mapping, 0);
5005 fp->tpa_state[i] = BNX2X_TPA_STOP;
5006 }
5007 }
5008 }
5009
555f6c78 5010 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
5011 struct bnx2x_fastpath *fp = &bp->fp[j];
5012
5013 fp->rx_bd_cons = 0;
5014 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
5015 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5016
ca00392c
EG
5017 /* Mark queue as Rx */
5018 fp->is_rx_queue = 1;
5019
7a9b2557
VZ
5020 /* "next page" elements initialization */
5021 /* SGE ring */
5022 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5023 struct eth_rx_sge *sge;
5024
5025 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5026 sge->addr_hi =
5027 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5028 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5029 sge->addr_lo =
5030 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5031 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5032 }
5033
5034 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5035
7a9b2557 5036 /* RX BD ring */
a2fbb9ea
ET
5037 for (i = 1; i <= NUM_RX_RINGS; i++) {
5038 struct eth_rx_bd *rx_bd;
5039
5040 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5041 rx_bd->addr_hi =
5042 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5043 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5044 rx_bd->addr_lo =
5045 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5046 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5047 }
5048
34f80b04 5049 /* CQ ring */
a2fbb9ea
ET
5050 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5051 struct eth_rx_cqe_next_page *nextpg;
5052
5053 nextpg = (struct eth_rx_cqe_next_page *)
5054 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5055 nextpg->addr_hi =
5056 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5057 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5058 nextpg->addr_lo =
5059 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5060 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5061 }
5062
7a9b2557
VZ
5063 /* Allocate SGEs and initialize the ring elements */
5064 for (i = 0, ring_prod = 0;
5065 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5066
7a9b2557
VZ
5067 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5068 BNX2X_ERR("was only able to allocate "
5069 "%d rx sges\n", i);
5070 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5071 /* Cleanup already allocated elements */
5072 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5073 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5074 fp->disable_tpa = 1;
5075 ring_prod = 0;
5076 break;
5077 }
5078 ring_prod = NEXT_SGE_IDX(ring_prod);
5079 }
5080 fp->rx_sge_prod = ring_prod;
5081
5082 /* Allocate BDs and initialize BD ring */
66e855f3 5083 fp->rx_comp_cons = 0;
7a9b2557 5084 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5085 for (i = 0; i < bp->rx_ring_size; i++) {
5086 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5087 BNX2X_ERR("was only able to allocate "
de832a55
EG
5088 "%d rx skbs on queue[%d]\n", i, j);
5089 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5090 break;
5091 }
5092 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5093 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5094 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5095 }
5096
7a9b2557
VZ
5097 fp->rx_bd_prod = ring_prod;
5098 /* must not have more available CQEs than BDs */
5099 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5100 cqe_ring_prod);
a2fbb9ea
ET
5101 fp->rx_pkt = fp->rx_calls = 0;
5102
7a9b2557
VZ
5103 /* Warning!
5104 * this will generate an interrupt (to the TSTORM)
5105 * must only be done after chip is initialized
5106 */
5107 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5108 fp->rx_sge_prod);
a2fbb9ea
ET
5109 if (j != 0)
5110 continue;
5111
5112 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5113 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5114 U64_LO(fp->rx_comp_mapping));
5115 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5116 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5117 U64_HI(fp->rx_comp_mapping));
5118 }
5119}
5120
5121static void bnx2x_init_tx_ring(struct bnx2x *bp)
5122{
5123 int i, j;
5124
555f6c78 5125 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
5126 struct bnx2x_fastpath *fp = &bp->fp[j];
5127
5128 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5129 struct eth_tx_next_bd *tx_next_bd =
5130 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5131
ca00392c 5132 tx_next_bd->addr_hi =
a2fbb9ea 5133 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5134 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5135 tx_next_bd->addr_lo =
a2fbb9ea 5136 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5137 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5138 }
5139
ca00392c
EG
5140 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5141 fp->tx_db.data.zero_fill1 = 0;
5142 fp->tx_db.data.prod = 0;
5143
a2fbb9ea
ET
5144 fp->tx_pkt_prod = 0;
5145 fp->tx_pkt_cons = 0;
5146 fp->tx_bd_prod = 0;
5147 fp->tx_bd_cons = 0;
5148 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5149 fp->tx_pkt = 0;
5150 }
6fe49bb9
EG
5151
5152 /* clean tx statistics */
5153 for_each_rx_queue(bp, i)
5154 bnx2x_fp(bp, i, tx_pkt) = 0;
a2fbb9ea
ET
5155}
5156
5157static void bnx2x_init_sp_ring(struct bnx2x *bp)
5158{
34f80b04 5159 int func = BP_FUNC(bp);
a2fbb9ea
ET
5160
5161 spin_lock_init(&bp->spq_lock);
5162
5163 bp->spq_left = MAX_SPQ_PENDING;
5164 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5165 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5166 bp->spq_prod_bd = bp->spq;
5167 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5168
34f80b04 5169 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5170 U64_LO(bp->spq_mapping));
34f80b04
EG
5171 REG_WR(bp,
5172 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5173 U64_HI(bp->spq_mapping));
5174
34f80b04 5175 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5176 bp->spq_prod_idx);
5177}
5178
5179static void bnx2x_init_context(struct bnx2x *bp)
5180{
5181 int i;
5182
ca00392c 5183 for_each_rx_queue(bp, i) {
a2fbb9ea
ET
5184 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5185 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5186 u8 cl_id = fp->cl_id;
a2fbb9ea 5187
34f80b04
EG
5188 context->ustorm_st_context.common.sb_index_numbers =
5189 BNX2X_RX_SB_INDEX_NUM;
0626b899 5190 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5191 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5192 context->ustorm_st_context.common.flags =
de832a55
EG
5193 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5194 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5195 context->ustorm_st_context.common.statistics_counter_id =
5196 cl_id;
8d9c5f34 5197 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5198 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5199 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5200 bp->rx_buf_size;
34f80b04 5201 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5202 U64_HI(fp->rx_desc_mapping);
34f80b04 5203 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5204 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5205 if (!fp->disable_tpa) {
5206 context->ustorm_st_context.common.flags |=
ca00392c 5207 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5208 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
5209 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5210 (u32)0xffff);
7a9b2557
VZ
5211 context->ustorm_st_context.common.sge_page_base_hi =
5212 U64_HI(fp->rx_sge_mapping);
5213 context->ustorm_st_context.common.sge_page_base_lo =
5214 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5215
5216 context->ustorm_st_context.common.max_sges_for_packet =
5217 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5218 context->ustorm_st_context.common.max_sges_for_packet =
5219 ((context->ustorm_st_context.common.
5220 max_sges_for_packet + PAGES_PER_SGE - 1) &
5221 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5222 }
5223
8d9c5f34
EG
5224 context->ustorm_ag_context.cdu_usage =
5225 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5226 CDU_REGION_NUMBER_UCM_AG,
5227 ETH_CONNECTION_TYPE);
5228
ca00392c
EG
5229 context->xstorm_ag_context.cdu_reserved =
5230 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5231 CDU_REGION_NUMBER_XCM_AG,
5232 ETH_CONNECTION_TYPE);
5233 }
5234
5235 for_each_tx_queue(bp, i) {
5236 struct bnx2x_fastpath *fp = &bp->fp[i];
5237 struct eth_context *context =
5238 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5239
5240 context->cstorm_st_context.sb_index_number =
5241 C_SB_ETH_TX_CQ_INDEX;
5242 context->cstorm_st_context.status_block_id = fp->sb_id;
5243
8d9c5f34
EG
5244 context->xstorm_st_context.tx_bd_page_base_hi =
5245 U64_HI(fp->tx_desc_mapping);
5246 context->xstorm_st_context.tx_bd_page_base_lo =
5247 U64_LO(fp->tx_desc_mapping);
ca00392c 5248 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5249 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5250 }
5251}
5252
5253static void bnx2x_init_ind_table(struct bnx2x *bp)
5254{
26c8fa4d 5255 int func = BP_FUNC(bp);
a2fbb9ea
ET
5256 int i;
5257
555f6c78 5258 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5259 return;
5260
555f6c78
EG
5261 DP(NETIF_MSG_IFUP,
5262 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5263 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5264 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5265 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 5266 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
5267}
5268
49d66772
ET
5269static void bnx2x_set_client_config(struct bnx2x *bp)
5270{
49d66772 5271 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5272 int port = BP_PORT(bp);
5273 int i;
49d66772 5274
e7799c5f 5275 tstorm_client.mtu = bp->dev->mtu;
49d66772 5276 tstorm_client.config_flags =
de832a55
EG
5277 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5278 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5279#ifdef BCM_VLAN
0c6671b0 5280 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5281 tstorm_client.config_flags |=
8d9c5f34 5282 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5283 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5284 }
5285#endif
49d66772
ET
5286
5287 for_each_queue(bp, i) {
de832a55
EG
5288 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5289
49d66772 5290 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5291 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5292 ((u32 *)&tstorm_client)[0]);
5293 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5294 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5295 ((u32 *)&tstorm_client)[1]);
5296 }
5297
34f80b04
EG
5298 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5299 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5300}
5301
a2fbb9ea
ET
5302static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5303{
a2fbb9ea 5304 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 5305 int mode = bp->rx_mode;
37b091ba 5306 int mask = bp->rx_mode_cl_mask;
34f80b04 5307 int func = BP_FUNC(bp);
581ce43d 5308 int port = BP_PORT(bp);
a2fbb9ea 5309 int i;
581ce43d
EG
5310 /* All but management unicast packets should pass to the host as well */
5311 u32 llh_mask =
5312 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5313 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5314 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5315 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5316
3196a88a 5317 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5318
5319 switch (mode) {
5320 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5321 tstorm_mac_filter.ucast_drop_all = mask;
5322 tstorm_mac_filter.mcast_drop_all = mask;
5323 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5324 break;
356e2385 5325
a2fbb9ea 5326 case BNX2X_RX_MODE_NORMAL:
34f80b04 5327 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5328 break;
356e2385 5329
a2fbb9ea 5330 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5331 tstorm_mac_filter.mcast_accept_all = mask;
5332 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5333 break;
356e2385 5334
a2fbb9ea 5335 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5336 tstorm_mac_filter.ucast_accept_all = mask;
5337 tstorm_mac_filter.mcast_accept_all = mask;
5338 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5339 /* pass management unicast packets as well */
5340 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5341 break;
356e2385 5342
a2fbb9ea 5343 default:
34f80b04
EG
5344 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5345 break;
a2fbb9ea
ET
5346 }
5347
581ce43d
EG
5348 REG_WR(bp,
5349 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5350 llh_mask);
5351
a2fbb9ea
ET
5352 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5353 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5354 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5355 ((u32 *)&tstorm_mac_filter)[i]);
5356
34f80b04 5357/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5358 ((u32 *)&tstorm_mac_filter)[i]); */
5359 }
a2fbb9ea 5360
49d66772
ET
5361 if (mode != BNX2X_RX_MODE_NONE)
5362 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5363}
5364
471de716
EG
5365static void bnx2x_init_internal_common(struct bnx2x *bp)
5366{
5367 int i;
5368
5369 /* Zero this manually as its initialization is
5370 currently missing in the initTool */
5371 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5372 REG_WR(bp, BAR_USTRORM_INTMEM +
5373 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5374}
5375
5376static void bnx2x_init_internal_port(struct bnx2x *bp)
5377{
5378 int port = BP_PORT(bp);
5379
ca00392c
EG
5380 REG_WR(bp,
5381 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5382 REG_WR(bp,
5383 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5384 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5385 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5386}
5387
5388static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5389{
a2fbb9ea
ET
5390 struct tstorm_eth_function_common_config tstorm_config = {0};
5391 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5392 int port = BP_PORT(bp);
5393 int func = BP_FUNC(bp);
de832a55
EG
5394 int i, j;
5395 u32 offset;
471de716 5396 u16 max_agg_size;
a2fbb9ea
ET
5397
5398 if (is_multi(bp)) {
555f6c78 5399 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5400 tstorm_config.rss_result_mask = MULTI_MASK;
5401 }
ca00392c
EG
5402
5403 /* Enable TPA if needed */
5404 if (bp->flags & TPA_ENABLE_FLAG)
5405 tstorm_config.config_flags |=
5406 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5407
8d9c5f34
EG
5408 if (IS_E1HMF(bp))
5409 tstorm_config.config_flags |=
5410 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5411
34f80b04
EG
5412 tstorm_config.leading_client_id = BP_L_ID(bp);
5413
a2fbb9ea 5414 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5415 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5416 (*(u32 *)&tstorm_config));
5417
c14423fe 5418 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 5419 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
a2fbb9ea
ET
5420 bnx2x_set_storm_rx_mode(bp);
5421
de832a55
EG
5422 for_each_queue(bp, i) {
5423 u8 cl_id = bp->fp[i].cl_id;
5424
5425 /* reset xstorm per client statistics */
5426 offset = BAR_XSTRORM_INTMEM +
5427 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5428 for (j = 0;
5429 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5430 REG_WR(bp, offset + j*4, 0);
5431
5432 /* reset tstorm per client statistics */
5433 offset = BAR_TSTRORM_INTMEM +
5434 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5435 for (j = 0;
5436 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5437 REG_WR(bp, offset + j*4, 0);
5438
5439 /* reset ustorm per client statistics */
5440 offset = BAR_USTRORM_INTMEM +
5441 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5442 for (j = 0;
5443 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5444 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5445 }
5446
5447 /* Init statistics related context */
34f80b04 5448 stats_flags.collect_eth = 1;
a2fbb9ea 5449
66e855f3 5450 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5451 ((u32 *)&stats_flags)[0]);
66e855f3 5452 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5453 ((u32 *)&stats_flags)[1]);
5454
66e855f3 5455 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5456 ((u32 *)&stats_flags)[0]);
66e855f3 5457 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5458 ((u32 *)&stats_flags)[1]);
5459
de832a55
EG
5460 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5461 ((u32 *)&stats_flags)[0]);
5462 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5463 ((u32 *)&stats_flags)[1]);
5464
66e855f3 5465 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5466 ((u32 *)&stats_flags)[0]);
66e855f3 5467 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5468 ((u32 *)&stats_flags)[1]);
5469
66e855f3
YG
5470 REG_WR(bp, BAR_XSTRORM_INTMEM +
5471 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5472 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5473 REG_WR(bp, BAR_XSTRORM_INTMEM +
5474 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5475 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5476
5477 REG_WR(bp, BAR_TSTRORM_INTMEM +
5478 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5479 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5480 REG_WR(bp, BAR_TSTRORM_INTMEM +
5481 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5482 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5483
de832a55
EG
5484 REG_WR(bp, BAR_USTRORM_INTMEM +
5485 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5486 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5487 REG_WR(bp, BAR_USTRORM_INTMEM +
5488 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5489 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5490
34f80b04
EG
5491 if (CHIP_IS_E1H(bp)) {
5492 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5493 IS_E1HMF(bp));
5494 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5495 IS_E1HMF(bp));
5496 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5497 IS_E1HMF(bp));
5498 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5499 IS_E1HMF(bp));
5500
7a9b2557
VZ
5501 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5502 bp->e1hov);
34f80b04
EG
5503 }
5504
4f40f2cb
EG
5505 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5506 max_agg_size =
5507 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5508 SGE_PAGE_SIZE * PAGES_PER_SGE),
5509 (u32)0xffff);
555f6c78 5510 for_each_rx_queue(bp, i) {
7a9b2557 5511 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5512
5513 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5514 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5515 U64_LO(fp->rx_comp_mapping));
5516 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5517 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5518 U64_HI(fp->rx_comp_mapping));
5519
ca00392c
EG
5520 /* Next page */
5521 REG_WR(bp, BAR_USTRORM_INTMEM +
5522 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5523 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5524 REG_WR(bp, BAR_USTRORM_INTMEM +
5525 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5526 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5527
7a9b2557 5528 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5529 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5530 max_agg_size);
5531 }
8a1c38d1 5532
1c06328c
EG
5533 /* dropless flow control */
5534 if (CHIP_IS_E1H(bp)) {
5535 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5536
5537 rx_pause.bd_thr_low = 250;
5538 rx_pause.cqe_thr_low = 250;
5539 rx_pause.cos = 1;
5540 rx_pause.sge_thr_low = 0;
5541 rx_pause.bd_thr_high = 350;
5542 rx_pause.cqe_thr_high = 350;
5543 rx_pause.sge_thr_high = 0;
5544
5545 for_each_rx_queue(bp, i) {
5546 struct bnx2x_fastpath *fp = &bp->fp[i];
5547
5548 if (!fp->disable_tpa) {
5549 rx_pause.sge_thr_low = 150;
5550 rx_pause.sge_thr_high = 250;
5551 }
5552
5553
5554 offset = BAR_USTRORM_INTMEM +
5555 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5556 fp->cl_id);
5557 for (j = 0;
5558 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5559 j++)
5560 REG_WR(bp, offset + j*4,
5561 ((u32 *)&rx_pause)[j]);
5562 }
5563 }
5564
8a1c38d1
EG
5565 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5566
5567 /* Init rate shaping and fairness contexts */
5568 if (IS_E1HMF(bp)) {
5569 int vn;
5570
5571 /* During init there is no active link
5572 Until link is up, set link rate to 10Gbps */
5573 bp->link_vars.line_speed = SPEED_10000;
5574 bnx2x_init_port_minmax(bp);
5575
b015e3d1
EG
5576 if (!BP_NOMCP(bp))
5577 bp->mf_config =
5578 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
5579 bnx2x_calc_vn_weight_sum(bp);
5580
5581 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5582 bnx2x_init_vn_minmax(bp, 2*vn + port);
5583
5584 /* Enable rate shaping and fairness */
b015e3d1 5585 bp->cmng.flags.cmng_enables |=
8a1c38d1 5586 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 5587
8a1c38d1
EG
5588 } else {
5589 /* rate shaping and fairness are disabled */
5590 DP(NETIF_MSG_IFUP,
5591 "single function mode minmax will be disabled\n");
5592 }
5593
5594
5595 /* Store it to internal memory */
5596 if (bp->port.pmf)
5597 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5598 REG_WR(bp, BAR_XSTRORM_INTMEM +
5599 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5600 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5601}
5602
471de716
EG
5603static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5604{
5605 switch (load_code) {
5606 case FW_MSG_CODE_DRV_LOAD_COMMON:
5607 bnx2x_init_internal_common(bp);
5608 /* no break */
5609
5610 case FW_MSG_CODE_DRV_LOAD_PORT:
5611 bnx2x_init_internal_port(bp);
5612 /* no break */
5613
5614 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5615 bnx2x_init_internal_func(bp);
5616 break;
5617
5618 default:
5619 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5620 break;
5621 }
5622}
5623
5624static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5625{
5626 int i;
5627
5628 for_each_queue(bp, i) {
5629 struct bnx2x_fastpath *fp = &bp->fp[i];
5630
34f80b04 5631 fp->bp = bp;
a2fbb9ea 5632 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5633 fp->index = i;
34f80b04 5634 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
5635#ifdef BCM_CNIC
5636 fp->sb_id = fp->cl_id + 1;
5637#else
34f80b04 5638 fp->sb_id = fp->cl_id;
37b091ba 5639#endif
ca00392c
EG
5640 /* Suitable Rx and Tx SBs are served by the same client */
5641 if (i >= bp->num_rx_queues)
5642 fp->cl_id -= bp->num_rx_queues;
34f80b04 5643 DP(NETIF_MSG_IFUP,
f5372251
EG
5644 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5645 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5646 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5647 fp->sb_id);
5c862848 5648 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5649 }
5650
16119785
EG
5651 /* ensure status block indices were read */
5652 rmb();
5653
5654
5c862848
EG
5655 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5656 DEF_SB_ID);
5657 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5658 bnx2x_update_coalesce(bp);
5659 bnx2x_init_rx_rings(bp);
5660 bnx2x_init_tx_ring(bp);
5661 bnx2x_init_sp_ring(bp);
5662 bnx2x_init_context(bp);
471de716 5663 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5664 bnx2x_init_ind_table(bp);
0ef00459
EG
5665 bnx2x_stats_init(bp);
5666
5667 /* At this point, we are ready for interrupts */
5668 atomic_set(&bp->intr_sem, 0);
5669
5670 /* flush all before enabling interrupts */
5671 mb();
5672 mmiowb();
5673
615f8fd9 5674 bnx2x_int_enable(bp);
eb8da205
EG
5675
5676 /* Check for SPIO5 */
5677 bnx2x_attn_int_deasserted0(bp,
5678 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5679 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5680}
5681
5682/* end of nic init */
5683
5684/*
5685 * gzip service functions
5686 */
5687
5688static int bnx2x_gunzip_init(struct bnx2x *bp)
5689{
5690 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5691 &bp->gunzip_mapping);
5692 if (bp->gunzip_buf == NULL)
5693 goto gunzip_nomem1;
5694
5695 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5696 if (bp->strm == NULL)
5697 goto gunzip_nomem2;
5698
5699 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5700 GFP_KERNEL);
5701 if (bp->strm->workspace == NULL)
5702 goto gunzip_nomem3;
5703
5704 return 0;
5705
5706gunzip_nomem3:
5707 kfree(bp->strm);
5708 bp->strm = NULL;
5709
5710gunzip_nomem2:
5711 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5712 bp->gunzip_mapping);
5713 bp->gunzip_buf = NULL;
5714
5715gunzip_nomem1:
5716 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5717 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5718 return -ENOMEM;
5719}
5720
5721static void bnx2x_gunzip_end(struct bnx2x *bp)
5722{
5723 kfree(bp->strm->workspace);
5724
5725 kfree(bp->strm);
5726 bp->strm = NULL;
5727
5728 if (bp->gunzip_buf) {
5729 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5730 bp->gunzip_mapping);
5731 bp->gunzip_buf = NULL;
5732 }
5733}
5734
94a78b79 5735static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5736{
5737 int n, rc;
5738
5739 /* check gzip header */
94a78b79
VZ
5740 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5741 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5742 return -EINVAL;
94a78b79 5743 }
a2fbb9ea
ET
5744
5745 n = 10;
5746
34f80b04 5747#define FNAME 0x8
a2fbb9ea
ET
5748
5749 if (zbuf[3] & FNAME)
5750 while ((zbuf[n++] != 0) && (n < len));
5751
94a78b79 5752 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5753 bp->strm->avail_in = len - n;
5754 bp->strm->next_out = bp->gunzip_buf;
5755 bp->strm->avail_out = FW_BUF_SIZE;
5756
5757 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5758 if (rc != Z_OK)
5759 return rc;
5760
5761 rc = zlib_inflate(bp->strm, Z_FINISH);
5762 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5763 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5764 bp->dev->name, bp->strm->msg);
5765
5766 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5767 if (bp->gunzip_outlen & 0x3)
5768 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5769 " gunzip_outlen (%d) not aligned\n",
5770 bp->dev->name, bp->gunzip_outlen);
5771 bp->gunzip_outlen >>= 2;
5772
5773 zlib_inflateEnd(bp->strm);
5774
5775 if (rc == Z_STREAM_END)
5776 return 0;
5777
5778 return rc;
5779}
5780
5781/* nic load/unload */
5782
5783/*
34f80b04 5784 * General service functions
a2fbb9ea
ET
5785 */
5786
5787/* send a NIG loopback debug packet */
5788static void bnx2x_lb_pckt(struct bnx2x *bp)
5789{
a2fbb9ea 5790 u32 wb_write[3];
a2fbb9ea
ET
5791
5792 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5793 wb_write[0] = 0x55555555;
5794 wb_write[1] = 0x55555555;
34f80b04 5795 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5796 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5797
5798 /* NON-IP protocol */
a2fbb9ea
ET
5799 wb_write[0] = 0x09000000;
5800 wb_write[1] = 0x55555555;
34f80b04 5801 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5802 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5803}
5804
5805/* some of the internal memories
5806 * are not directly readable from the driver
5807 * to test them we send debug packets
5808 */
5809static int bnx2x_int_mem_test(struct bnx2x *bp)
5810{
5811 int factor;
5812 int count, i;
5813 u32 val = 0;
5814
ad8d3948 5815 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5816 factor = 120;
ad8d3948
EG
5817 else if (CHIP_REV_IS_EMUL(bp))
5818 factor = 200;
5819 else
a2fbb9ea 5820 factor = 1;
a2fbb9ea
ET
5821
5822 DP(NETIF_MSG_HW, "start part1\n");
5823
5824 /* Disable inputs of parser neighbor blocks */
5825 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5826 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5827 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5828 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5829
5830 /* Write 0 to parser credits for CFC search request */
5831 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5832
5833 /* send Ethernet packet */
5834 bnx2x_lb_pckt(bp);
5835
5836 /* TODO do i reset NIG statistic? */
5837 /* Wait until NIG register shows 1 packet of size 0x10 */
5838 count = 1000 * factor;
5839 while (count) {
34f80b04 5840
a2fbb9ea
ET
5841 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5842 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5843 if (val == 0x10)
5844 break;
5845
5846 msleep(10);
5847 count--;
5848 }
5849 if (val != 0x10) {
5850 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5851 return -1;
5852 }
5853
5854 /* Wait until PRS register shows 1 packet */
5855 count = 1000 * factor;
5856 while (count) {
5857 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5858 if (val == 1)
5859 break;
5860
5861 msleep(10);
5862 count--;
5863 }
5864 if (val != 0x1) {
5865 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5866 return -2;
5867 }
5868
5869 /* Reset and init BRB, PRS */
34f80b04 5870 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5871 msleep(50);
34f80b04 5872 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5873 msleep(50);
94a78b79
VZ
5874 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5875 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5876
5877 DP(NETIF_MSG_HW, "part2\n");
5878
5879 /* Disable inputs of parser neighbor blocks */
5880 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5881 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5882 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5883 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5884
5885 /* Write 0 to parser credits for CFC search request */
5886 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5887
5888 /* send 10 Ethernet packets */
5889 for (i = 0; i < 10; i++)
5890 bnx2x_lb_pckt(bp);
5891
5892 /* Wait until NIG register shows 10 + 1
5893 packets of size 11*0x10 = 0xb0 */
5894 count = 1000 * factor;
5895 while (count) {
34f80b04 5896
a2fbb9ea
ET
5897 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5898 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5899 if (val == 0xb0)
5900 break;
5901
5902 msleep(10);
5903 count--;
5904 }
5905 if (val != 0xb0) {
5906 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5907 return -3;
5908 }
5909
5910 /* Wait until PRS register shows 2 packets */
5911 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5912 if (val != 2)
5913 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5914
5915 /* Write 1 to parser credits for CFC search request */
5916 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5917
5918 /* Wait until PRS register shows 3 packets */
5919 msleep(10 * factor);
5920 /* Wait until NIG register shows 1 packet of size 0x10 */
5921 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5922 if (val != 3)
5923 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5924
5925 /* clear NIG EOP FIFO */
5926 for (i = 0; i < 11; i++)
5927 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5928 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5929 if (val != 1) {
5930 BNX2X_ERR("clear of NIG failed\n");
5931 return -4;
5932 }
5933
5934 /* Reset and init BRB, PRS, NIG */
5935 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5936 msleep(50);
5937 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5938 msleep(50);
94a78b79
VZ
5939 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5940 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 5941#ifndef BCM_CNIC
a2fbb9ea
ET
5942 /* set NIC mode */
5943 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5944#endif
5945
5946 /* Enable inputs of parser neighbor blocks */
5947 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5948 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5949 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5950 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5951
5952 DP(NETIF_MSG_HW, "done\n");
5953
5954 return 0; /* OK */
5955}
5956
5957static void enable_blocks_attention(struct bnx2x *bp)
5958{
5959 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5960 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5961 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5962 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5963 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5964 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5965 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5966 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5967 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5968/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5969/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5970 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5971 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5972 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5973/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5974/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5975 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5976 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5977 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5978 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5979/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5980/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5981 if (CHIP_REV_IS_FPGA(bp))
5982 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5983 else
5984 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5985 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5986 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5987 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5988/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5989/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5990 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5991 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5992/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5993 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5994}
5995
34f80b04 5996
81f75bbf
EG
5997static void bnx2x_reset_common(struct bnx2x *bp)
5998{
5999 /* reset_common */
6000 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6001 0xd3ffff7f);
6002 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6003}
6004
573f2035
EG
6005static void bnx2x_init_pxp(struct bnx2x *bp)
6006{
6007 u16 devctl;
6008 int r_order, w_order;
6009
6010 pci_read_config_word(bp->pdev,
6011 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6012 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6013 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6014 if (bp->mrrs == -1)
6015 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6016 else {
6017 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6018 r_order = bp->mrrs;
6019 }
6020
6021 bnx2x_init_pxp_arb(bp, r_order, w_order);
6022}
fd4ef40d
EG
6023
6024static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6025{
6026 u32 val;
6027 u8 port;
6028 u8 is_required = 0;
6029
6030 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6031 SHARED_HW_CFG_FAN_FAILURE_MASK;
6032
6033 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6034 is_required = 1;
6035
6036 /*
6037 * The fan failure mechanism is usually related to the PHY type since
6038 * the power consumption of the board is affected by the PHY. Currently,
6039 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6040 */
6041 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6042 for (port = PORT_0; port < PORT_MAX; port++) {
6043 u32 phy_type =
6044 SHMEM_RD(bp, dev_info.port_hw_config[port].
6045 external_phy_config) &
6046 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6047 is_required |=
6048 ((phy_type ==
6049 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
6050 (phy_type ==
6051 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
6052 (phy_type ==
6053 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6054 }
6055
6056 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6057
6058 if (is_required == 0)
6059 return;
6060
6061 /* Fan failure is indicated by SPIO 5 */
6062 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6063 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6064
6065 /* set to active low mode */
6066 val = REG_RD(bp, MISC_REG_SPIO_INT);
6067 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6068 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6069 REG_WR(bp, MISC_REG_SPIO_INT, val);
6070
6071 /* enable interrupt to signal the IGU */
6072 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6073 val |= (1 << MISC_REGISTERS_SPIO_5);
6074 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6075}
6076
34f80b04 6077static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6078{
a2fbb9ea 6079 u32 val, i;
37b091ba
MC
6080#ifdef BCM_CNIC
6081 u32 wb_write[2];
6082#endif
a2fbb9ea 6083
34f80b04 6084 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6085
81f75bbf 6086 bnx2x_reset_common(bp);
34f80b04
EG
6087 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6088 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6089
94a78b79 6090 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6091 if (CHIP_IS_E1H(bp))
6092 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6093
34f80b04
EG
6094 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6095 msleep(30);
6096 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6097
94a78b79 6098 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6099 if (CHIP_IS_E1(bp)) {
6100 /* enable HW interrupt from PXP on USDM overflow
6101 bit 16 on INT_MASK_0 */
6102 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6103 }
a2fbb9ea 6104
94a78b79 6105 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6106 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6107
6108#ifdef __BIG_ENDIAN
34f80b04
EG
6109 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6110 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6111 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6112 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6113 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6114 /* make sure this value is 0 */
6115 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6116
6117/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6118 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6119 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6120 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6121 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6122#endif
6123
34f80b04 6124 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6125#ifdef BCM_CNIC
34f80b04
EG
6126 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6127 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6128 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6129#endif
6130
34f80b04
EG
6131 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6132 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6133
34f80b04
EG
6134 /* let the HW do it's magic ... */
6135 msleep(100);
6136 /* finish PXP init */
6137 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6138 if (val != 1) {
6139 BNX2X_ERR("PXP2 CFG failed\n");
6140 return -EBUSY;
6141 }
6142 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6143 if (val != 1) {
6144 BNX2X_ERR("PXP2 RD_INIT failed\n");
6145 return -EBUSY;
6146 }
a2fbb9ea 6147
34f80b04
EG
6148 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6149 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6150
94a78b79 6151 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6152
34f80b04
EG
6153 /* clean the DMAE memory */
6154 bp->dmae_ready = 1;
6155 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6156
94a78b79
VZ
6157 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6158 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6159 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6160 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6161
34f80b04
EG
6162 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6163 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6164 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6165 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6166
94a78b79 6167 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6168
6169#ifdef BCM_CNIC
6170 wb_write[0] = 0;
6171 wb_write[1] = 0;
6172 for (i = 0; i < 64; i++) {
6173 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6174 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6175
6176 if (CHIP_IS_E1H(bp)) {
6177 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6178 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6179 wb_write, 2);
6180 }
6181 }
6182#endif
34f80b04
EG
6183 /* soft reset pulse */
6184 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6185 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6186
37b091ba 6187#ifdef BCM_CNIC
94a78b79 6188 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6189#endif
a2fbb9ea 6190
94a78b79 6191 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6192 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6193 if (!CHIP_REV_IS_SLOW(bp)) {
6194 /* enable hw interrupt from doorbell Q */
6195 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6196 }
a2fbb9ea 6197
94a78b79
VZ
6198 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6199 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6200 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6201#ifndef BCM_CNIC
3196a88a
EG
6202 /* set NIC mode */
6203 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6204#endif
34f80b04
EG
6205 if (CHIP_IS_E1H(bp))
6206 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6207
94a78b79
VZ
6208 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6209 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6210 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6211 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6212
ca00392c
EG
6213 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6214 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6215 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6216 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6217
94a78b79
VZ
6218 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6219 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6220 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6221 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6222
34f80b04
EG
6223 /* sync semi rtc */
6224 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6225 0x80000000);
6226 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6227 0x80000000);
a2fbb9ea 6228
94a78b79
VZ
6229 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6230 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6231 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6232
34f80b04
EG
6233 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6234 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6235 REG_WR(bp, i, 0xc0cac01a);
6236 /* TODO: replace with something meaningful */
6237 }
94a78b79 6238 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
6239#ifdef BCM_CNIC
6240 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6241 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6242 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6243 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6244 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6245 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6246 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6247 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6248 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6249 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6250#endif
34f80b04 6251 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6252
34f80b04
EG
6253 if (sizeof(union cdu_context) != 1024)
6254 /* we currently assume that a context is 1024 bytes */
6255 printk(KERN_ALERT PFX "please adjust the size of"
6256 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 6257
94a78b79 6258 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6259 val = (4 << 24) + (0 << 12) + 1024;
6260 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6261
94a78b79 6262 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6263 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6264 /* enable context validation interrupt from CFC */
6265 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6266
6267 /* set the thresholds to prevent CFC/CDU race */
6268 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6269
94a78b79
VZ
6270 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6271 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6272
94a78b79 6273 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6274 /* Reset PCIE errors for debug */
6275 REG_WR(bp, 0x2814, 0xffffffff);
6276 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6277
94a78b79 6278 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6279 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6280 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6281 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6282
94a78b79 6283 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6284 if (CHIP_IS_E1H(bp)) {
6285 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6286 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6287 }
6288
6289 if (CHIP_REV_IS_SLOW(bp))
6290 msleep(200);
6291
6292 /* finish CFC init */
6293 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6294 if (val != 1) {
6295 BNX2X_ERR("CFC LL_INIT failed\n");
6296 return -EBUSY;
6297 }
6298 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6299 if (val != 1) {
6300 BNX2X_ERR("CFC AC_INIT failed\n");
6301 return -EBUSY;
6302 }
6303 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6304 if (val != 1) {
6305 BNX2X_ERR("CFC CAM_INIT failed\n");
6306 return -EBUSY;
6307 }
6308 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6309
34f80b04
EG
6310 /* read NIG statistic
6311 to see if this is our first up since powerup */
6312 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6313 val = *bnx2x_sp(bp, wb_data[0]);
6314
6315 /* do internal memory self test */
6316 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6317 BNX2X_ERR("internal mem self test failed\n");
6318 return -EBUSY;
6319 }
6320
35b19ba5 6321 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6322 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6323 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6324 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6325 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6326 bp->port.need_hw_lock = 1;
6327 break;
6328
34f80b04
EG
6329 default:
6330 break;
6331 }
f1410647 6332
fd4ef40d
EG
6333 bnx2x_setup_fan_failure_detection(bp);
6334
34f80b04
EG
6335 /* clear PXP2 attentions */
6336 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6337
34f80b04 6338 enable_blocks_attention(bp);
a2fbb9ea 6339
6bbca910
YR
6340 if (!BP_NOMCP(bp)) {
6341 bnx2x_acquire_phy_lock(bp);
6342 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6343 bnx2x_release_phy_lock(bp);
6344 } else
6345 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6346
34f80b04
EG
6347 return 0;
6348}
a2fbb9ea 6349
34f80b04
EG
6350static int bnx2x_init_port(struct bnx2x *bp)
6351{
6352 int port = BP_PORT(bp);
94a78b79 6353 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6354 u32 low, high;
34f80b04 6355 u32 val;
a2fbb9ea 6356
34f80b04
EG
6357 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6358
6359 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6360
94a78b79 6361 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6362 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6363
6364 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6365 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6366 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6367 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6368
37b091ba
MC
6369#ifdef BCM_CNIC
6370 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6371
94a78b79 6372 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
6373 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6374 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6375#endif
94a78b79 6376 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6377
94a78b79 6378 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6379 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6380 /* no pause for emulation and FPGA */
6381 low = 0;
6382 high = 513;
6383 } else {
6384 if (IS_E1HMF(bp))
6385 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6386 else if (bp->dev->mtu > 4096) {
6387 if (bp->flags & ONE_PORT_FLAG)
6388 low = 160;
6389 else {
6390 val = bp->dev->mtu;
6391 /* (24*1024 + val*4)/256 */
6392 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6393 }
6394 } else
6395 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6396 high = low + 56; /* 14*1024/256 */
6397 }
6398 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6399 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6400
6401
94a78b79 6402 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6403
94a78b79 6404 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6405 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6406 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6407 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6408
94a78b79
VZ
6409 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6410 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6411 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6412 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6413
94a78b79 6414 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6415 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6416
94a78b79 6417 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6418
6419 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6420 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6421
6422 /* update threshold */
34f80b04 6423 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6424 /* update init credit */
34f80b04 6425 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6426
6427 /* probe changes */
34f80b04 6428 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6429 msleep(5);
34f80b04 6430 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6431
37b091ba
MC
6432#ifdef BCM_CNIC
6433 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6434#endif
94a78b79 6435 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6436 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6437
6438 if (CHIP_IS_E1(bp)) {
6439 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6440 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6441 }
94a78b79 6442 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6443
94a78b79 6444 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6445 /* init aeu_mask_attn_func_0/1:
6446 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6447 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6448 * bits 4-7 are used for "per vn group attention" */
6449 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6450 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6451
94a78b79 6452 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6453 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6454 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6455 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6456 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6457
94a78b79 6458 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6459
6460 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6461
6462 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6463 /* 0x2 disable e1hov, 0x1 enable */
6464 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6465 (IS_E1HMF(bp) ? 0x1 : 0x2));
6466
1c06328c
EG
6467 {
6468 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6469 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6470 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6471 }
34f80b04
EG
6472 }
6473
94a78b79 6474 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6475 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6476
35b19ba5 6477 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6478 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6479 {
6480 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6481
6482 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6483 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6484
6485 /* The GPIO should be swapped if the swap register is
6486 set and active */
6487 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6488 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6489
6490 /* Select function upon port-swap configuration */
6491 if (port == 0) {
6492 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6493 aeu_gpio_mask = (swap_val && swap_override) ?
6494 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6495 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6496 } else {
6497 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6498 aeu_gpio_mask = (swap_val && swap_override) ?
6499 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6500 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6501 }
6502 val = REG_RD(bp, offset);
6503 /* add GPIO3 to group */
6504 val |= aeu_gpio_mask;
6505 REG_WR(bp, offset, val);
6506 }
6507 break;
6508
35b19ba5 6509 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6510 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6511 /* add SPIO 5 to group 0 */
4d295db0
EG
6512 {
6513 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6514 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6515 val = REG_RD(bp, reg_addr);
f1410647 6516 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6517 REG_WR(bp, reg_addr, val);
6518 }
f1410647
ET
6519 break;
6520
6521 default:
6522 break;
6523 }
6524
c18487ee 6525 bnx2x__link_reset(bp);
a2fbb9ea 6526
34f80b04
EG
6527 return 0;
6528}
6529
6530#define ILT_PER_FUNC (768/2)
6531#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6532/* the phys address is shifted right 12 bits and has an added
6533 1=valid bit added to the 53rd bit
6534 then since this is a wide register(TM)
6535 we split it into two 32 bit writes
6536 */
6537#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6538#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6539#define PXP_ONE_ILT(x) (((x) << 10) | x)
6540#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6541
37b091ba
MC
6542#ifdef BCM_CNIC
6543#define CNIC_ILT_LINES 127
6544#define CNIC_CTX_PER_ILT 16
6545#else
34f80b04 6546#define CNIC_ILT_LINES 0
37b091ba 6547#endif
34f80b04
EG
6548
6549static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6550{
6551 int reg;
6552
6553 if (CHIP_IS_E1H(bp))
6554 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6555 else /* E1 */
6556 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6557
6558 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6559}
6560
6561static int bnx2x_init_func(struct bnx2x *bp)
6562{
6563 int port = BP_PORT(bp);
6564 int func = BP_FUNC(bp);
8badd27a 6565 u32 addr, val;
34f80b04
EG
6566 int i;
6567
6568 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6569
8badd27a
EG
6570 /* set MSI reconfigure capability */
6571 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6572 val = REG_RD(bp, addr);
6573 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6574 REG_WR(bp, addr, val);
6575
34f80b04
EG
6576 i = FUNC_ILT_BASE(func);
6577
6578 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6579 if (CHIP_IS_E1H(bp)) {
6580 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6581 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6582 } else /* E1 */
6583 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6584 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6585
37b091ba
MC
6586#ifdef BCM_CNIC
6587 i += 1 + CNIC_ILT_LINES;
6588 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6589 if (CHIP_IS_E1(bp))
6590 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6591 else {
6592 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6593 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6594 }
6595
6596 i++;
6597 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6598 if (CHIP_IS_E1(bp))
6599 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6600 else {
6601 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6602 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6603 }
6604
6605 i++;
6606 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6607 if (CHIP_IS_E1(bp))
6608 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6609 else {
6610 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6611 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6612 }
6613
6614 /* tell the searcher where the T2 table is */
6615 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6616
6617 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6618 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6619
6620 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6621 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6622 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6623
6624 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6625#endif
34f80b04
EG
6626
6627 if (CHIP_IS_E1H(bp)) {
573f2035
EG
6628 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6629 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6630 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6631 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6632 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6633 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6634 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6635 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6636 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
6637
6638 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6639 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6640 }
6641
6642 /* HC init per function */
6643 if (CHIP_IS_E1H(bp)) {
6644 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6645
6646 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6647 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6648 }
94a78b79 6649 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6650
c14423fe 6651 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6652 REG_WR(bp, 0x2114, 0xffffffff);
6653 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6654
34f80b04
EG
6655 return 0;
6656}
6657
6658static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6659{
6660 int i, rc = 0;
a2fbb9ea 6661
34f80b04
EG
6662 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6663 BP_FUNC(bp), load_code);
a2fbb9ea 6664
34f80b04
EG
6665 bp->dmae_ready = 0;
6666 mutex_init(&bp->dmae_mutex);
54016b26
EG
6667 rc = bnx2x_gunzip_init(bp);
6668 if (rc)
6669 return rc;
a2fbb9ea 6670
34f80b04
EG
6671 switch (load_code) {
6672 case FW_MSG_CODE_DRV_LOAD_COMMON:
6673 rc = bnx2x_init_common(bp);
6674 if (rc)
6675 goto init_hw_err;
6676 /* no break */
6677
6678 case FW_MSG_CODE_DRV_LOAD_PORT:
6679 bp->dmae_ready = 1;
6680 rc = bnx2x_init_port(bp);
6681 if (rc)
6682 goto init_hw_err;
6683 /* no break */
6684
6685 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6686 bp->dmae_ready = 1;
6687 rc = bnx2x_init_func(bp);
6688 if (rc)
6689 goto init_hw_err;
6690 break;
6691
6692 default:
6693 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6694 break;
6695 }
6696
6697 if (!BP_NOMCP(bp)) {
6698 int func = BP_FUNC(bp);
a2fbb9ea
ET
6699
6700 bp->fw_drv_pulse_wr_seq =
34f80b04 6701 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6702 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6703 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6704 }
a2fbb9ea 6705
34f80b04
EG
6706 /* this needs to be done before gunzip end */
6707 bnx2x_zero_def_sb(bp);
6708 for_each_queue(bp, i)
6709 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
6710#ifdef BCM_CNIC
6711 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6712#endif
34f80b04
EG
6713
6714init_hw_err:
6715 bnx2x_gunzip_end(bp);
6716
6717 return rc;
a2fbb9ea
ET
6718}
6719
a2fbb9ea
ET
6720static void bnx2x_free_mem(struct bnx2x *bp)
6721{
6722
6723#define BNX2X_PCI_FREE(x, y, size) \
6724 do { \
6725 if (x) { \
6726 pci_free_consistent(bp->pdev, size, x, y); \
6727 x = NULL; \
6728 y = 0; \
6729 } \
6730 } while (0)
6731
6732#define BNX2X_FREE(x) \
6733 do { \
6734 if (x) { \
6735 vfree(x); \
6736 x = NULL; \
6737 } \
6738 } while (0)
6739
6740 int i;
6741
6742 /* fastpath */
555f6c78 6743 /* Common */
a2fbb9ea
ET
6744 for_each_queue(bp, i) {
6745
555f6c78 6746 /* status blocks */
a2fbb9ea
ET
6747 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6748 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6749 sizeof(struct host_status_block));
555f6c78
EG
6750 }
6751 /* Rx */
6752 for_each_rx_queue(bp, i) {
a2fbb9ea 6753
555f6c78 6754 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6755 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6756 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6757 bnx2x_fp(bp, i, rx_desc_mapping),
6758 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6759
6760 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6761 bnx2x_fp(bp, i, rx_comp_mapping),
6762 sizeof(struct eth_fast_path_rx_cqe) *
6763 NUM_RCQ_BD);
a2fbb9ea 6764
7a9b2557 6765 /* SGE ring */
32626230 6766 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6767 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6768 bnx2x_fp(bp, i, rx_sge_mapping),
6769 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6770 }
555f6c78
EG
6771 /* Tx */
6772 for_each_tx_queue(bp, i) {
6773
6774 /* fastpath tx rings: tx_buf tx_desc */
6775 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6776 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6777 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6778 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6779 }
a2fbb9ea
ET
6780 /* end of fastpath */
6781
6782 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6783 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6784
6785 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6786 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6787
37b091ba 6788#ifdef BCM_CNIC
a2fbb9ea
ET
6789 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6790 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6791 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6792 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
6793 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6794 sizeof(struct host_status_block));
a2fbb9ea 6795#endif
7a9b2557 6796 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6797
6798#undef BNX2X_PCI_FREE
6799#undef BNX2X_KFREE
6800}
6801
6802static int bnx2x_alloc_mem(struct bnx2x *bp)
6803{
6804
6805#define BNX2X_PCI_ALLOC(x, y, size) \
6806 do { \
6807 x = pci_alloc_consistent(bp->pdev, size, y); \
6808 if (x == NULL) \
6809 goto alloc_mem_err; \
6810 memset(x, 0, size); \
6811 } while (0)
6812
6813#define BNX2X_ALLOC(x, size) \
6814 do { \
6815 x = vmalloc(size); \
6816 if (x == NULL) \
6817 goto alloc_mem_err; \
6818 memset(x, 0, size); \
6819 } while (0)
6820
6821 int i;
6822
6823 /* fastpath */
555f6c78 6824 /* Common */
a2fbb9ea
ET
6825 for_each_queue(bp, i) {
6826 bnx2x_fp(bp, i, bp) = bp;
6827
555f6c78 6828 /* status blocks */
a2fbb9ea
ET
6829 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6830 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6831 sizeof(struct host_status_block));
555f6c78
EG
6832 }
6833 /* Rx */
6834 for_each_rx_queue(bp, i) {
a2fbb9ea 6835
555f6c78 6836 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6837 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6838 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6839 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6840 &bnx2x_fp(bp, i, rx_desc_mapping),
6841 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6842
6843 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6844 &bnx2x_fp(bp, i, rx_comp_mapping),
6845 sizeof(struct eth_fast_path_rx_cqe) *
6846 NUM_RCQ_BD);
6847
7a9b2557
VZ
6848 /* SGE ring */
6849 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6850 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6851 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6852 &bnx2x_fp(bp, i, rx_sge_mapping),
6853 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6854 }
555f6c78
EG
6855 /* Tx */
6856 for_each_tx_queue(bp, i) {
6857
555f6c78
EG
6858 /* fastpath tx rings: tx_buf tx_desc */
6859 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6860 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6861 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6862 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6863 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6864 }
a2fbb9ea
ET
6865 /* end of fastpath */
6866
6867 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6868 sizeof(struct host_def_status_block));
6869
6870 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6871 sizeof(struct bnx2x_slowpath));
6872
37b091ba 6873#ifdef BCM_CNIC
a2fbb9ea
ET
6874 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6875
a2fbb9ea
ET
6876 /* allocate searcher T2 table
6877 we allocate 1/4 of alloc num for T2
6878 (which is not entered into the ILT) */
6879 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6880
37b091ba 6881 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 6882 for (i = 0; i < 16*1024; i += 64)
37b091ba 6883 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 6884
37b091ba 6885 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
a2fbb9ea
ET
6886 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6887
6888 /* QM queues (128*MAX_CONN) */
6889 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
6890
6891 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6892 sizeof(struct host_status_block));
a2fbb9ea
ET
6893#endif
6894
6895 /* Slow path ring */
6896 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6897
6898 return 0;
6899
6900alloc_mem_err:
6901 bnx2x_free_mem(bp);
6902 return -ENOMEM;
6903
6904#undef BNX2X_PCI_ALLOC
6905#undef BNX2X_ALLOC
6906}
6907
6908static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6909{
6910 int i;
6911
555f6c78 6912 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6913 struct bnx2x_fastpath *fp = &bp->fp[i];
6914
6915 u16 bd_cons = fp->tx_bd_cons;
6916 u16 sw_prod = fp->tx_pkt_prod;
6917 u16 sw_cons = fp->tx_pkt_cons;
6918
a2fbb9ea
ET
6919 while (sw_cons != sw_prod) {
6920 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6921 sw_cons++;
6922 }
6923 }
6924}
6925
6926static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6927{
6928 int i, j;
6929
555f6c78 6930 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6931 struct bnx2x_fastpath *fp = &bp->fp[j];
6932
a2fbb9ea
ET
6933 for (i = 0; i < NUM_RX_BD; i++) {
6934 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6935 struct sk_buff *skb = rx_buf->skb;
6936
6937 if (skb == NULL)
6938 continue;
6939
6940 pci_unmap_single(bp->pdev,
6941 pci_unmap_addr(rx_buf, mapping),
356e2385 6942 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6943
6944 rx_buf->skb = NULL;
6945 dev_kfree_skb(skb);
6946 }
7a9b2557 6947 if (!fp->disable_tpa)
32626230
EG
6948 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6949 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6950 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6951 }
6952}
6953
6954static void bnx2x_free_skbs(struct bnx2x *bp)
6955{
6956 bnx2x_free_tx_skbs(bp);
6957 bnx2x_free_rx_skbs(bp);
6958}
6959
6960static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6961{
34f80b04 6962 int i, offset = 1;
a2fbb9ea
ET
6963
6964 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6965 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6966 bp->msix_table[0].vector);
6967
37b091ba
MC
6968#ifdef BCM_CNIC
6969 offset++;
6970#endif
a2fbb9ea 6971 for_each_queue(bp, i) {
c14423fe 6972 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6973 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6974 bnx2x_fp(bp, i, state));
6975
34f80b04 6976 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6977 }
a2fbb9ea
ET
6978}
6979
6980static void bnx2x_free_irq(struct bnx2x *bp)
6981{
a2fbb9ea 6982 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6983 bnx2x_free_msix_irqs(bp);
6984 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6985 bp->flags &= ~USING_MSIX_FLAG;
6986
8badd27a
EG
6987 } else if (bp->flags & USING_MSI_FLAG) {
6988 free_irq(bp->pdev->irq, bp->dev);
6989 pci_disable_msi(bp->pdev);
6990 bp->flags &= ~USING_MSI_FLAG;
6991
a2fbb9ea
ET
6992 } else
6993 free_irq(bp->pdev->irq, bp->dev);
6994}
6995
6996static int bnx2x_enable_msix(struct bnx2x *bp)
6997{
8badd27a
EG
6998 int i, rc, offset = 1;
6999 int igu_vec = 0;
a2fbb9ea 7000
8badd27a
EG
7001 bp->msix_table[0].entry = igu_vec;
7002 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 7003
37b091ba
MC
7004#ifdef BCM_CNIC
7005 igu_vec = BP_L_ID(bp) + offset;
7006 bp->msix_table[1].entry = igu_vec;
7007 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7008 offset++;
7009#endif
34f80b04 7010 for_each_queue(bp, i) {
8badd27a 7011 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
7012 bp->msix_table[i + offset].entry = igu_vec;
7013 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7014 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
7015 }
7016
34f80b04 7017 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 7018 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 7019 if (rc) {
8badd27a
EG
7020 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7021 return rc;
34f80b04 7022 }
8badd27a 7023
a2fbb9ea
ET
7024 bp->flags |= USING_MSIX_FLAG;
7025
7026 return 0;
a2fbb9ea
ET
7027}
7028
a2fbb9ea
ET
7029static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7030{
34f80b04 7031 int i, rc, offset = 1;
a2fbb9ea 7032
a2fbb9ea
ET
7033 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7034 bp->dev->name, bp->dev);
a2fbb9ea
ET
7035 if (rc) {
7036 BNX2X_ERR("request sp irq failed\n");
7037 return -EBUSY;
7038 }
7039
37b091ba
MC
7040#ifdef BCM_CNIC
7041 offset++;
7042#endif
a2fbb9ea 7043 for_each_queue(bp, i) {
555f6c78
EG
7044 struct bnx2x_fastpath *fp = &bp->fp[i];
7045
ca00392c
EG
7046 if (i < bp->num_rx_queues)
7047 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7048 else
7049 sprintf(fp->name, "%s-tx-%d",
7050 bp->dev->name, i - bp->num_rx_queues);
7051
34f80b04 7052 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7053 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7054 if (rc) {
555f6c78 7055 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7056 bnx2x_free_msix_irqs(bp);
7057 return -EBUSY;
7058 }
7059
555f6c78 7060 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7061 }
7062
555f6c78 7063 i = BNX2X_NUM_QUEUES(bp);
ca00392c
EG
7064 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7065 " ... fp[%d] %d\n",
7066 bp->dev->name, bp->msix_table[0].vector,
7067 0, bp->msix_table[offset].vector,
7068 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7069
a2fbb9ea 7070 return 0;
a2fbb9ea
ET
7071}
7072
8badd27a
EG
7073static int bnx2x_enable_msi(struct bnx2x *bp)
7074{
7075 int rc;
7076
7077 rc = pci_enable_msi(bp->pdev);
7078 if (rc) {
7079 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7080 return -1;
7081 }
7082 bp->flags |= USING_MSI_FLAG;
7083
7084 return 0;
7085}
7086
a2fbb9ea
ET
7087static int bnx2x_req_irq(struct bnx2x *bp)
7088{
8badd27a 7089 unsigned long flags;
34f80b04 7090 int rc;
a2fbb9ea 7091
8badd27a
EG
7092 if (bp->flags & USING_MSI_FLAG)
7093 flags = 0;
7094 else
7095 flags = IRQF_SHARED;
7096
7097 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7098 bp->dev->name, bp->dev);
a2fbb9ea
ET
7099 if (!rc)
7100 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7101
7102 return rc;
a2fbb9ea
ET
7103}
7104
65abd74d
YG
7105static void bnx2x_napi_enable(struct bnx2x *bp)
7106{
7107 int i;
7108
555f6c78 7109 for_each_rx_queue(bp, i)
65abd74d
YG
7110 napi_enable(&bnx2x_fp(bp, i, napi));
7111}
7112
7113static void bnx2x_napi_disable(struct bnx2x *bp)
7114{
7115 int i;
7116
555f6c78 7117 for_each_rx_queue(bp, i)
65abd74d
YG
7118 napi_disable(&bnx2x_fp(bp, i, napi));
7119}
7120
7121static void bnx2x_netif_start(struct bnx2x *bp)
7122{
e1510706
EG
7123 int intr_sem;
7124
7125 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7126 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7127
7128 if (intr_sem) {
65abd74d 7129 if (netif_running(bp->dev)) {
65abd74d
YG
7130 bnx2x_napi_enable(bp);
7131 bnx2x_int_enable(bp);
555f6c78
EG
7132 if (bp->state == BNX2X_STATE_OPEN)
7133 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7134 }
7135 }
7136}
7137
f8ef6e44 7138static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7139{
f8ef6e44 7140 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7141 bnx2x_napi_disable(bp);
762d5f6c
EG
7142 netif_tx_disable(bp->dev);
7143 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
7144}
7145
a2fbb9ea
ET
7146/*
7147 * Init service functions
7148 */
7149
e665bfda
MC
7150/**
7151 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7152 *
7153 * @param bp driver descriptor
7154 * @param set set or clear an entry (1 or 0)
7155 * @param mac pointer to a buffer containing a MAC
7156 * @param cl_bit_vec bit vector of clients to register a MAC for
7157 * @param cam_offset offset in a CAM to use
7158 * @param with_bcast set broadcast MAC as well
7159 */
7160static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7161 u32 cl_bit_vec, u8 cam_offset,
7162 u8 with_bcast)
a2fbb9ea
ET
7163{
7164 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7165 int port = BP_PORT(bp);
a2fbb9ea
ET
7166
7167 /* CAM allocation
7168 * unicasts 0-31:port0 32-63:port1
7169 * multicast 64-127:port0 128-191:port1
7170 */
e665bfda
MC
7171 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7172 config->hdr.offset = cam_offset;
7173 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7174 config->hdr.reserved1 = 0;
7175
7176 /* primary MAC */
7177 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7178 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7179 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7180 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7181 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7182 swab16(*(u16 *)&mac[4]);
34f80b04 7183 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7184 if (set)
7185 config->config_table[0].target_table_entry.flags = 0;
7186 else
7187 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7188 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7189 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7190 config->config_table[0].target_table_entry.vlan_id = 0;
7191
3101c2bc
YG
7192 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7193 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7194 config->config_table[0].cam_entry.msb_mac_addr,
7195 config->config_table[0].cam_entry.middle_mac_addr,
7196 config->config_table[0].cam_entry.lsb_mac_addr);
7197
7198 /* broadcast */
e665bfda
MC
7199 if (with_bcast) {
7200 config->config_table[1].cam_entry.msb_mac_addr =
7201 cpu_to_le16(0xffff);
7202 config->config_table[1].cam_entry.middle_mac_addr =
7203 cpu_to_le16(0xffff);
7204 config->config_table[1].cam_entry.lsb_mac_addr =
7205 cpu_to_le16(0xffff);
7206 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7207 if (set)
7208 config->config_table[1].target_table_entry.flags =
7209 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7210 else
7211 CAM_INVALIDATE(config->config_table[1]);
7212 config->config_table[1].target_table_entry.clients_bit_vector =
7213 cpu_to_le32(cl_bit_vec);
7214 config->config_table[1].target_table_entry.vlan_id = 0;
7215 }
a2fbb9ea
ET
7216
7217 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7218 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7219 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7220}
7221
e665bfda
MC
7222/**
7223 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7224 *
7225 * @param bp driver descriptor
7226 * @param set set or clear an entry (1 or 0)
7227 * @param mac pointer to a buffer containing a MAC
7228 * @param cl_bit_vec bit vector of clients to register a MAC for
7229 * @param cam_offset offset in a CAM to use
7230 */
7231static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7232 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7233{
7234 struct mac_configuration_cmd_e1h *config =
7235 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7236
8d9c5f34 7237 config->hdr.length = 1;
e665bfda
MC
7238 config->hdr.offset = cam_offset;
7239 config->hdr.client_id = 0xff;
34f80b04
EG
7240 config->hdr.reserved1 = 0;
7241
7242 /* primary MAC */
7243 config->config_table[0].msb_mac_addr =
e665bfda 7244 swab16(*(u16 *)&mac[0]);
34f80b04 7245 config->config_table[0].middle_mac_addr =
e665bfda 7246 swab16(*(u16 *)&mac[2]);
34f80b04 7247 config->config_table[0].lsb_mac_addr =
e665bfda 7248 swab16(*(u16 *)&mac[4]);
ca00392c 7249 config->config_table[0].clients_bit_vector =
e665bfda 7250 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7251 config->config_table[0].vlan_id = 0;
7252 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7253 if (set)
7254 config->config_table[0].flags = BP_PORT(bp);
7255 else
7256 config->config_table[0].flags =
7257 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7258
e665bfda 7259 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7260 (set ? "setting" : "clearing"),
34f80b04
EG
7261 config->config_table[0].msb_mac_addr,
7262 config->config_table[0].middle_mac_addr,
e665bfda 7263 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7264
7265 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7266 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7267 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7268}
7269
a2fbb9ea
ET
7270static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7271 int *state_p, int poll)
7272{
7273 /* can take a while if any port is running */
8b3a0f0b 7274 int cnt = 5000;
a2fbb9ea 7275
c14423fe
ET
7276 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7277 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7278
7279 might_sleep();
34f80b04 7280 while (cnt--) {
a2fbb9ea
ET
7281 if (poll) {
7282 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7283 /* if index is different from 0
7284 * the reply for some commands will
3101c2bc 7285 * be on the non default queue
a2fbb9ea
ET
7286 */
7287 if (idx)
7288 bnx2x_rx_int(&bp->fp[idx], 10);
7289 }
a2fbb9ea 7290
3101c2bc 7291 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7292 if (*state_p == state) {
7293#ifdef BNX2X_STOP_ON_ERROR
7294 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7295#endif
a2fbb9ea 7296 return 0;
8b3a0f0b 7297 }
a2fbb9ea 7298
a2fbb9ea 7299 msleep(1);
e3553b29
EG
7300
7301 if (bp->panic)
7302 return -EIO;
a2fbb9ea
ET
7303 }
7304
a2fbb9ea 7305 /* timeout! */
49d66772
ET
7306 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7307 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7308#ifdef BNX2X_STOP_ON_ERROR
7309 bnx2x_panic();
7310#endif
a2fbb9ea 7311
49d66772 7312 return -EBUSY;
a2fbb9ea
ET
7313}
7314
e665bfda
MC
7315static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7316{
7317 bp->set_mac_pending++;
7318 smp_wmb();
7319
7320 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7321 (1 << bp->fp->cl_id), BP_FUNC(bp));
7322
7323 /* Wait for a completion */
7324 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7325}
7326
7327static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7328{
7329 bp->set_mac_pending++;
7330 smp_wmb();
7331
7332 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7333 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7334 1);
7335
7336 /* Wait for a completion */
7337 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7338}
7339
993ac7b5
MC
7340#ifdef BCM_CNIC
7341/**
7342 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7343 * MAC(s). This function will wait until the ramdord completion
7344 * returns.
7345 *
7346 * @param bp driver handle
7347 * @param set set or clear the CAM entry
7348 *
7349 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7350 */
7351static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7352{
7353 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7354
7355 bp->set_mac_pending++;
7356 smp_wmb();
7357
7358 /* Send a SET_MAC ramrod */
7359 if (CHIP_IS_E1(bp))
7360 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7361 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7362 1);
7363 else
7364 /* CAM allocation for E1H
7365 * unicasts: by func number
7366 * multicast: 20+FUNC*20, 20 each
7367 */
7368 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7369 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7370
7371 /* Wait for a completion when setting */
7372 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7373
7374 return 0;
7375}
7376#endif
7377
a2fbb9ea
ET
7378static int bnx2x_setup_leading(struct bnx2x *bp)
7379{
34f80b04 7380 int rc;
a2fbb9ea 7381
c14423fe 7382 /* reset IGU state */
34f80b04 7383 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7384
7385 /* SETUP ramrod */
7386 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7387
34f80b04
EG
7388 /* Wait for completion */
7389 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7390
34f80b04 7391 return rc;
a2fbb9ea
ET
7392}
7393
7394static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7395{
555f6c78
EG
7396 struct bnx2x_fastpath *fp = &bp->fp[index];
7397
a2fbb9ea 7398 /* reset IGU state */
555f6c78 7399 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7400
228241eb 7401 /* SETUP ramrod */
555f6c78
EG
7402 fp->state = BNX2X_FP_STATE_OPENING;
7403 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7404 fp->cl_id, 0);
a2fbb9ea
ET
7405
7406 /* Wait for completion */
7407 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7408 &(fp->state), 0);
a2fbb9ea
ET
7409}
7410
a2fbb9ea 7411static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7412
ca00392c
EG
7413static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7414 int *num_tx_queues_out)
7415{
7416 int _num_rx_queues = 0, _num_tx_queues = 0;
7417
7418 switch (bp->multi_mode) {
7419 case ETH_RSS_MODE_DISABLED:
7420 _num_rx_queues = 1;
7421 _num_tx_queues = 1;
7422 break;
7423
7424 case ETH_RSS_MODE_REGULAR:
7425 if (num_rx_queues)
7426 _num_rx_queues = min_t(u32, num_rx_queues,
7427 BNX2X_MAX_QUEUES(bp));
7428 else
7429 _num_rx_queues = min_t(u32, num_online_cpus(),
7430 BNX2X_MAX_QUEUES(bp));
7431
7432 if (num_tx_queues)
7433 _num_tx_queues = min_t(u32, num_tx_queues,
7434 BNX2X_MAX_QUEUES(bp));
7435 else
7436 _num_tx_queues = min_t(u32, num_online_cpus(),
7437 BNX2X_MAX_QUEUES(bp));
7438
7439 /* There must be not more Tx queues than Rx queues */
7440 if (_num_tx_queues > _num_rx_queues) {
7441 BNX2X_ERR("number of tx queues (%d) > "
7442 "number of rx queues (%d)"
7443 " defaulting to %d\n",
7444 _num_tx_queues, _num_rx_queues,
7445 _num_rx_queues);
7446 _num_tx_queues = _num_rx_queues;
7447 }
7448 break;
7449
7450
7451 default:
7452 _num_rx_queues = 1;
7453 _num_tx_queues = 1;
7454 break;
7455 }
7456
7457 *num_rx_queues_out = _num_rx_queues;
7458 *num_tx_queues_out = _num_tx_queues;
7459}
7460
7461static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7462{
ca00392c 7463 int rc = 0;
a2fbb9ea 7464
8badd27a
EG
7465 switch (int_mode) {
7466 case INT_MODE_INTx:
7467 case INT_MODE_MSI:
ca00392c
EG
7468 bp->num_rx_queues = 1;
7469 bp->num_tx_queues = 1;
7470 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a
EG
7471 break;
7472
7473 case INT_MODE_MSIX:
7474 default:
ca00392c
EG
7475 /* Set interrupt mode according to bp->multi_mode value */
7476 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7477 &bp->num_tx_queues);
7478
7479 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7480 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7481
2dfe0e1f
EG
7482 /* if we can't use MSI-X we only need one fp,
7483 * so try to enable MSI-X with the requested number of fp's
7484 * and fallback to MSI or legacy INTx with one fp
7485 */
ca00392c
EG
7486 rc = bnx2x_enable_msix(bp);
7487 if (rc) {
34f80b04 7488 /* failed to enable MSI-X */
ca00392c
EG
7489 bp->num_rx_queues = 1;
7490 bp->num_tx_queues = 1;
a2fbb9ea 7491 }
8badd27a 7492 break;
a2fbb9ea 7493 }
555f6c78 7494 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7495 return rc;
8badd27a
EG
7496}
7497
993ac7b5
MC
7498#ifdef BCM_CNIC
7499static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7500static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7501#endif
8badd27a
EG
7502
7503/* must be called with rtnl_lock */
7504static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7505{
7506 u32 load_code;
ca00392c
EG
7507 int i, rc;
7508
8badd27a 7509#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7510 if (unlikely(bp->panic))
7511 return -EPERM;
7512#endif
7513
7514 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7515
ca00392c 7516 rc = bnx2x_set_int_mode(bp);
c14423fe 7517
a2fbb9ea
ET
7518 if (bnx2x_alloc_mem(bp))
7519 return -ENOMEM;
7520
555f6c78 7521 for_each_rx_queue(bp, i)
7a9b2557
VZ
7522 bnx2x_fp(bp, i, disable_tpa) =
7523 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7524
555f6c78 7525 for_each_rx_queue(bp, i)
2dfe0e1f
EG
7526 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7527 bnx2x_poll, 128);
7528
2dfe0e1f
EG
7529 bnx2x_napi_enable(bp);
7530
34f80b04
EG
7531 if (bp->flags & USING_MSIX_FLAG) {
7532 rc = bnx2x_req_msix_irqs(bp);
7533 if (rc) {
7534 pci_disable_msix(bp->pdev);
2dfe0e1f 7535 goto load_error1;
34f80b04
EG
7536 }
7537 } else {
ca00392c
EG
7538 /* Fall to INTx if failed to enable MSI-X due to lack of
7539 memory (in bnx2x_set_int_mode()) */
8badd27a
EG
7540 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7541 bnx2x_enable_msi(bp);
34f80b04
EG
7542 bnx2x_ack_int(bp);
7543 rc = bnx2x_req_irq(bp);
7544 if (rc) {
2dfe0e1f 7545 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
7546 if (bp->flags & USING_MSI_FLAG)
7547 pci_disable_msi(bp->pdev);
2dfe0e1f 7548 goto load_error1;
a2fbb9ea 7549 }
8badd27a
EG
7550 if (bp->flags & USING_MSI_FLAG) {
7551 bp->dev->irq = bp->pdev->irq;
7552 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7553 bp->dev->name, bp->pdev->irq);
7554 }
a2fbb9ea
ET
7555 }
7556
2dfe0e1f
EG
7557 /* Send LOAD_REQUEST command to MCP
7558 Returns the type of LOAD command:
7559 if it is the first port to be initialized
7560 common blocks should be initialized, otherwise - not
7561 */
7562 if (!BP_NOMCP(bp)) {
7563 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7564 if (!load_code) {
7565 BNX2X_ERR("MCP response failure, aborting\n");
7566 rc = -EBUSY;
7567 goto load_error2;
7568 }
7569 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7570 rc = -EBUSY; /* other port in diagnostic mode */
7571 goto load_error2;
7572 }
7573
7574 } else {
7575 int port = BP_PORT(bp);
7576
f5372251 7577 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7578 load_count[0], load_count[1], load_count[2]);
7579 load_count[0]++;
7580 load_count[1 + port]++;
f5372251 7581 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7582 load_count[0], load_count[1], load_count[2]);
7583 if (load_count[0] == 1)
7584 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7585 else if (load_count[1 + port] == 1)
7586 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7587 else
7588 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7589 }
7590
7591 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7592 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7593 bp->port.pmf = 1;
7594 else
7595 bp->port.pmf = 0;
7596 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7597
a2fbb9ea 7598 /* Initialize HW */
34f80b04
EG
7599 rc = bnx2x_init_hw(bp, load_code);
7600 if (rc) {
a2fbb9ea 7601 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7602 goto load_error2;
a2fbb9ea
ET
7603 }
7604
a2fbb9ea 7605 /* Setup NIC internals and enable interrupts */
471de716 7606 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7607
2691d51d
EG
7608 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7609 (bp->common.shmem2_base))
7610 SHMEM2_WR(bp, dcc_support,
7611 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7612 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7613
a2fbb9ea 7614 /* Send LOAD_DONE command to MCP */
34f80b04 7615 if (!BP_NOMCP(bp)) {
228241eb
ET
7616 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7617 if (!load_code) {
da5a662a 7618 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7619 rc = -EBUSY;
2dfe0e1f 7620 goto load_error3;
a2fbb9ea
ET
7621 }
7622 }
7623
7624 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7625
34f80b04
EG
7626 rc = bnx2x_setup_leading(bp);
7627 if (rc) {
da5a662a 7628 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7629#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7630 goto load_error3;
e3553b29
EG
7631#else
7632 bp->panic = 1;
7633 return -EBUSY;
7634#endif
34f80b04 7635 }
a2fbb9ea 7636
34f80b04
EG
7637 if (CHIP_IS_E1H(bp))
7638 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7639 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 7640 bp->flags |= MF_FUNC_DIS;
34f80b04 7641 }
a2fbb9ea 7642
ca00392c 7643 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
7644#ifdef BCM_CNIC
7645 /* Enable Timer scan */
7646 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7647#endif
34f80b04
EG
7648 for_each_nondefault_queue(bp, i) {
7649 rc = bnx2x_setup_multi(bp, i);
7650 if (rc)
37b091ba
MC
7651#ifdef BCM_CNIC
7652 goto load_error4;
7653#else
2dfe0e1f 7654 goto load_error3;
37b091ba 7655#endif
34f80b04 7656 }
a2fbb9ea 7657
ca00392c 7658 if (CHIP_IS_E1(bp))
e665bfda 7659 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 7660 else
e665bfda 7661 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
7662#ifdef BCM_CNIC
7663 /* Set iSCSI L2 MAC */
7664 mutex_lock(&bp->cnic_mutex);
7665 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7666 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7667 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7668 }
7669 mutex_unlock(&bp->cnic_mutex);
7670#endif
ca00392c 7671 }
34f80b04
EG
7672
7673 if (bp->port.pmf)
b5bf9068 7674 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
7675
7676 /* Start fast path */
34f80b04
EG
7677 switch (load_mode) {
7678 case LOAD_NORMAL:
ca00392c
EG
7679 if (bp->state == BNX2X_STATE_OPEN) {
7680 /* Tx queue should be only reenabled */
7681 netif_tx_wake_all_queues(bp->dev);
7682 }
2dfe0e1f 7683 /* Initialize the receive filter. */
34f80b04
EG
7684 bnx2x_set_rx_mode(bp->dev);
7685 break;
7686
7687 case LOAD_OPEN:
555f6c78 7688 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
7689 if (bp->state != BNX2X_STATE_OPEN)
7690 netif_tx_disable(bp->dev);
2dfe0e1f 7691 /* Initialize the receive filter. */
34f80b04 7692 bnx2x_set_rx_mode(bp->dev);
34f80b04 7693 break;
a2fbb9ea 7694
34f80b04 7695 case LOAD_DIAG:
2dfe0e1f 7696 /* Initialize the receive filter. */
a2fbb9ea 7697 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
7698 bp->state = BNX2X_STATE_DIAG;
7699 break;
7700
7701 default:
7702 break;
a2fbb9ea
ET
7703 }
7704
34f80b04
EG
7705 if (!bp->port.pmf)
7706 bnx2x__link_status_update(bp);
7707
a2fbb9ea
ET
7708 /* start the timer */
7709 mod_timer(&bp->timer, jiffies + bp->current_interval);
7710
993ac7b5
MC
7711#ifdef BCM_CNIC
7712 bnx2x_setup_cnic_irq_info(bp);
7713 if (bp->state == BNX2X_STATE_OPEN)
7714 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7715#endif
34f80b04 7716
a2fbb9ea
ET
7717 return 0;
7718
37b091ba
MC
7719#ifdef BCM_CNIC
7720load_error4:
7721 /* Disable Timer scan */
7722 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7723#endif
2dfe0e1f
EG
7724load_error3:
7725 bnx2x_int_disable_sync(bp, 1);
7726 if (!BP_NOMCP(bp)) {
7727 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7728 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7729 }
7730 bp->port.pmf = 0;
7a9b2557
VZ
7731 /* Free SKBs, SGEs, TPA pool and driver internals */
7732 bnx2x_free_skbs(bp);
555f6c78 7733 for_each_rx_queue(bp, i)
3196a88a 7734 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7735load_error2:
d1014634
YG
7736 /* Release IRQs */
7737 bnx2x_free_irq(bp);
2dfe0e1f
EG
7738load_error1:
7739 bnx2x_napi_disable(bp);
555f6c78 7740 for_each_rx_queue(bp, i)
7cde1c8b 7741 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7742 bnx2x_free_mem(bp);
7743
34f80b04 7744 return rc;
a2fbb9ea
ET
7745}
7746
7747static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7748{
555f6c78 7749 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7750 int rc;
7751
c14423fe 7752 /* halt the connection */
555f6c78
EG
7753 fp->state = BNX2X_FP_STATE_HALTING;
7754 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7755
34f80b04 7756 /* Wait for completion */
a2fbb9ea 7757 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7758 &(fp->state), 1);
c14423fe 7759 if (rc) /* timeout */
a2fbb9ea
ET
7760 return rc;
7761
7762 /* delete cfc entry */
7763 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7764
34f80b04
EG
7765 /* Wait for completion */
7766 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7767 &(fp->state), 1);
34f80b04 7768 return rc;
a2fbb9ea
ET
7769}
7770
da5a662a 7771static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7772{
4781bfad 7773 __le16 dsb_sp_prod_idx;
c14423fe 7774 /* if the other port is handling traffic,
a2fbb9ea 7775 this can take a lot of time */
34f80b04
EG
7776 int cnt = 500;
7777 int rc;
a2fbb9ea
ET
7778
7779 might_sleep();
7780
7781 /* Send HALT ramrod */
7782 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7783 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7784
34f80b04
EG
7785 /* Wait for completion */
7786 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7787 &(bp->fp[0].state), 1);
7788 if (rc) /* timeout */
da5a662a 7789 return rc;
a2fbb9ea 7790
49d66772 7791 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7792
228241eb 7793 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7794 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7795
49d66772 7796 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7797 we are going to reset the chip anyway
7798 so there is not much to do if this times out
7799 */
34f80b04 7800 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7801 if (!cnt) {
7802 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7803 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7804 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7805#ifdef BNX2X_STOP_ON_ERROR
7806 bnx2x_panic();
7807#endif
36e552ab 7808 rc = -EBUSY;
34f80b04
EG
7809 break;
7810 }
7811 cnt--;
da5a662a 7812 msleep(1);
5650d9d4 7813 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7814 }
7815 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7816 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7817
7818 return rc;
a2fbb9ea
ET
7819}
7820
34f80b04
EG
7821static void bnx2x_reset_func(struct bnx2x *bp)
7822{
7823 int port = BP_PORT(bp);
7824 int func = BP_FUNC(bp);
7825 int base, i;
7826
7827 /* Configure IGU */
7828 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7829 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7830
37b091ba
MC
7831#ifdef BCM_CNIC
7832 /* Disable Timer scan */
7833 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7834 /*
7835 * Wait for at least 10ms and up to 2 second for the timers scan to
7836 * complete
7837 */
7838 for (i = 0; i < 200; i++) {
7839 msleep(10);
7840 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7841 break;
7842 }
7843#endif
34f80b04
EG
7844 /* Clear ILT */
7845 base = FUNC_ILT_BASE(func);
7846 for (i = base; i < base + ILT_PER_FUNC; i++)
7847 bnx2x_ilt_wr(bp, i, 0);
7848}
7849
7850static void bnx2x_reset_port(struct bnx2x *bp)
7851{
7852 int port = BP_PORT(bp);
7853 u32 val;
7854
7855 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7856
7857 /* Do not rcv packets to BRB */
7858 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7859 /* Do not direct rcv packets that are not for MCP to the BRB */
7860 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7861 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7862
7863 /* Configure AEU */
7864 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7865
7866 msleep(100);
7867 /* Check for BRB port occupancy */
7868 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7869 if (val)
7870 DP(NETIF_MSG_IFDOWN,
33471629 7871 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7872
7873 /* TODO: Close Doorbell port? */
7874}
7875
34f80b04
EG
7876static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7877{
7878 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7879 BP_FUNC(bp), reset_code);
7880
7881 switch (reset_code) {
7882 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7883 bnx2x_reset_port(bp);
7884 bnx2x_reset_func(bp);
7885 bnx2x_reset_common(bp);
7886 break;
7887
7888 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7889 bnx2x_reset_port(bp);
7890 bnx2x_reset_func(bp);
7891 break;
7892
7893 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7894 bnx2x_reset_func(bp);
7895 break;
49d66772 7896
34f80b04
EG
7897 default:
7898 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7899 break;
7900 }
7901}
7902
33471629 7903/* must be called with rtnl_lock */
34f80b04 7904static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7905{
da5a662a 7906 int port = BP_PORT(bp);
a2fbb9ea 7907 u32 reset_code = 0;
da5a662a 7908 int i, cnt, rc;
a2fbb9ea 7909
993ac7b5
MC
7910#ifdef BCM_CNIC
7911 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7912#endif
a2fbb9ea
ET
7913 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7914
ab6ad5a4 7915 /* Set "drop all" */
228241eb
ET
7916 bp->rx_mode = BNX2X_RX_MODE_NONE;
7917 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7918
ab6ad5a4 7919 /* Disable HW interrupts, NAPI and Tx */
f8ef6e44 7920 bnx2x_netif_stop(bp, 1);
e94d8af3 7921
34f80b04
EG
7922 del_timer_sync(&bp->timer);
7923 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7924 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7925 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7926
70b9986c
EG
7927 /* Release IRQs */
7928 bnx2x_free_irq(bp);
7929
555f6c78
EG
7930 /* Wait until tx fastpath tasks complete */
7931 for_each_tx_queue(bp, i) {
228241eb
ET
7932 struct bnx2x_fastpath *fp = &bp->fp[i];
7933
34f80b04 7934 cnt = 1000;
e8b5fc51 7935 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7936
7961f791 7937 bnx2x_tx_int(fp);
34f80b04
EG
7938 if (!cnt) {
7939 BNX2X_ERR("timeout waiting for queue[%d]\n",
7940 i);
7941#ifdef BNX2X_STOP_ON_ERROR
7942 bnx2x_panic();
7943 return -EBUSY;
7944#else
7945 break;
7946#endif
7947 }
7948 cnt--;
da5a662a 7949 msleep(1);
34f80b04 7950 }
228241eb 7951 }
da5a662a
VZ
7952 /* Give HW time to discard old tx messages */
7953 msleep(1);
a2fbb9ea 7954
3101c2bc
YG
7955 if (CHIP_IS_E1(bp)) {
7956 struct mac_configuration_cmd *config =
7957 bnx2x_sp(bp, mcast_config);
7958
e665bfda 7959 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 7960
8d9c5f34 7961 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7962 CAM_INVALIDATE(config->config_table[i]);
7963
8d9c5f34 7964 config->hdr.length = i;
3101c2bc
YG
7965 if (CHIP_REV_IS_SLOW(bp))
7966 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7967 else
7968 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7969 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7970 config->hdr.reserved1 = 0;
7971
e665bfda
MC
7972 bp->set_mac_pending++;
7973 smp_wmb();
7974
3101c2bc
YG
7975 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7976 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7977 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7978
7979 } else { /* E1H */
65abd74d
YG
7980 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7981
e665bfda 7982 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
7983
7984 for (i = 0; i < MC_HASH_SIZE; i++)
7985 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
7986
7987 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 7988 }
993ac7b5
MC
7989#ifdef BCM_CNIC
7990 /* Clear iSCSI L2 MAC */
7991 mutex_lock(&bp->cnic_mutex);
7992 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7993 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7994 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7995 }
7996 mutex_unlock(&bp->cnic_mutex);
7997#endif
3101c2bc 7998
65abd74d
YG
7999 if (unload_mode == UNLOAD_NORMAL)
8000 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8001
7d0446c2 8002 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8003 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8004
7d0446c2 8005 else if (bp->wol) {
65abd74d
YG
8006 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8007 u8 *mac_addr = bp->dev->dev_addr;
8008 u32 val;
8009 /* The mac address is written to entries 1-4 to
8010 preserve entry 0 which is used by the PMF */
8011 u8 entry = (BP_E1HVN(bp) + 1)*8;
8012
8013 val = (mac_addr[0] << 8) | mac_addr[1];
8014 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8015
8016 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8017 (mac_addr[4] << 8) | mac_addr[5];
8018 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8019
8020 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8021
8022 } else
8023 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8024
34f80b04
EG
8025 /* Close multi and leading connections
8026 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
8027 for_each_nondefault_queue(bp, i)
8028 if (bnx2x_stop_multi(bp, i))
228241eb 8029 goto unload_error;
a2fbb9ea 8030
da5a662a
VZ
8031 rc = bnx2x_stop_leading(bp);
8032 if (rc) {
34f80b04 8033 BNX2X_ERR("Stop leading failed!\n");
da5a662a 8034#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8035 return -EBUSY;
da5a662a
VZ
8036#else
8037 goto unload_error;
34f80b04 8038#endif
228241eb
ET
8039 }
8040
8041unload_error:
34f80b04 8042 if (!BP_NOMCP(bp))
228241eb 8043 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 8044 else {
f5372251 8045 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
8046 load_count[0], load_count[1], load_count[2]);
8047 load_count[0]--;
da5a662a 8048 load_count[1 + port]--;
f5372251 8049 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
8050 load_count[0], load_count[1], load_count[2]);
8051 if (load_count[0] == 0)
8052 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 8053 else if (load_count[1 + port] == 0)
34f80b04
EG
8054 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8055 else
8056 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8057 }
a2fbb9ea 8058
34f80b04
EG
8059 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8060 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8061 bnx2x__link_reset(bp);
a2fbb9ea
ET
8062
8063 /* Reset the chip */
228241eb 8064 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
8065
8066 /* Report UNLOAD_DONE to MCP */
34f80b04 8067 if (!BP_NOMCP(bp))
a2fbb9ea 8068 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8069
9a035440 8070 bp->port.pmf = 0;
a2fbb9ea 8071
7a9b2557 8072 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8073 bnx2x_free_skbs(bp);
555f6c78 8074 for_each_rx_queue(bp, i)
3196a88a 8075 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 8076 for_each_rx_queue(bp, i)
7cde1c8b 8077 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8078 bnx2x_free_mem(bp);
8079
8080 bp->state = BNX2X_STATE_CLOSED;
228241eb 8081
a2fbb9ea
ET
8082 netif_carrier_off(bp->dev);
8083
8084 return 0;
8085}
8086
34f80b04
EG
8087static void bnx2x_reset_task(struct work_struct *work)
8088{
8089 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8090
8091#ifdef BNX2X_STOP_ON_ERROR
8092 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8093 " so reset not done to allow debug dump,\n"
ad361c98 8094 " you will need to reboot when done\n");
34f80b04
EG
8095 return;
8096#endif
8097
8098 rtnl_lock();
8099
8100 if (!netif_running(bp->dev))
8101 goto reset_task_exit;
8102
8103 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8104 bnx2x_nic_load(bp, LOAD_NORMAL);
8105
8106reset_task_exit:
8107 rtnl_unlock();
8108}
8109
a2fbb9ea
ET
8110/* end of nic load/unload */
8111
8112/* ethtool_ops */
8113
8114/*
8115 * Init service functions
8116 */
8117
f1ef27ef
EG
8118static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8119{
8120 switch (func) {
8121 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8122 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8123 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8124 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8125 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8126 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8127 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8128 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8129 default:
8130 BNX2X_ERR("Unsupported function index: %d\n", func);
8131 return (u32)(-1);
8132 }
8133}
8134
8135static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8136{
8137 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8138
8139 /* Flush all outstanding writes */
8140 mmiowb();
8141
8142 /* Pretend to be function 0 */
8143 REG_WR(bp, reg, 0);
8144 /* Flush the GRC transaction (in the chip) */
8145 new_val = REG_RD(bp, reg);
8146 if (new_val != 0) {
8147 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8148 new_val);
8149 BUG();
8150 }
8151
8152 /* From now we are in the "like-E1" mode */
8153 bnx2x_int_disable(bp);
8154
8155 /* Flush all outstanding writes */
8156 mmiowb();
8157
8158 /* Restore the original funtion settings */
8159 REG_WR(bp, reg, orig_func);
8160 new_val = REG_RD(bp, reg);
8161 if (new_val != orig_func) {
8162 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8163 orig_func, new_val);
8164 BUG();
8165 }
8166}
8167
8168static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8169{
8170 if (CHIP_IS_E1H(bp))
8171 bnx2x_undi_int_disable_e1h(bp, func);
8172 else
8173 bnx2x_int_disable(bp);
8174}
8175
34f80b04
EG
8176static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8177{
8178 u32 val;
8179
8180 /* Check if there is any driver already loaded */
8181 val = REG_RD(bp, MISC_REG_UNPREPARED);
8182 if (val == 0x1) {
8183 /* Check if it is the UNDI driver
8184 * UNDI driver initializes CID offset for normal bell to 0x7
8185 */
4a37fb66 8186 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8187 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8188 if (val == 0x7) {
8189 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8190 /* save our func */
34f80b04 8191 int func = BP_FUNC(bp);
da5a662a
VZ
8192 u32 swap_en;
8193 u32 swap_val;
34f80b04 8194
b4661739
EG
8195 /* clear the UNDI indication */
8196 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8197
34f80b04
EG
8198 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8199
8200 /* try unload UNDI on port 0 */
8201 bp->func = 0;
da5a662a
VZ
8202 bp->fw_seq =
8203 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8204 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 8205 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8206
8207 /* if UNDI is loaded on the other port */
8208 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8209
da5a662a
VZ
8210 /* send "DONE" for previous unload */
8211 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8212
8213 /* unload UNDI on port 1 */
34f80b04 8214 bp->func = 1;
da5a662a
VZ
8215 bp->fw_seq =
8216 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8217 DRV_MSG_SEQ_NUMBER_MASK);
8218 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8219
8220 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8221 }
8222
b4661739
EG
8223 /* now it's safe to release the lock */
8224 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8225
f1ef27ef 8226 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
8227
8228 /* close input traffic and wait for it */
8229 /* Do not rcv packets to BRB */
8230 REG_WR(bp,
8231 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8232 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8233 /* Do not direct rcv packets that are not for MCP to
8234 * the BRB */
8235 REG_WR(bp,
8236 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8237 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8238 /* clear AEU */
8239 REG_WR(bp,
8240 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8241 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8242 msleep(10);
8243
8244 /* save NIG port swap info */
8245 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8246 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
8247 /* reset device */
8248 REG_WR(bp,
8249 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 8250 0xd3ffffff);
34f80b04
EG
8251 REG_WR(bp,
8252 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8253 0x1403);
da5a662a
VZ
8254 /* take the NIG out of reset and restore swap values */
8255 REG_WR(bp,
8256 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8257 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8258 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8259 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8260
8261 /* send unload done to the MCP */
8262 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8263
8264 /* restore our func and fw_seq */
8265 bp->func = func;
8266 bp->fw_seq =
8267 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8268 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
8269
8270 } else
8271 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8272 }
8273}
8274
8275static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8276{
8277 u32 val, val2, val3, val4, id;
72ce58c3 8278 u16 pmc;
34f80b04
EG
8279
8280 /* Get the chip revision id and number. */
8281 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8282 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8283 id = ((val & 0xffff) << 16);
8284 val = REG_RD(bp, MISC_REG_CHIP_REV);
8285 id |= ((val & 0xf) << 12);
8286 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8287 id |= ((val & 0xff) << 4);
5a40e08e 8288 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
8289 id |= (val & 0xf);
8290 bp->common.chip_id = id;
8291 bp->link_params.chip_id = bp->common.chip_id;
8292 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8293
1c06328c
EG
8294 val = (REG_RD(bp, 0x2874) & 0x55);
8295 if ((bp->common.chip_id & 0x1) ||
8296 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8297 bp->flags |= ONE_PORT_FLAG;
8298 BNX2X_DEV_INFO("single port device\n");
8299 }
8300
34f80b04
EG
8301 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8302 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8303 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8304 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8305 bp->common.flash_size, bp->common.flash_size);
8306
8307 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8308 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8309 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
8310 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8311 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
8312
8313 if (!bp->common.shmem_base ||
8314 (bp->common.shmem_base < 0xA0000) ||
8315 (bp->common.shmem_base >= 0xC0000)) {
8316 BNX2X_DEV_INFO("MCP not active\n");
8317 bp->flags |= NO_MCP_FLAG;
8318 return;
8319 }
8320
8321 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8322 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8323 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8324 BNX2X_ERR("BAD MCP validity signature\n");
8325
8326 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8327 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8328
8329 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8330 SHARED_HW_CFG_LED_MODE_MASK) >>
8331 SHARED_HW_CFG_LED_MODE_SHIFT);
8332
c2c8b03e
EG
8333 bp->link_params.feature_config_flags = 0;
8334 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8335 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8336 bp->link_params.feature_config_flags |=
8337 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8338 else
8339 bp->link_params.feature_config_flags &=
8340 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8341
34f80b04
EG
8342 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8343 bp->common.bc_ver = val;
8344 BNX2X_DEV_INFO("bc_ver %X\n", val);
8345 if (val < BNX2X_BC_VER) {
8346 /* for now only warn
8347 * later we might need to enforce this */
8348 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8349 " please upgrade BC\n", BNX2X_BC_VER, val);
8350 }
4d295db0
EG
8351 bp->link_params.feature_config_flags |=
8352 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8353 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
8354
8355 if (BP_E1HVN(bp) == 0) {
8356 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8357 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8358 } else {
8359 /* no WOL capability for E1HVN != 0 */
8360 bp->flags |= NO_WOL_FLAG;
8361 }
8362 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8363 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8364
8365 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8366 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8367 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8368 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8369
8370 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8371 val, val2, val3, val4);
8372}
8373
8374static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8375 u32 switch_cfg)
a2fbb9ea 8376{
34f80b04 8377 int port = BP_PORT(bp);
a2fbb9ea
ET
8378 u32 ext_phy_type;
8379
a2fbb9ea
ET
8380 switch (switch_cfg) {
8381 case SWITCH_CFG_1G:
8382 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8383
c18487ee
YR
8384 ext_phy_type =
8385 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8386 switch (ext_phy_type) {
8387 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8388 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8389 ext_phy_type);
8390
34f80b04
EG
8391 bp->port.supported |= (SUPPORTED_10baseT_Half |
8392 SUPPORTED_10baseT_Full |
8393 SUPPORTED_100baseT_Half |
8394 SUPPORTED_100baseT_Full |
8395 SUPPORTED_1000baseT_Full |
8396 SUPPORTED_2500baseX_Full |
8397 SUPPORTED_TP |
8398 SUPPORTED_FIBRE |
8399 SUPPORTED_Autoneg |
8400 SUPPORTED_Pause |
8401 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8402 break;
8403
8404 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8405 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8406 ext_phy_type);
8407
34f80b04
EG
8408 bp->port.supported |= (SUPPORTED_10baseT_Half |
8409 SUPPORTED_10baseT_Full |
8410 SUPPORTED_100baseT_Half |
8411 SUPPORTED_100baseT_Full |
8412 SUPPORTED_1000baseT_Full |
8413 SUPPORTED_TP |
8414 SUPPORTED_FIBRE |
8415 SUPPORTED_Autoneg |
8416 SUPPORTED_Pause |
8417 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8418 break;
8419
8420 default:
8421 BNX2X_ERR("NVRAM config error. "
8422 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8423 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8424 return;
8425 }
8426
34f80b04
EG
8427 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8428 port*0x10);
8429 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8430 break;
8431
8432 case SWITCH_CFG_10G:
8433 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8434
c18487ee
YR
8435 ext_phy_type =
8436 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8437 switch (ext_phy_type) {
8438 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8439 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8440 ext_phy_type);
8441
34f80b04
EG
8442 bp->port.supported |= (SUPPORTED_10baseT_Half |
8443 SUPPORTED_10baseT_Full |
8444 SUPPORTED_100baseT_Half |
8445 SUPPORTED_100baseT_Full |
8446 SUPPORTED_1000baseT_Full |
8447 SUPPORTED_2500baseX_Full |
8448 SUPPORTED_10000baseT_Full |
8449 SUPPORTED_TP |
8450 SUPPORTED_FIBRE |
8451 SUPPORTED_Autoneg |
8452 SUPPORTED_Pause |
8453 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8454 break;
8455
589abe3a
EG
8456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8457 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8458 ext_phy_type);
f1410647 8459
34f80b04 8460 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8461 SUPPORTED_1000baseT_Full |
34f80b04 8462 SUPPORTED_FIBRE |
589abe3a 8463 SUPPORTED_Autoneg |
34f80b04
EG
8464 SUPPORTED_Pause |
8465 SUPPORTED_Asym_Pause);
f1410647
ET
8466 break;
8467
589abe3a
EG
8468 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8469 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
8470 ext_phy_type);
8471
34f80b04 8472 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8473 SUPPORTED_2500baseX_Full |
34f80b04 8474 SUPPORTED_1000baseT_Full |
589abe3a
EG
8475 SUPPORTED_FIBRE |
8476 SUPPORTED_Autoneg |
8477 SUPPORTED_Pause |
8478 SUPPORTED_Asym_Pause);
8479 break;
8480
8481 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8482 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8483 ext_phy_type);
8484
8485 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
8486 SUPPORTED_FIBRE |
8487 SUPPORTED_Pause |
8488 SUPPORTED_Asym_Pause);
f1410647
ET
8489 break;
8490
589abe3a
EG
8491 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8492 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
8493 ext_phy_type);
8494
34f80b04
EG
8495 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8496 SUPPORTED_1000baseT_Full |
8497 SUPPORTED_FIBRE |
34f80b04
EG
8498 SUPPORTED_Pause |
8499 SUPPORTED_Asym_Pause);
f1410647
ET
8500 break;
8501
589abe3a
EG
8502 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8503 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
8504 ext_phy_type);
8505
34f80b04 8506 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8507 SUPPORTED_1000baseT_Full |
34f80b04 8508 SUPPORTED_Autoneg |
589abe3a 8509 SUPPORTED_FIBRE |
34f80b04
EG
8510 SUPPORTED_Pause |
8511 SUPPORTED_Asym_Pause);
c18487ee
YR
8512 break;
8513
4d295db0
EG
8514 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8515 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8516 ext_phy_type);
8517
8518 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8519 SUPPORTED_1000baseT_Full |
8520 SUPPORTED_Autoneg |
8521 SUPPORTED_FIBRE |
8522 SUPPORTED_Pause |
8523 SUPPORTED_Asym_Pause);
8524 break;
8525
f1410647
ET
8526 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8527 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8528 ext_phy_type);
8529
34f80b04
EG
8530 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8531 SUPPORTED_TP |
8532 SUPPORTED_Autoneg |
8533 SUPPORTED_Pause |
8534 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8535 break;
8536
28577185
EG
8537 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8538 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8539 ext_phy_type);
8540
8541 bp->port.supported |= (SUPPORTED_10baseT_Half |
8542 SUPPORTED_10baseT_Full |
8543 SUPPORTED_100baseT_Half |
8544 SUPPORTED_100baseT_Full |
8545 SUPPORTED_1000baseT_Full |
8546 SUPPORTED_10000baseT_Full |
8547 SUPPORTED_TP |
8548 SUPPORTED_Autoneg |
8549 SUPPORTED_Pause |
8550 SUPPORTED_Asym_Pause);
8551 break;
8552
c18487ee
YR
8553 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8554 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8555 bp->link_params.ext_phy_config);
8556 break;
8557
a2fbb9ea
ET
8558 default:
8559 BNX2X_ERR("NVRAM config error. "
8560 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8561 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8562 return;
8563 }
8564
34f80b04
EG
8565 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8566 port*0x18);
8567 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8568
a2fbb9ea
ET
8569 break;
8570
8571 default:
8572 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8573 bp->port.link_config);
a2fbb9ea
ET
8574 return;
8575 }
34f80b04 8576 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
8577
8578 /* mask what we support according to speed_cap_mask */
c18487ee
YR
8579 if (!(bp->link_params.speed_cap_mask &
8580 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8581 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8582
c18487ee
YR
8583 if (!(bp->link_params.speed_cap_mask &
8584 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8585 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8586
c18487ee
YR
8587 if (!(bp->link_params.speed_cap_mask &
8588 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8589 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8590
c18487ee
YR
8591 if (!(bp->link_params.speed_cap_mask &
8592 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8593 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8594
c18487ee
YR
8595 if (!(bp->link_params.speed_cap_mask &
8596 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
8597 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8598 SUPPORTED_1000baseT_Full);
a2fbb9ea 8599
c18487ee
YR
8600 if (!(bp->link_params.speed_cap_mask &
8601 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8602 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8603
c18487ee
YR
8604 if (!(bp->link_params.speed_cap_mask &
8605 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8606 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8607
34f80b04 8608 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
8609}
8610
34f80b04 8611static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8612{
c18487ee 8613 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8614
34f80b04 8615 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8616 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8617 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8618 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8619 bp->port.advertising = bp->port.supported;
a2fbb9ea 8620 } else {
c18487ee
YR
8621 u32 ext_phy_type =
8622 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8623
8624 if ((ext_phy_type ==
8625 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8626 (ext_phy_type ==
8627 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8628 /* force 10G, no AN */
c18487ee 8629 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8630 bp->port.advertising =
a2fbb9ea
ET
8631 (ADVERTISED_10000baseT_Full |
8632 ADVERTISED_FIBRE);
8633 break;
8634 }
8635 BNX2X_ERR("NVRAM config error. "
8636 "Invalid link_config 0x%x"
8637 " Autoneg not supported\n",
34f80b04 8638 bp->port.link_config);
a2fbb9ea
ET
8639 return;
8640 }
8641 break;
8642
8643 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8644 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8645 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
8646 bp->port.advertising = (ADVERTISED_10baseT_Full |
8647 ADVERTISED_TP);
a2fbb9ea
ET
8648 } else {
8649 BNX2X_ERR("NVRAM config error. "
8650 "Invalid link_config 0x%x"
8651 " speed_cap_mask 0x%x\n",
34f80b04 8652 bp->port.link_config,
c18487ee 8653 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8654 return;
8655 }
8656 break;
8657
8658 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8659 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8660 bp->link_params.req_line_speed = SPEED_10;
8661 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8662 bp->port.advertising = (ADVERTISED_10baseT_Half |
8663 ADVERTISED_TP);
a2fbb9ea
ET
8664 } else {
8665 BNX2X_ERR("NVRAM config error. "
8666 "Invalid link_config 0x%x"
8667 " speed_cap_mask 0x%x\n",
34f80b04 8668 bp->port.link_config,
c18487ee 8669 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8670 return;
8671 }
8672 break;
8673
8674 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8675 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8676 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8677 bp->port.advertising = (ADVERTISED_100baseT_Full |
8678 ADVERTISED_TP);
a2fbb9ea
ET
8679 } else {
8680 BNX2X_ERR("NVRAM config error. "
8681 "Invalid link_config 0x%x"
8682 " speed_cap_mask 0x%x\n",
34f80b04 8683 bp->port.link_config,
c18487ee 8684 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8685 return;
8686 }
8687 break;
8688
8689 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8690 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8691 bp->link_params.req_line_speed = SPEED_100;
8692 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8693 bp->port.advertising = (ADVERTISED_100baseT_Half |
8694 ADVERTISED_TP);
a2fbb9ea
ET
8695 } else {
8696 BNX2X_ERR("NVRAM config error. "
8697 "Invalid link_config 0x%x"
8698 " speed_cap_mask 0x%x\n",
34f80b04 8699 bp->port.link_config,
c18487ee 8700 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8701 return;
8702 }
8703 break;
8704
8705 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8706 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8707 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8708 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8709 ADVERTISED_TP);
a2fbb9ea
ET
8710 } else {
8711 BNX2X_ERR("NVRAM config error. "
8712 "Invalid link_config 0x%x"
8713 " speed_cap_mask 0x%x\n",
34f80b04 8714 bp->port.link_config,
c18487ee 8715 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8716 return;
8717 }
8718 break;
8719
8720 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8721 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8722 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8723 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8724 ADVERTISED_TP);
a2fbb9ea
ET
8725 } else {
8726 BNX2X_ERR("NVRAM config error. "
8727 "Invalid link_config 0x%x"
8728 " speed_cap_mask 0x%x\n",
34f80b04 8729 bp->port.link_config,
c18487ee 8730 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8731 return;
8732 }
8733 break;
8734
8735 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8736 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8737 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8738 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8739 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8740 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8741 ADVERTISED_FIBRE);
a2fbb9ea
ET
8742 } else {
8743 BNX2X_ERR("NVRAM config error. "
8744 "Invalid link_config 0x%x"
8745 " speed_cap_mask 0x%x\n",
34f80b04 8746 bp->port.link_config,
c18487ee 8747 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8748 return;
8749 }
8750 break;
8751
8752 default:
8753 BNX2X_ERR("NVRAM config error. "
8754 "BAD link speed link_config 0x%x\n",
34f80b04 8755 bp->port.link_config);
c18487ee 8756 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8757 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8758 break;
8759 }
a2fbb9ea 8760
34f80b04
EG
8761 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8762 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8763 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8764 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8765 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8766
c18487ee 8767 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8768 " advertising 0x%x\n",
c18487ee
YR
8769 bp->link_params.req_line_speed,
8770 bp->link_params.req_duplex,
34f80b04 8771 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8772}
8773
e665bfda
MC
8774static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8775{
8776 mac_hi = cpu_to_be16(mac_hi);
8777 mac_lo = cpu_to_be32(mac_lo);
8778 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8779 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8780}
8781
34f80b04 8782static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8783{
34f80b04
EG
8784 int port = BP_PORT(bp);
8785 u32 val, val2;
589abe3a 8786 u32 config;
c2c8b03e 8787 u16 i;
01cd4528 8788 u32 ext_phy_type;
a2fbb9ea 8789
c18487ee 8790 bp->link_params.bp = bp;
34f80b04 8791 bp->link_params.port = port;
c18487ee 8792
c18487ee 8793 bp->link_params.lane_config =
a2fbb9ea 8794 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8795 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8796 SHMEM_RD(bp,
8797 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8798 /* BCM8727_NOC => BCM8727 no over current */
8799 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8800 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8801 bp->link_params.ext_phy_config &=
8802 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8803 bp->link_params.ext_phy_config |=
8804 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8805 bp->link_params.feature_config_flags |=
8806 FEATURE_CONFIG_BCM8727_NOC;
8807 }
8808
c18487ee 8809 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8810 SHMEM_RD(bp,
8811 dev_info.port_hw_config[port].speed_capability_mask);
8812
34f80b04 8813 bp->port.link_config =
a2fbb9ea
ET
8814 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8815
c2c8b03e
EG
8816 /* Get the 4 lanes xgxs config rx and tx */
8817 for (i = 0; i < 2; i++) {
8818 val = SHMEM_RD(bp,
8819 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8820 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8821 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8822
8823 val = SHMEM_RD(bp,
8824 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8825 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8826 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8827 }
8828
3ce2c3f9
EG
8829 /* If the device is capable of WoL, set the default state according
8830 * to the HW
8831 */
4d295db0 8832 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8833 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8834 (config & PORT_FEATURE_WOL_ENABLED));
8835
c2c8b03e
EG
8836 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8837 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8838 bp->link_params.lane_config,
8839 bp->link_params.ext_phy_config,
34f80b04 8840 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8841
4d295db0
EG
8842 bp->link_params.switch_cfg |= (bp->port.link_config &
8843 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8844 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8845
8846 bnx2x_link_settings_requested(bp);
8847
01cd4528
EG
8848 /*
8849 * If connected directly, work with the internal PHY, otherwise, work
8850 * with the external PHY
8851 */
8852 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8853 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8854 bp->mdio.prtad = bp->link_params.phy_addr;
8855
8856 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8857 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8858 bp->mdio.prtad =
659bc5c4 8859 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 8860
a2fbb9ea
ET
8861 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8862 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8863 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
8864 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8865 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
8866
8867#ifdef BCM_CNIC
8868 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8869 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8870 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8871#endif
34f80b04
EG
8872}
8873
8874static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8875{
8876 int func = BP_FUNC(bp);
8877 u32 val, val2;
8878 int rc = 0;
a2fbb9ea 8879
34f80b04 8880 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8881
34f80b04
EG
8882 bp->e1hov = 0;
8883 bp->e1hmf = 0;
8884 if (CHIP_IS_E1H(bp)) {
8885 bp->mf_config =
8886 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8887
2691d51d 8888 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8889 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8890 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8891 bp->e1hmf = 1;
2691d51d
EG
8892 BNX2X_DEV_INFO("%s function mode\n",
8893 IS_E1HMF(bp) ? "multi" : "single");
8894
8895 if (IS_E1HMF(bp)) {
8896 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8897 e1hov_tag) &
8898 FUNC_MF_CFG_E1HOV_TAG_MASK);
8899 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8900 bp->e1hov = val;
8901 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8902 "(0x%04x)\n",
8903 func, bp->e1hov, bp->e1hov);
8904 } else {
34f80b04
EG
8905 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8906 " aborting\n", func);
8907 rc = -EPERM;
8908 }
2691d51d
EG
8909 } else {
8910 if (BP_E1HVN(bp)) {
8911 BNX2X_ERR("!!! VN %d in single function mode,"
8912 " aborting\n", BP_E1HVN(bp));
8913 rc = -EPERM;
8914 }
34f80b04
EG
8915 }
8916 }
a2fbb9ea 8917
34f80b04
EG
8918 if (!BP_NOMCP(bp)) {
8919 bnx2x_get_port_hwinfo(bp);
8920
8921 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8922 DRV_MSG_SEQ_NUMBER_MASK);
8923 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8924 }
8925
8926 if (IS_E1HMF(bp)) {
8927 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8928 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8929 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8930 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8931 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8932 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8933 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8934 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8935 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8936 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8937 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8938 ETH_ALEN);
8939 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8940 ETH_ALEN);
a2fbb9ea 8941 }
34f80b04
EG
8942
8943 return rc;
a2fbb9ea
ET
8944 }
8945
34f80b04
EG
8946 if (BP_NOMCP(bp)) {
8947 /* only supposed to happen on emulation/FPGA */
33471629 8948 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8949 random_ether_addr(bp->dev->dev_addr);
8950 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8951 }
a2fbb9ea 8952
34f80b04
EG
8953 return rc;
8954}
8955
8956static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8957{
8958 int func = BP_FUNC(bp);
87942b46 8959 int timer_interval;
34f80b04
EG
8960 int rc;
8961
da5a662a
VZ
8962 /* Disable interrupt handling until HW is initialized */
8963 atomic_set(&bp->intr_sem, 1);
e1510706 8964 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8965
34f80b04 8966 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8967 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
8968#ifdef BCM_CNIC
8969 mutex_init(&bp->cnic_mutex);
8970#endif
a2fbb9ea 8971
1cf167f2 8972 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8973 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8974
8975 rc = bnx2x_get_hwinfo(bp);
8976
8977 /* need to reset chip if undi was active */
8978 if (!BP_NOMCP(bp))
8979 bnx2x_undi_unload(bp);
8980
8981 if (CHIP_REV_IS_FPGA(bp))
8982 printk(KERN_ERR PFX "FPGA detected\n");
8983
8984 if (BP_NOMCP(bp) && (func == 0))
8985 printk(KERN_ERR PFX
8986 "MCP disabled, must load devices in order!\n");
8987
555f6c78 8988 /* Set multi queue mode */
8badd27a
EG
8989 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8990 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8991 printk(KERN_ERR PFX
8badd27a 8992 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8993 multi_mode = ETH_RSS_MODE_DISABLED;
8994 }
8995 bp->multi_mode = multi_mode;
8996
8997
7a9b2557
VZ
8998 /* Set TPA flags */
8999 if (disable_tpa) {
9000 bp->flags &= ~TPA_ENABLE_FLAG;
9001 bp->dev->features &= ~NETIF_F_LRO;
9002 } else {
9003 bp->flags |= TPA_ENABLE_FLAG;
9004 bp->dev->features |= NETIF_F_LRO;
9005 }
9006
a18f5128
EG
9007 if (CHIP_IS_E1(bp))
9008 bp->dropless_fc = 0;
9009 else
9010 bp->dropless_fc = dropless_fc;
9011
8d5726c4 9012 bp->mrrs = mrrs;
7a9b2557 9013
34f80b04
EG
9014 bp->tx_ring_size = MAX_TX_AVAIL;
9015 bp->rx_ring_size = MAX_RX_AVAIL;
9016
9017 bp->rx_csum = 1;
34f80b04 9018
7d323bfd
EG
9019 /* make sure that the numbers are in the right granularity */
9020 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9021 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 9022
87942b46
EG
9023 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9024 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
9025
9026 init_timer(&bp->timer);
9027 bp->timer.expires = jiffies + bp->current_interval;
9028 bp->timer.data = (unsigned long) bp;
9029 bp->timer.function = bnx2x_timer;
9030
9031 return rc;
a2fbb9ea
ET
9032}
9033
9034/*
9035 * ethtool service functions
9036 */
9037
9038/* All ethtool functions called with rtnl_lock */
9039
9040static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9041{
9042 struct bnx2x *bp = netdev_priv(dev);
9043
34f80b04
EG
9044 cmd->supported = bp->port.supported;
9045 cmd->advertising = bp->port.advertising;
a2fbb9ea 9046
f34d28ea
EG
9047 if ((bp->state == BNX2X_STATE_OPEN) &&
9048 !(bp->flags & MF_FUNC_DIS) &&
9049 (bp->link_vars.link_up)) {
c18487ee
YR
9050 cmd->speed = bp->link_vars.line_speed;
9051 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
9052 if (IS_E1HMF(bp)) {
9053 u16 vn_max_rate;
34f80b04 9054
b015e3d1
EG
9055 vn_max_rate =
9056 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 9057 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
9058 if (vn_max_rate < cmd->speed)
9059 cmd->speed = vn_max_rate;
9060 }
9061 } else {
9062 cmd->speed = -1;
9063 cmd->duplex = -1;
34f80b04 9064 }
a2fbb9ea 9065
c18487ee
YR
9066 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9067 u32 ext_phy_type =
9068 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
9069
9070 switch (ext_phy_type) {
9071 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 9072 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 9073 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
9074 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9075 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9076 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 9077 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
9078 cmd->port = PORT_FIBRE;
9079 break;
9080
9081 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 9082 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
9083 cmd->port = PORT_TP;
9084 break;
9085
c18487ee
YR
9086 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9087 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9088 bp->link_params.ext_phy_config);
9089 break;
9090
f1410647
ET
9091 default:
9092 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
9093 bp->link_params.ext_phy_config);
9094 break;
f1410647
ET
9095 }
9096 } else
a2fbb9ea 9097 cmd->port = PORT_TP;
a2fbb9ea 9098
01cd4528 9099 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
9100 cmd->transceiver = XCVR_INTERNAL;
9101
c18487ee 9102 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 9103 cmd->autoneg = AUTONEG_ENABLE;
f1410647 9104 else
a2fbb9ea 9105 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
9106
9107 cmd->maxtxpkt = 0;
9108 cmd->maxrxpkt = 0;
9109
9110 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9111 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9112 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9113 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9114 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9115 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9116 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9117
9118 return 0;
9119}
9120
9121static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9122{
9123 struct bnx2x *bp = netdev_priv(dev);
9124 u32 advertising;
9125
34f80b04
EG
9126 if (IS_E1HMF(bp))
9127 return 0;
9128
a2fbb9ea
ET
9129 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9130 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9131 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9132 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9133 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9134 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9135 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9136
a2fbb9ea 9137 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
9138 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9139 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 9140 return -EINVAL;
f1410647 9141 }
a2fbb9ea
ET
9142
9143 /* advertise the requested speed and duplex if supported */
34f80b04 9144 cmd->advertising &= bp->port.supported;
a2fbb9ea 9145
c18487ee
YR
9146 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9147 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
9148 bp->port.advertising |= (ADVERTISED_Autoneg |
9149 cmd->advertising);
a2fbb9ea
ET
9150
9151 } else { /* forced speed */
9152 /* advertise the requested speed and duplex if supported */
9153 switch (cmd->speed) {
9154 case SPEED_10:
9155 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9156 if (!(bp->port.supported &
f1410647
ET
9157 SUPPORTED_10baseT_Full)) {
9158 DP(NETIF_MSG_LINK,
9159 "10M full not supported\n");
a2fbb9ea 9160 return -EINVAL;
f1410647 9161 }
a2fbb9ea
ET
9162
9163 advertising = (ADVERTISED_10baseT_Full |
9164 ADVERTISED_TP);
9165 } else {
34f80b04 9166 if (!(bp->port.supported &
f1410647
ET
9167 SUPPORTED_10baseT_Half)) {
9168 DP(NETIF_MSG_LINK,
9169 "10M half not supported\n");
a2fbb9ea 9170 return -EINVAL;
f1410647 9171 }
a2fbb9ea
ET
9172
9173 advertising = (ADVERTISED_10baseT_Half |
9174 ADVERTISED_TP);
9175 }
9176 break;
9177
9178 case SPEED_100:
9179 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9180 if (!(bp->port.supported &
f1410647
ET
9181 SUPPORTED_100baseT_Full)) {
9182 DP(NETIF_MSG_LINK,
9183 "100M full not supported\n");
a2fbb9ea 9184 return -EINVAL;
f1410647 9185 }
a2fbb9ea
ET
9186
9187 advertising = (ADVERTISED_100baseT_Full |
9188 ADVERTISED_TP);
9189 } else {
34f80b04 9190 if (!(bp->port.supported &
f1410647
ET
9191 SUPPORTED_100baseT_Half)) {
9192 DP(NETIF_MSG_LINK,
9193 "100M half not supported\n");
a2fbb9ea 9194 return -EINVAL;
f1410647 9195 }
a2fbb9ea
ET
9196
9197 advertising = (ADVERTISED_100baseT_Half |
9198 ADVERTISED_TP);
9199 }
9200 break;
9201
9202 case SPEED_1000:
f1410647
ET
9203 if (cmd->duplex != DUPLEX_FULL) {
9204 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 9205 return -EINVAL;
f1410647 9206 }
a2fbb9ea 9207
34f80b04 9208 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 9209 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 9210 return -EINVAL;
f1410647 9211 }
a2fbb9ea
ET
9212
9213 advertising = (ADVERTISED_1000baseT_Full |
9214 ADVERTISED_TP);
9215 break;
9216
9217 case SPEED_2500:
f1410647
ET
9218 if (cmd->duplex != DUPLEX_FULL) {
9219 DP(NETIF_MSG_LINK,
9220 "2.5G half not supported\n");
a2fbb9ea 9221 return -EINVAL;
f1410647 9222 }
a2fbb9ea 9223
34f80b04 9224 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
9225 DP(NETIF_MSG_LINK,
9226 "2.5G full not supported\n");
a2fbb9ea 9227 return -EINVAL;
f1410647 9228 }
a2fbb9ea 9229
f1410647 9230 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
9231 ADVERTISED_TP);
9232 break;
9233
9234 case SPEED_10000:
f1410647
ET
9235 if (cmd->duplex != DUPLEX_FULL) {
9236 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 9237 return -EINVAL;
f1410647 9238 }
a2fbb9ea 9239
34f80b04 9240 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 9241 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 9242 return -EINVAL;
f1410647 9243 }
a2fbb9ea
ET
9244
9245 advertising = (ADVERTISED_10000baseT_Full |
9246 ADVERTISED_FIBRE);
9247 break;
9248
9249 default:
f1410647 9250 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
9251 return -EINVAL;
9252 }
9253
c18487ee
YR
9254 bp->link_params.req_line_speed = cmd->speed;
9255 bp->link_params.req_duplex = cmd->duplex;
34f80b04 9256 bp->port.advertising = advertising;
a2fbb9ea
ET
9257 }
9258
c18487ee 9259 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 9260 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 9261 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 9262 bp->port.advertising);
a2fbb9ea 9263
34f80b04 9264 if (netif_running(dev)) {
bb2a0f7a 9265 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9266 bnx2x_link_set(bp);
9267 }
a2fbb9ea
ET
9268
9269 return 0;
9270}
9271
0a64ea57
EG
9272#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9273#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9274
9275static int bnx2x_get_regs_len(struct net_device *dev)
9276{
0a64ea57 9277 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 9278 int regdump_len = 0;
0a64ea57
EG
9279 int i;
9280
0a64ea57
EG
9281 if (CHIP_IS_E1(bp)) {
9282 for (i = 0; i < REGS_COUNT; i++)
9283 if (IS_E1_ONLINE(reg_addrs[i].info))
9284 regdump_len += reg_addrs[i].size;
9285
9286 for (i = 0; i < WREGS_COUNT_E1; i++)
9287 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9288 regdump_len += wreg_addrs_e1[i].size *
9289 (1 + wreg_addrs_e1[i].read_regs_count);
9290
9291 } else { /* E1H */
9292 for (i = 0; i < REGS_COUNT; i++)
9293 if (IS_E1H_ONLINE(reg_addrs[i].info))
9294 regdump_len += reg_addrs[i].size;
9295
9296 for (i = 0; i < WREGS_COUNT_E1H; i++)
9297 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9298 regdump_len += wreg_addrs_e1h[i].size *
9299 (1 + wreg_addrs_e1h[i].read_regs_count);
9300 }
9301 regdump_len *= 4;
9302 regdump_len += sizeof(struct dump_hdr);
9303
9304 return regdump_len;
9305}
9306
9307static void bnx2x_get_regs(struct net_device *dev,
9308 struct ethtool_regs *regs, void *_p)
9309{
9310 u32 *p = _p, i, j;
9311 struct bnx2x *bp = netdev_priv(dev);
9312 struct dump_hdr dump_hdr = {0};
9313
9314 regs->version = 0;
9315 memset(p, 0, regs->len);
9316
9317 if (!netif_running(bp->dev))
9318 return;
9319
9320 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9321 dump_hdr.dump_sign = dump_sign_all;
9322 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9323 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9324 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9325 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9326 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9327
9328 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9329 p += dump_hdr.hdr_size + 1;
9330
9331 if (CHIP_IS_E1(bp)) {
9332 for (i = 0; i < REGS_COUNT; i++)
9333 if (IS_E1_ONLINE(reg_addrs[i].info))
9334 for (j = 0; j < reg_addrs[i].size; j++)
9335 *p++ = REG_RD(bp,
9336 reg_addrs[i].addr + j*4);
9337
9338 } else { /* E1H */
9339 for (i = 0; i < REGS_COUNT; i++)
9340 if (IS_E1H_ONLINE(reg_addrs[i].info))
9341 for (j = 0; j < reg_addrs[i].size; j++)
9342 *p++ = REG_RD(bp,
9343 reg_addrs[i].addr + j*4);
9344 }
9345}
9346
0d28e49a
EG
9347#define PHY_FW_VER_LEN 10
9348
9349static void bnx2x_get_drvinfo(struct net_device *dev,
9350 struct ethtool_drvinfo *info)
9351{
9352 struct bnx2x *bp = netdev_priv(dev);
9353 u8 phy_fw_ver[PHY_FW_VER_LEN];
9354
9355 strcpy(info->driver, DRV_MODULE_NAME);
9356 strcpy(info->version, DRV_MODULE_VERSION);
9357
9358 phy_fw_ver[0] = '\0';
9359 if (bp->port.pmf) {
9360 bnx2x_acquire_phy_lock(bp);
9361 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9362 (bp->state != BNX2X_STATE_CLOSED),
9363 phy_fw_ver, PHY_FW_VER_LEN);
9364 bnx2x_release_phy_lock(bp);
9365 }
9366
9367 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9368 (bp->common.bc_ver & 0xff0000) >> 16,
9369 (bp->common.bc_ver & 0xff00) >> 8,
9370 (bp->common.bc_ver & 0xff),
9371 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9372 strcpy(info->bus_info, pci_name(bp->pdev));
9373 info->n_stats = BNX2X_NUM_STATS;
9374 info->testinfo_len = BNX2X_NUM_TESTS;
9375 info->eedump_len = bp->common.flash_size;
9376 info->regdump_len = bnx2x_get_regs_len(dev);
9377}
9378
a2fbb9ea
ET
9379static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9380{
9381 struct bnx2x *bp = netdev_priv(dev);
9382
9383 if (bp->flags & NO_WOL_FLAG) {
9384 wol->supported = 0;
9385 wol->wolopts = 0;
9386 } else {
9387 wol->supported = WAKE_MAGIC;
9388 if (bp->wol)
9389 wol->wolopts = WAKE_MAGIC;
9390 else
9391 wol->wolopts = 0;
9392 }
9393 memset(&wol->sopass, 0, sizeof(wol->sopass));
9394}
9395
9396static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9397{
9398 struct bnx2x *bp = netdev_priv(dev);
9399
9400 if (wol->wolopts & ~WAKE_MAGIC)
9401 return -EINVAL;
9402
9403 if (wol->wolopts & WAKE_MAGIC) {
9404 if (bp->flags & NO_WOL_FLAG)
9405 return -EINVAL;
9406
9407 bp->wol = 1;
34f80b04 9408 } else
a2fbb9ea 9409 bp->wol = 0;
34f80b04 9410
a2fbb9ea
ET
9411 return 0;
9412}
9413
9414static u32 bnx2x_get_msglevel(struct net_device *dev)
9415{
9416 struct bnx2x *bp = netdev_priv(dev);
9417
9418 return bp->msglevel;
9419}
9420
9421static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9422{
9423 struct bnx2x *bp = netdev_priv(dev);
9424
9425 if (capable(CAP_NET_ADMIN))
9426 bp->msglevel = level;
9427}
9428
9429static int bnx2x_nway_reset(struct net_device *dev)
9430{
9431 struct bnx2x *bp = netdev_priv(dev);
9432
34f80b04
EG
9433 if (!bp->port.pmf)
9434 return 0;
a2fbb9ea 9435
34f80b04 9436 if (netif_running(dev)) {
bb2a0f7a 9437 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9438 bnx2x_link_set(bp);
9439 }
a2fbb9ea
ET
9440
9441 return 0;
9442}
9443
ab6ad5a4 9444static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
9445{
9446 struct bnx2x *bp = netdev_priv(dev);
9447
f34d28ea
EG
9448 if (bp->flags & MF_FUNC_DIS)
9449 return 0;
9450
01e53298
NO
9451 return bp->link_vars.link_up;
9452}
9453
a2fbb9ea
ET
9454static int bnx2x_get_eeprom_len(struct net_device *dev)
9455{
9456 struct bnx2x *bp = netdev_priv(dev);
9457
34f80b04 9458 return bp->common.flash_size;
a2fbb9ea
ET
9459}
9460
9461static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9462{
34f80b04 9463 int port = BP_PORT(bp);
a2fbb9ea
ET
9464 int count, i;
9465 u32 val = 0;
9466
9467 /* adjust timeout for emulation/FPGA */
9468 count = NVRAM_TIMEOUT_COUNT;
9469 if (CHIP_REV_IS_SLOW(bp))
9470 count *= 100;
9471
9472 /* request access to nvram interface */
9473 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9474 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9475
9476 for (i = 0; i < count*10; i++) {
9477 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9478 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9479 break;
9480
9481 udelay(5);
9482 }
9483
9484 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9485 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9486 return -EBUSY;
9487 }
9488
9489 return 0;
9490}
9491
9492static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9493{
34f80b04 9494 int port = BP_PORT(bp);
a2fbb9ea
ET
9495 int count, i;
9496 u32 val = 0;
9497
9498 /* adjust timeout for emulation/FPGA */
9499 count = NVRAM_TIMEOUT_COUNT;
9500 if (CHIP_REV_IS_SLOW(bp))
9501 count *= 100;
9502
9503 /* relinquish nvram interface */
9504 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9505 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9506
9507 for (i = 0; i < count*10; i++) {
9508 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9509 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9510 break;
9511
9512 udelay(5);
9513 }
9514
9515 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9516 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9517 return -EBUSY;
9518 }
9519
9520 return 0;
9521}
9522
9523static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9524{
9525 u32 val;
9526
9527 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9528
9529 /* enable both bits, even on read */
9530 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9531 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9532 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9533}
9534
9535static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9536{
9537 u32 val;
9538
9539 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9540
9541 /* disable both bits, even after read */
9542 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9543 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9544 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9545}
9546
4781bfad 9547static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9548 u32 cmd_flags)
9549{
f1410647 9550 int count, i, rc;
a2fbb9ea
ET
9551 u32 val;
9552
9553 /* build the command word */
9554 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9555
9556 /* need to clear DONE bit separately */
9557 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9558
9559 /* address of the NVRAM to read from */
9560 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9561 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9562
9563 /* issue a read command */
9564 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9565
9566 /* adjust timeout for emulation/FPGA */
9567 count = NVRAM_TIMEOUT_COUNT;
9568 if (CHIP_REV_IS_SLOW(bp))
9569 count *= 100;
9570
9571 /* wait for completion */
9572 *ret_val = 0;
9573 rc = -EBUSY;
9574 for (i = 0; i < count; i++) {
9575 udelay(5);
9576 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9577
9578 if (val & MCPR_NVM_COMMAND_DONE) {
9579 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
9580 /* we read nvram data in cpu order
9581 * but ethtool sees it as an array of bytes
9582 * converting to big-endian will do the work */
4781bfad 9583 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9584 rc = 0;
9585 break;
9586 }
9587 }
9588
9589 return rc;
9590}
9591
9592static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9593 int buf_size)
9594{
9595 int rc;
9596 u32 cmd_flags;
4781bfad 9597 __be32 val;
a2fbb9ea
ET
9598
9599 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9600 DP(BNX2X_MSG_NVM,
c14423fe 9601 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9602 offset, buf_size);
9603 return -EINVAL;
9604 }
9605
34f80b04
EG
9606 if (offset + buf_size > bp->common.flash_size) {
9607 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9608 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9609 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9610 return -EINVAL;
9611 }
9612
9613 /* request access to nvram interface */
9614 rc = bnx2x_acquire_nvram_lock(bp);
9615 if (rc)
9616 return rc;
9617
9618 /* enable access to nvram interface */
9619 bnx2x_enable_nvram_access(bp);
9620
9621 /* read the first word(s) */
9622 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9623 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9624 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9625 memcpy(ret_buf, &val, 4);
9626
9627 /* advance to the next dword */
9628 offset += sizeof(u32);
9629 ret_buf += sizeof(u32);
9630 buf_size -= sizeof(u32);
9631 cmd_flags = 0;
9632 }
9633
9634 if (rc == 0) {
9635 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9636 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9637 memcpy(ret_buf, &val, 4);
9638 }
9639
9640 /* disable access to nvram interface */
9641 bnx2x_disable_nvram_access(bp);
9642 bnx2x_release_nvram_lock(bp);
9643
9644 return rc;
9645}
9646
9647static int bnx2x_get_eeprom(struct net_device *dev,
9648 struct ethtool_eeprom *eeprom, u8 *eebuf)
9649{
9650 struct bnx2x *bp = netdev_priv(dev);
9651 int rc;
9652
2add3acb
EG
9653 if (!netif_running(dev))
9654 return -EAGAIN;
9655
34f80b04 9656 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9657 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9658 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9659 eeprom->len, eeprom->len);
9660
9661 /* parameters already validated in ethtool_get_eeprom */
9662
9663 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9664
9665 return rc;
9666}
9667
9668static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9669 u32 cmd_flags)
9670{
f1410647 9671 int count, i, rc;
a2fbb9ea
ET
9672
9673 /* build the command word */
9674 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9675
9676 /* need to clear DONE bit separately */
9677 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9678
9679 /* write the data */
9680 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9681
9682 /* address of the NVRAM to write to */
9683 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9684 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9685
9686 /* issue the write command */
9687 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9688
9689 /* adjust timeout for emulation/FPGA */
9690 count = NVRAM_TIMEOUT_COUNT;
9691 if (CHIP_REV_IS_SLOW(bp))
9692 count *= 100;
9693
9694 /* wait for completion */
9695 rc = -EBUSY;
9696 for (i = 0; i < count; i++) {
9697 udelay(5);
9698 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9699 if (val & MCPR_NVM_COMMAND_DONE) {
9700 rc = 0;
9701 break;
9702 }
9703 }
9704
9705 return rc;
9706}
9707
f1410647 9708#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9709
9710static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9711 int buf_size)
9712{
9713 int rc;
9714 u32 cmd_flags;
9715 u32 align_offset;
4781bfad 9716 __be32 val;
a2fbb9ea 9717
34f80b04
EG
9718 if (offset + buf_size > bp->common.flash_size) {
9719 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9720 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9721 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9722 return -EINVAL;
9723 }
9724
9725 /* request access to nvram interface */
9726 rc = bnx2x_acquire_nvram_lock(bp);
9727 if (rc)
9728 return rc;
9729
9730 /* enable access to nvram interface */
9731 bnx2x_enable_nvram_access(bp);
9732
9733 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9734 align_offset = (offset & ~0x03);
9735 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9736
9737 if (rc == 0) {
9738 val &= ~(0xff << BYTE_OFFSET(offset));
9739 val |= (*data_buf << BYTE_OFFSET(offset));
9740
9741 /* nvram data is returned as an array of bytes
9742 * convert it back to cpu order */
9743 val = be32_to_cpu(val);
9744
a2fbb9ea
ET
9745 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9746 cmd_flags);
9747 }
9748
9749 /* disable access to nvram interface */
9750 bnx2x_disable_nvram_access(bp);
9751 bnx2x_release_nvram_lock(bp);
9752
9753 return rc;
9754}
9755
9756static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9757 int buf_size)
9758{
9759 int rc;
9760 u32 cmd_flags;
9761 u32 val;
9762 u32 written_so_far;
9763
34f80b04 9764 if (buf_size == 1) /* ethtool */
a2fbb9ea 9765 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9766
9767 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9768 DP(BNX2X_MSG_NVM,
c14423fe 9769 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9770 offset, buf_size);
9771 return -EINVAL;
9772 }
9773
34f80b04
EG
9774 if (offset + buf_size > bp->common.flash_size) {
9775 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9776 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9777 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9778 return -EINVAL;
9779 }
9780
9781 /* request access to nvram interface */
9782 rc = bnx2x_acquire_nvram_lock(bp);
9783 if (rc)
9784 return rc;
9785
9786 /* enable access to nvram interface */
9787 bnx2x_enable_nvram_access(bp);
9788
9789 written_so_far = 0;
9790 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9791 while ((written_so_far < buf_size) && (rc == 0)) {
9792 if (written_so_far == (buf_size - sizeof(u32)))
9793 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9794 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9795 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9796 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9797 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9798
9799 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9800
9801 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9802
9803 /* advance to the next dword */
9804 offset += sizeof(u32);
9805 data_buf += sizeof(u32);
9806 written_so_far += sizeof(u32);
9807 cmd_flags = 0;
9808 }
9809
9810 /* disable access to nvram interface */
9811 bnx2x_disable_nvram_access(bp);
9812 bnx2x_release_nvram_lock(bp);
9813
9814 return rc;
9815}
9816
9817static int bnx2x_set_eeprom(struct net_device *dev,
9818 struct ethtool_eeprom *eeprom, u8 *eebuf)
9819{
9820 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
9821 int port = BP_PORT(bp);
9822 int rc = 0;
a2fbb9ea 9823
9f4c9583
EG
9824 if (!netif_running(dev))
9825 return -EAGAIN;
9826
34f80b04 9827 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9828 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9829 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9830 eeprom->len, eeprom->len);
9831
9832 /* parameters already validated in ethtool_set_eeprom */
9833
f57a6025
EG
9834 /* PHY eeprom can be accessed only by the PMF */
9835 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9836 !bp->port.pmf)
9837 return -EINVAL;
9838
9839 if (eeprom->magic == 0x50485950) {
9840 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9841 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9842
f57a6025
EG
9843 bnx2x_acquire_phy_lock(bp);
9844 rc |= bnx2x_link_reset(&bp->link_params,
9845 &bp->link_vars, 0);
9846 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9847 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9848 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9849 MISC_REGISTERS_GPIO_HIGH, port);
9850 bnx2x_release_phy_lock(bp);
9851 bnx2x_link_report(bp);
9852
9853 } else if (eeprom->magic == 0x50485952) {
9854 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
f34d28ea 9855 if (bp->state == BNX2X_STATE_OPEN) {
4a37fb66 9856 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
9857 rc |= bnx2x_link_reset(&bp->link_params,
9858 &bp->link_vars, 1);
9859
9860 rc |= bnx2x_phy_init(&bp->link_params,
9861 &bp->link_vars);
4a37fb66 9862 bnx2x_release_phy_lock(bp);
f57a6025
EG
9863 bnx2x_calc_fc_adv(bp);
9864 }
9865 } else if (eeprom->magic == 0x53985943) {
9866 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9867 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9868 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9869 u8 ext_phy_addr =
659bc5c4 9870 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
f57a6025
EG
9871
9872 /* DSP Remove Download Mode */
9873 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9874 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9875
f57a6025
EG
9876 bnx2x_acquire_phy_lock(bp);
9877
9878 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9879
9880 /* wait 0.5 sec to allow it to run */
9881 msleep(500);
9882 bnx2x_ext_phy_hw_reset(bp, port);
9883 msleep(500);
9884 bnx2x_release_phy_lock(bp);
9885 }
9886 } else
c18487ee 9887 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9888
9889 return rc;
9890}
9891
9892static int bnx2x_get_coalesce(struct net_device *dev,
9893 struct ethtool_coalesce *coal)
9894{
9895 struct bnx2x *bp = netdev_priv(dev);
9896
9897 memset(coal, 0, sizeof(struct ethtool_coalesce));
9898
9899 coal->rx_coalesce_usecs = bp->rx_ticks;
9900 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9901
9902 return 0;
9903}
9904
ca00392c 9905#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9906static int bnx2x_set_coalesce(struct net_device *dev,
9907 struct ethtool_coalesce *coal)
9908{
9909 struct bnx2x *bp = netdev_priv(dev);
9910
9911 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9912 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9913 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9914
9915 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9916 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9917 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9918
34f80b04 9919 if (netif_running(dev))
a2fbb9ea
ET
9920 bnx2x_update_coalesce(bp);
9921
9922 return 0;
9923}
9924
9925static void bnx2x_get_ringparam(struct net_device *dev,
9926 struct ethtool_ringparam *ering)
9927{
9928 struct bnx2x *bp = netdev_priv(dev);
9929
9930 ering->rx_max_pending = MAX_RX_AVAIL;
9931 ering->rx_mini_max_pending = 0;
9932 ering->rx_jumbo_max_pending = 0;
9933
9934 ering->rx_pending = bp->rx_ring_size;
9935 ering->rx_mini_pending = 0;
9936 ering->rx_jumbo_pending = 0;
9937
9938 ering->tx_max_pending = MAX_TX_AVAIL;
9939 ering->tx_pending = bp->tx_ring_size;
9940}
9941
9942static int bnx2x_set_ringparam(struct net_device *dev,
9943 struct ethtool_ringparam *ering)
9944{
9945 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9946 int rc = 0;
a2fbb9ea
ET
9947
9948 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9949 (ering->tx_pending > MAX_TX_AVAIL) ||
9950 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9951 return -EINVAL;
9952
9953 bp->rx_ring_size = ering->rx_pending;
9954 bp->tx_ring_size = ering->tx_pending;
9955
34f80b04
EG
9956 if (netif_running(dev)) {
9957 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9958 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9959 }
9960
34f80b04 9961 return rc;
a2fbb9ea
ET
9962}
9963
9964static void bnx2x_get_pauseparam(struct net_device *dev,
9965 struct ethtool_pauseparam *epause)
9966{
9967 struct bnx2x *bp = netdev_priv(dev);
9968
356e2385
EG
9969 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9970 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9971 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9972
c0700f90
DM
9973 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9974 BNX2X_FLOW_CTRL_RX);
9975 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9976 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9977
9978 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9979 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9980 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9981}
9982
9983static int bnx2x_set_pauseparam(struct net_device *dev,
9984 struct ethtool_pauseparam *epause)
9985{
9986 struct bnx2x *bp = netdev_priv(dev);
9987
34f80b04
EG
9988 if (IS_E1HMF(bp))
9989 return 0;
9990
a2fbb9ea
ET
9991 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9992 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9993 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9994
c0700f90 9995 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9996
f1410647 9997 if (epause->rx_pause)
c0700f90 9998 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9999
f1410647 10000 if (epause->tx_pause)
c0700f90 10001 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 10002
c0700f90
DM
10003 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10004 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 10005
c18487ee 10006 if (epause->autoneg) {
34f80b04 10007 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 10008 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
10009 return -EINVAL;
10010 }
a2fbb9ea 10011
c18487ee 10012 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 10013 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 10014 }
a2fbb9ea 10015
c18487ee
YR
10016 DP(NETIF_MSG_LINK,
10017 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
10018
10019 if (netif_running(dev)) {
bb2a0f7a 10020 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10021 bnx2x_link_set(bp);
10022 }
a2fbb9ea
ET
10023
10024 return 0;
10025}
10026
df0f2343
VZ
10027static int bnx2x_set_flags(struct net_device *dev, u32 data)
10028{
10029 struct bnx2x *bp = netdev_priv(dev);
10030 int changed = 0;
10031 int rc = 0;
10032
10033 /* TPA requires Rx CSUM offloading */
10034 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10035 if (!(dev->features & NETIF_F_LRO)) {
10036 dev->features |= NETIF_F_LRO;
10037 bp->flags |= TPA_ENABLE_FLAG;
10038 changed = 1;
10039 }
10040
10041 } else if (dev->features & NETIF_F_LRO) {
10042 dev->features &= ~NETIF_F_LRO;
10043 bp->flags &= ~TPA_ENABLE_FLAG;
10044 changed = 1;
10045 }
10046
10047 if (changed && netif_running(dev)) {
10048 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10049 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10050 }
10051
10052 return rc;
10053}
10054
a2fbb9ea
ET
10055static u32 bnx2x_get_rx_csum(struct net_device *dev)
10056{
10057 struct bnx2x *bp = netdev_priv(dev);
10058
10059 return bp->rx_csum;
10060}
10061
10062static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10063{
10064 struct bnx2x *bp = netdev_priv(dev);
df0f2343 10065 int rc = 0;
a2fbb9ea
ET
10066
10067 bp->rx_csum = data;
df0f2343
VZ
10068
10069 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10070 TPA'ed packets will be discarded due to wrong TCP CSUM */
10071 if (!data) {
10072 u32 flags = ethtool_op_get_flags(dev);
10073
10074 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10075 }
10076
10077 return rc;
a2fbb9ea
ET
10078}
10079
10080static int bnx2x_set_tso(struct net_device *dev, u32 data)
10081{
755735eb 10082 if (data) {
a2fbb9ea 10083 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10084 dev->features |= NETIF_F_TSO6;
10085 } else {
a2fbb9ea 10086 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10087 dev->features &= ~NETIF_F_TSO6;
10088 }
10089
a2fbb9ea
ET
10090 return 0;
10091}
10092
f3c87cdd 10093static const struct {
a2fbb9ea
ET
10094 char string[ETH_GSTRING_LEN];
10095} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
10096 { "register_test (offline)" },
10097 { "memory_test (offline)" },
10098 { "loopback_test (offline)" },
10099 { "nvram_test (online)" },
10100 { "interrupt_test (online)" },
10101 { "link_test (online)" },
d3d4f495 10102 { "idle check (online)" }
a2fbb9ea
ET
10103};
10104
f3c87cdd
YG
10105static int bnx2x_test_registers(struct bnx2x *bp)
10106{
10107 int idx, i, rc = -ENODEV;
10108 u32 wr_val = 0;
9dabc424 10109 int port = BP_PORT(bp);
f3c87cdd
YG
10110 static const struct {
10111 u32 offset0;
10112 u32 offset1;
10113 u32 mask;
10114 } reg_tbl[] = {
10115/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10116 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10117 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10118 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10119 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10120 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10121 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10122 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10123 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10124 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10125/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10126 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10127 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10128 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10129 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10130 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10131 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10132 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 10133 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
10134 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10135/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
10136 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10137 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10138 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10139 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10140 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10141 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10142 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10143 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
10144 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10145/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
10146 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10147 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10148 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10149 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10150 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10151 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10152
10153 { 0xffffffff, 0, 0x00000000 }
10154 };
10155
10156 if (!netif_running(bp->dev))
10157 return rc;
10158
10159 /* Repeat the test twice:
10160 First by writing 0x00000000, second by writing 0xffffffff */
10161 for (idx = 0; idx < 2; idx++) {
10162
10163 switch (idx) {
10164 case 0:
10165 wr_val = 0;
10166 break;
10167 case 1:
10168 wr_val = 0xffffffff;
10169 break;
10170 }
10171
10172 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10173 u32 offset, mask, save_val, val;
f3c87cdd
YG
10174
10175 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10176 mask = reg_tbl[i].mask;
10177
10178 save_val = REG_RD(bp, offset);
10179
10180 REG_WR(bp, offset, wr_val);
10181 val = REG_RD(bp, offset);
10182
10183 /* Restore the original register's value */
10184 REG_WR(bp, offset, save_val);
10185
10186 /* verify that value is as expected value */
10187 if ((val & mask) != (wr_val & mask))
10188 goto test_reg_exit;
10189 }
10190 }
10191
10192 rc = 0;
10193
10194test_reg_exit:
10195 return rc;
10196}
10197
10198static int bnx2x_test_memory(struct bnx2x *bp)
10199{
10200 int i, j, rc = -ENODEV;
10201 u32 val;
10202 static const struct {
10203 u32 offset;
10204 int size;
10205 } mem_tbl[] = {
10206 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10207 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10208 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10209 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10210 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10211 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10212 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10213
10214 { 0xffffffff, 0 }
10215 };
10216 static const struct {
10217 char *name;
10218 u32 offset;
9dabc424
YG
10219 u32 e1_mask;
10220 u32 e1h_mask;
f3c87cdd 10221 } prty_tbl[] = {
9dabc424
YG
10222 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10223 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10224 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10225 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10226 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10227 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10228
10229 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
10230 };
10231
10232 if (!netif_running(bp->dev))
10233 return rc;
10234
10235 /* Go through all the memories */
10236 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10237 for (j = 0; j < mem_tbl[i].size; j++)
10238 REG_RD(bp, mem_tbl[i].offset + j*4);
10239
10240 /* Check the parity status */
10241 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10242 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
10243 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10244 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
10245 DP(NETIF_MSG_HW,
10246 "%s is 0x%x\n", prty_tbl[i].name, val);
10247 goto test_mem_exit;
10248 }
10249 }
10250
10251 rc = 0;
10252
10253test_mem_exit:
10254 return rc;
10255}
10256
f3c87cdd
YG
10257static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10258{
10259 int cnt = 1000;
10260
10261 if (link_up)
10262 while (bnx2x_link_test(bp) && cnt--)
10263 msleep(10);
10264}
10265
10266static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10267{
10268 unsigned int pkt_size, num_pkts, i;
10269 struct sk_buff *skb;
10270 unsigned char *packet;
ca00392c
EG
10271 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10272 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
10273 u16 tx_start_idx, tx_idx;
10274 u16 rx_start_idx, rx_idx;
ca00392c 10275 u16 pkt_prod, bd_prod;
f3c87cdd 10276 struct sw_tx_bd *tx_buf;
ca00392c
EG
10277 struct eth_tx_start_bd *tx_start_bd;
10278 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
10279 dma_addr_t mapping;
10280 union eth_rx_cqe *cqe;
10281 u8 cqe_fp_flags;
10282 struct sw_rx_bd *rx_buf;
10283 u16 len;
10284 int rc = -ENODEV;
10285
b5bf9068
EG
10286 /* check the loopback mode */
10287 switch (loopback_mode) {
10288 case BNX2X_PHY_LOOPBACK:
10289 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10290 return -EINVAL;
10291 break;
10292 case BNX2X_MAC_LOOPBACK:
f3c87cdd 10293 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 10294 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
10295 break;
10296 default:
f3c87cdd 10297 return -EINVAL;
b5bf9068 10298 }
f3c87cdd 10299
b5bf9068
EG
10300 /* prepare the loopback packet */
10301 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10302 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
10303 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10304 if (!skb) {
10305 rc = -ENOMEM;
10306 goto test_loopback_exit;
10307 }
10308 packet = skb_put(skb, pkt_size);
10309 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
10310 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10311 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
10312 for (i = ETH_HLEN; i < pkt_size; i++)
10313 packet[i] = (unsigned char) (i & 0xff);
10314
b5bf9068 10315 /* send the loopback packet */
f3c87cdd 10316 num_pkts = 0;
ca00392c
EG
10317 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10318 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 10319
ca00392c
EG
10320 pkt_prod = fp_tx->tx_pkt_prod++;
10321 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10322 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 10323 tx_buf->skb = skb;
ca00392c 10324 tx_buf->flags = 0;
f3c87cdd 10325
ca00392c
EG
10326 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10327 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
10328 mapping = pci_map_single(bp->pdev, skb->data,
10329 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
10330 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10331 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10332 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10333 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10334 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10335 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10336 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10337 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10338
10339 /* turn on parsing and get a BD */
10340 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10341 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10342
10343 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10344
58f4c4cf
EG
10345 wmb();
10346
ca00392c
EG
10347 fp_tx->tx_db.data.prod += 2;
10348 barrier();
10349 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
10350
10351 mmiowb();
10352
10353 num_pkts++;
ca00392c 10354 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10355 bp->dev->trans_start = jiffies;
10356
10357 udelay(100);
10358
ca00392c 10359 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10360 if (tx_idx != tx_start_idx + num_pkts)
10361 goto test_loopback_exit;
10362
ca00392c 10363 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10364 if (rx_idx != rx_start_idx + num_pkts)
10365 goto test_loopback_exit;
10366
ca00392c 10367 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10368 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10369 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10370 goto test_loopback_rx_exit;
10371
10372 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10373 if (len != pkt_size)
10374 goto test_loopback_rx_exit;
10375
ca00392c 10376 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10377 skb = rx_buf->skb;
10378 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10379 for (i = ETH_HLEN; i < pkt_size; i++)
10380 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10381 goto test_loopback_rx_exit;
10382
10383 rc = 0;
10384
10385test_loopback_rx_exit:
f3c87cdd 10386
ca00392c
EG
10387 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10388 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10389 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10390 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10391
10392 /* Update producers */
ca00392c
EG
10393 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10394 fp_rx->rx_sge_prod);
f3c87cdd
YG
10395
10396test_loopback_exit:
10397 bp->link_params.loopback_mode = LOOPBACK_NONE;
10398
10399 return rc;
10400}
10401
10402static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10403{
b5bf9068 10404 int rc = 0, res;
f3c87cdd
YG
10405
10406 if (!netif_running(bp->dev))
10407 return BNX2X_LOOPBACK_FAILED;
10408
f8ef6e44 10409 bnx2x_netif_stop(bp, 1);
3910c8ae 10410 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10411
b5bf9068
EG
10412 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10413 if (res) {
10414 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10415 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10416 }
10417
b5bf9068
EG
10418 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10419 if (res) {
10420 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10421 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10422 }
10423
3910c8ae 10424 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10425 bnx2x_netif_start(bp);
10426
10427 return rc;
10428}
10429
10430#define CRC32_RESIDUAL 0xdebb20e3
10431
10432static int bnx2x_test_nvram(struct bnx2x *bp)
10433{
10434 static const struct {
10435 int offset;
10436 int size;
10437 } nvram_tbl[] = {
10438 { 0, 0x14 }, /* bootstrap */
10439 { 0x14, 0xec }, /* dir */
10440 { 0x100, 0x350 }, /* manuf_info */
10441 { 0x450, 0xf0 }, /* feature_info */
10442 { 0x640, 0x64 }, /* upgrade_key_info */
10443 { 0x6a4, 0x64 },
10444 { 0x708, 0x70 }, /* manuf_key_info */
10445 { 0x778, 0x70 },
10446 { 0, 0 }
10447 };
4781bfad 10448 __be32 buf[0x350 / 4];
f3c87cdd
YG
10449 u8 *data = (u8 *)buf;
10450 int i, rc;
ab6ad5a4 10451 u32 magic, crc;
f3c87cdd
YG
10452
10453 rc = bnx2x_nvram_read(bp, 0, data, 4);
10454 if (rc) {
f5372251 10455 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10456 goto test_nvram_exit;
10457 }
10458
10459 magic = be32_to_cpu(buf[0]);
10460 if (magic != 0x669955aa) {
10461 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10462 rc = -ENODEV;
10463 goto test_nvram_exit;
10464 }
10465
10466 for (i = 0; nvram_tbl[i].size; i++) {
10467
10468 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10469 nvram_tbl[i].size);
10470 if (rc) {
10471 DP(NETIF_MSG_PROBE,
f5372251 10472 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10473 goto test_nvram_exit;
10474 }
10475
ab6ad5a4
EG
10476 crc = ether_crc_le(nvram_tbl[i].size, data);
10477 if (crc != CRC32_RESIDUAL) {
f3c87cdd 10478 DP(NETIF_MSG_PROBE,
ab6ad5a4 10479 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
10480 rc = -ENODEV;
10481 goto test_nvram_exit;
10482 }
10483 }
10484
10485test_nvram_exit:
10486 return rc;
10487}
10488
10489static int bnx2x_test_intr(struct bnx2x *bp)
10490{
10491 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10492 int i, rc;
10493
10494 if (!netif_running(bp->dev))
10495 return -ENODEV;
10496
8d9c5f34 10497 config->hdr.length = 0;
af246401
EG
10498 if (CHIP_IS_E1(bp))
10499 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10500 else
10501 config->hdr.offset = BP_FUNC(bp);
0626b899 10502 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10503 config->hdr.reserved1 = 0;
10504
e665bfda
MC
10505 bp->set_mac_pending++;
10506 smp_wmb();
f3c87cdd
YG
10507 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10508 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10509 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10510 if (rc == 0) {
f3c87cdd
YG
10511 for (i = 0; i < 10; i++) {
10512 if (!bp->set_mac_pending)
10513 break;
e665bfda 10514 smp_rmb();
f3c87cdd
YG
10515 msleep_interruptible(10);
10516 }
10517 if (i == 10)
10518 rc = -ENODEV;
10519 }
10520
10521 return rc;
10522}
10523
a2fbb9ea
ET
10524static void bnx2x_self_test(struct net_device *dev,
10525 struct ethtool_test *etest, u64 *buf)
10526{
10527 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10528
10529 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10530
f3c87cdd 10531 if (!netif_running(dev))
a2fbb9ea 10532 return;
a2fbb9ea 10533
33471629 10534 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10535 if (IS_E1HMF(bp))
10536 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10537
10538 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10539 int port = BP_PORT(bp);
10540 u32 val;
f3c87cdd
YG
10541 u8 link_up;
10542
279abdf5
EG
10543 /* save current value of input enable for TX port IF */
10544 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10545 /* disable input for TX port IF */
10546 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10547
061bc702 10548 link_up = (bnx2x_link_test(bp) == 0);
f3c87cdd
YG
10549 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10550 bnx2x_nic_load(bp, LOAD_DIAG);
10551 /* wait until link state is restored */
10552 bnx2x_wait_for_link(bp, link_up);
10553
10554 if (bnx2x_test_registers(bp) != 0) {
10555 buf[0] = 1;
10556 etest->flags |= ETH_TEST_FL_FAILED;
10557 }
10558 if (bnx2x_test_memory(bp) != 0) {
10559 buf[1] = 1;
10560 etest->flags |= ETH_TEST_FL_FAILED;
10561 }
10562 buf[2] = bnx2x_test_loopback(bp, link_up);
10563 if (buf[2] != 0)
10564 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10565
f3c87cdd 10566 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10567
10568 /* restore input for TX port IF */
10569 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10570
f3c87cdd
YG
10571 bnx2x_nic_load(bp, LOAD_NORMAL);
10572 /* wait until link state is restored */
10573 bnx2x_wait_for_link(bp, link_up);
10574 }
10575 if (bnx2x_test_nvram(bp) != 0) {
10576 buf[3] = 1;
a2fbb9ea
ET
10577 etest->flags |= ETH_TEST_FL_FAILED;
10578 }
f3c87cdd
YG
10579 if (bnx2x_test_intr(bp) != 0) {
10580 buf[4] = 1;
10581 etest->flags |= ETH_TEST_FL_FAILED;
10582 }
10583 if (bp->port.pmf)
10584 if (bnx2x_link_test(bp) != 0) {
10585 buf[5] = 1;
10586 etest->flags |= ETH_TEST_FL_FAILED;
10587 }
f3c87cdd
YG
10588
10589#ifdef BNX2X_EXTRA_DEBUG
10590 bnx2x_panic_dump(bp);
10591#endif
a2fbb9ea
ET
10592}
10593
de832a55
EG
10594static const struct {
10595 long offset;
10596 int size;
10597 u8 string[ETH_GSTRING_LEN];
10598} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10599/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10600 { Q_STATS_OFFSET32(error_bytes_received_hi),
10601 8, "[%d]: rx_error_bytes" },
10602 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10603 8, "[%d]: rx_ucast_packets" },
10604 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10605 8, "[%d]: rx_mcast_packets" },
10606 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10607 8, "[%d]: rx_bcast_packets" },
10608 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10609 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10610 4, "[%d]: rx_phy_ip_err_discards"},
10611 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10612 4, "[%d]: rx_skb_alloc_discard" },
10613 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10614
10615/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10616 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10617 8, "[%d]: tx_packets" }
10618};
10619
bb2a0f7a
YG
10620static const struct {
10621 long offset;
10622 int size;
10623 u32 flags;
66e855f3
YG
10624#define STATS_FLAGS_PORT 1
10625#define STATS_FLAGS_FUNC 2
de832a55 10626#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10627 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10628} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10629/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10630 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10631 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10632 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10633 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10634 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10635 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10636 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10637 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10638 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10639 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10640 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10641 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10642 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10643 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10644 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10645 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10646 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10647/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10648 8, STATS_FLAGS_PORT, "rx_fragments" },
10649 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10650 8, STATS_FLAGS_PORT, "rx_jabbers" },
10651 { STATS_OFFSET32(no_buff_discard_hi),
10652 8, STATS_FLAGS_BOTH, "rx_discards" },
10653 { STATS_OFFSET32(mac_filter_discard),
10654 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10655 { STATS_OFFSET32(xxoverflow_discard),
10656 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10657 { STATS_OFFSET32(brb_drop_hi),
10658 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10659 { STATS_OFFSET32(brb_truncate_hi),
10660 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10661 { STATS_OFFSET32(pause_frames_received_hi),
10662 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10663 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10664 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10665 { STATS_OFFSET32(nig_timer_max),
10666 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10667/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10668 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10669 { STATS_OFFSET32(rx_skb_alloc_failed),
10670 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10671 { STATS_OFFSET32(hw_csum_err),
10672 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10673
10674 { STATS_OFFSET32(total_bytes_transmitted_hi),
10675 8, STATS_FLAGS_BOTH, "tx_bytes" },
10676 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10677 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10678 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10679 8, STATS_FLAGS_BOTH, "tx_packets" },
10680 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10681 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10682 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10683 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10684 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10685 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10686 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10687 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10688/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10689 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10690 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10691 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10692 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10693 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10694 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10695 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10696 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10697 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10698 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10699 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10700 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10701 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10702 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10703 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10704 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10705 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10706 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10707 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10708/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10709 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10710 { STATS_OFFSET32(pause_frames_sent_hi),
10711 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10712};
10713
de832a55
EG
10714#define IS_PORT_STAT(i) \
10715 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10716#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10717#define IS_E1HMF_MODE_STAT(bp) \
10718 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10719
15f0a394
BH
10720static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10721{
10722 struct bnx2x *bp = netdev_priv(dev);
10723 int i, num_stats;
10724
10725 switch(stringset) {
10726 case ETH_SS_STATS:
10727 if (is_multi(bp)) {
10728 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10729 if (!IS_E1HMF_MODE_STAT(bp))
10730 num_stats += BNX2X_NUM_STATS;
10731 } else {
10732 if (IS_E1HMF_MODE_STAT(bp)) {
10733 num_stats = 0;
10734 for (i = 0; i < BNX2X_NUM_STATS; i++)
10735 if (IS_FUNC_STAT(i))
10736 num_stats++;
10737 } else
10738 num_stats = BNX2X_NUM_STATS;
10739 }
10740 return num_stats;
10741
10742 case ETH_SS_TEST:
10743 return BNX2X_NUM_TESTS;
10744
10745 default:
10746 return -EINVAL;
10747 }
10748}
10749
a2fbb9ea
ET
10750static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10751{
bb2a0f7a 10752 struct bnx2x *bp = netdev_priv(dev);
de832a55 10753 int i, j, k;
bb2a0f7a 10754
a2fbb9ea
ET
10755 switch (stringset) {
10756 case ETH_SS_STATS:
de832a55
EG
10757 if (is_multi(bp)) {
10758 k = 0;
ca00392c 10759 for_each_rx_queue(bp, i) {
de832a55
EG
10760 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10761 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10762 bnx2x_q_stats_arr[j].string, i);
10763 k += BNX2X_NUM_Q_STATS;
10764 }
10765 if (IS_E1HMF_MODE_STAT(bp))
10766 break;
10767 for (j = 0; j < BNX2X_NUM_STATS; j++)
10768 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10769 bnx2x_stats_arr[j].string);
10770 } else {
10771 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10772 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10773 continue;
10774 strcpy(buf + j*ETH_GSTRING_LEN,
10775 bnx2x_stats_arr[i].string);
10776 j++;
10777 }
bb2a0f7a 10778 }
a2fbb9ea
ET
10779 break;
10780
10781 case ETH_SS_TEST:
10782 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10783 break;
10784 }
10785}
10786
a2fbb9ea
ET
10787static void bnx2x_get_ethtool_stats(struct net_device *dev,
10788 struct ethtool_stats *stats, u64 *buf)
10789{
10790 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10791 u32 *hw_stats, *offset;
10792 int i, j, k;
bb2a0f7a 10793
de832a55
EG
10794 if (is_multi(bp)) {
10795 k = 0;
ca00392c 10796 for_each_rx_queue(bp, i) {
de832a55
EG
10797 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10798 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10799 if (bnx2x_q_stats_arr[j].size == 0) {
10800 /* skip this counter */
10801 buf[k + j] = 0;
10802 continue;
10803 }
10804 offset = (hw_stats +
10805 bnx2x_q_stats_arr[j].offset);
10806 if (bnx2x_q_stats_arr[j].size == 4) {
10807 /* 4-byte counter */
10808 buf[k + j] = (u64) *offset;
10809 continue;
10810 }
10811 /* 8-byte counter */
10812 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10813 }
10814 k += BNX2X_NUM_Q_STATS;
10815 }
10816 if (IS_E1HMF_MODE_STAT(bp))
10817 return;
10818 hw_stats = (u32 *)&bp->eth_stats;
10819 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10820 if (bnx2x_stats_arr[j].size == 0) {
10821 /* skip this counter */
10822 buf[k + j] = 0;
10823 continue;
10824 }
10825 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10826 if (bnx2x_stats_arr[j].size == 4) {
10827 /* 4-byte counter */
10828 buf[k + j] = (u64) *offset;
10829 continue;
10830 }
10831 /* 8-byte counter */
10832 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10833 }
de832a55
EG
10834 } else {
10835 hw_stats = (u32 *)&bp->eth_stats;
10836 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10837 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10838 continue;
10839 if (bnx2x_stats_arr[i].size == 0) {
10840 /* skip this counter */
10841 buf[j] = 0;
10842 j++;
10843 continue;
10844 }
10845 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10846 if (bnx2x_stats_arr[i].size == 4) {
10847 /* 4-byte counter */
10848 buf[j] = (u64) *offset;
10849 j++;
10850 continue;
10851 }
10852 /* 8-byte counter */
10853 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10854 j++;
a2fbb9ea 10855 }
a2fbb9ea
ET
10856 }
10857}
10858
10859static int bnx2x_phys_id(struct net_device *dev, u32 data)
10860{
10861 struct bnx2x *bp = netdev_priv(dev);
10862 int i;
10863
34f80b04
EG
10864 if (!netif_running(dev))
10865 return 0;
10866
10867 if (!bp->port.pmf)
10868 return 0;
10869
a2fbb9ea
ET
10870 if (data == 0)
10871 data = 2;
10872
10873 for (i = 0; i < (data * 2); i++) {
c18487ee 10874 if ((i % 2) == 0)
7846e471
YR
10875 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10876 SPEED_1000);
c18487ee 10877 else
7846e471 10878 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
c18487ee 10879
a2fbb9ea
ET
10880 msleep_interruptible(500);
10881 if (signal_pending(current))
10882 break;
10883 }
10884
c18487ee 10885 if (bp->link_vars.link_up)
7846e471
YR
10886 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10887 bp->link_vars.line_speed);
a2fbb9ea
ET
10888
10889 return 0;
10890}
10891
0fc0b732 10892static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10893 .get_settings = bnx2x_get_settings,
10894 .set_settings = bnx2x_set_settings,
10895 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10896 .get_regs_len = bnx2x_get_regs_len,
10897 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10898 .get_wol = bnx2x_get_wol,
10899 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10900 .get_msglevel = bnx2x_get_msglevel,
10901 .set_msglevel = bnx2x_set_msglevel,
10902 .nway_reset = bnx2x_nway_reset,
01e53298 10903 .get_link = bnx2x_get_link,
7a9b2557
VZ
10904 .get_eeprom_len = bnx2x_get_eeprom_len,
10905 .get_eeprom = bnx2x_get_eeprom,
10906 .set_eeprom = bnx2x_set_eeprom,
10907 .get_coalesce = bnx2x_get_coalesce,
10908 .set_coalesce = bnx2x_set_coalesce,
10909 .get_ringparam = bnx2x_get_ringparam,
10910 .set_ringparam = bnx2x_set_ringparam,
10911 .get_pauseparam = bnx2x_get_pauseparam,
10912 .set_pauseparam = bnx2x_set_pauseparam,
10913 .get_rx_csum = bnx2x_get_rx_csum,
10914 .set_rx_csum = bnx2x_set_rx_csum,
10915 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10916 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10917 .set_flags = bnx2x_set_flags,
10918 .get_flags = ethtool_op_get_flags,
10919 .get_sg = ethtool_op_get_sg,
10920 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10921 .get_tso = ethtool_op_get_tso,
10922 .set_tso = bnx2x_set_tso,
7a9b2557 10923 .self_test = bnx2x_self_test,
15f0a394 10924 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 10925 .get_strings = bnx2x_get_strings,
a2fbb9ea 10926 .phys_id = bnx2x_phys_id,
bb2a0f7a 10927 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10928};
10929
10930/* end of ethtool_ops */
10931
10932/****************************************************************************
10933* General service functions
10934****************************************************************************/
10935
10936static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10937{
10938 u16 pmcsr;
10939
10940 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10941
10942 switch (state) {
10943 case PCI_D0:
34f80b04 10944 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10945 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10946 PCI_PM_CTRL_PME_STATUS));
10947
10948 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10949 /* delay required during transition out of D3hot */
a2fbb9ea 10950 msleep(20);
34f80b04 10951 break;
a2fbb9ea 10952
34f80b04
EG
10953 case PCI_D3hot:
10954 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10955 pmcsr |= 3;
a2fbb9ea 10956
34f80b04
EG
10957 if (bp->wol)
10958 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10959
34f80b04
EG
10960 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10961 pmcsr);
a2fbb9ea 10962
34f80b04
EG
10963 /* No more memory access after this point until
10964 * device is brought back to D0.
10965 */
10966 break;
10967
10968 default:
10969 return -EINVAL;
10970 }
10971 return 0;
a2fbb9ea
ET
10972}
10973
237907c1
EG
10974static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10975{
10976 u16 rx_cons_sb;
10977
10978 /* Tell compiler that status block fields can change */
10979 barrier();
10980 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10981 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10982 rx_cons_sb++;
10983 return (fp->rx_comp_cons != rx_cons_sb);
10984}
10985
34f80b04
EG
10986/*
10987 * net_device service functions
10988 */
10989
a2fbb9ea
ET
10990static int bnx2x_poll(struct napi_struct *napi, int budget)
10991{
10992 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10993 napi);
10994 struct bnx2x *bp = fp->bp;
10995 int work_done = 0;
10996
10997#ifdef BNX2X_STOP_ON_ERROR
10998 if (unlikely(bp->panic))
34f80b04 10999 goto poll_panic;
a2fbb9ea
ET
11000#endif
11001
a2fbb9ea
ET
11002 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
11003 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
11004
11005 bnx2x_update_fpsb_idx(fp);
11006
8534f32c 11007 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 11008 work_done = bnx2x_rx_int(fp, budget);
356e2385 11009
8534f32c
EG
11010 /* must not complete if we consumed full budget */
11011 if (work_done >= budget)
11012 goto poll_again;
11013 }
a2fbb9ea 11014
ca00392c 11015 /* bnx2x_has_rx_work() reads the status block, thus we need to
8534f32c 11016 * ensure that status block indices have been actually read
ca00392c 11017 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
8534f32c 11018 * so that we won't write the "newer" value of the status block to IGU
ca00392c 11019 * (if there was a DMA right after bnx2x_has_rx_work and
8534f32c
EG
11020 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
11021 * may be postponed to right before bnx2x_ack_sb). In this case
11022 * there will never be another interrupt until there is another update
11023 * of the status block, while there is still unhandled work.
11024 */
11025 rmb();
a2fbb9ea 11026
ca00392c 11027 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 11028#ifdef BNX2X_STOP_ON_ERROR
34f80b04 11029poll_panic:
a2fbb9ea 11030#endif
288379f0 11031 napi_complete(napi);
a2fbb9ea 11032
0626b899 11033 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 11034 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 11035 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
11036 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
11037 }
356e2385 11038
8534f32c 11039poll_again:
a2fbb9ea
ET
11040 return work_done;
11041}
11042
755735eb
EG
11043
11044/* we split the first BD into headers and data BDs
33471629 11045 * to ease the pain of our fellow microcode engineers
755735eb
EG
11046 * we use one mapping for both BDs
11047 * So far this has only been observed to happen
11048 * in Other Operating Systems(TM)
11049 */
11050static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11051 struct bnx2x_fastpath *fp,
ca00392c
EG
11052 struct sw_tx_bd *tx_buf,
11053 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
11054 u16 bd_prod, int nbd)
11055{
ca00392c 11056 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
11057 struct eth_tx_bd *d_tx_bd;
11058 dma_addr_t mapping;
11059 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11060
11061 /* first fix first BD */
11062 h_tx_bd->nbd = cpu_to_le16(nbd);
11063 h_tx_bd->nbytes = cpu_to_le16(hlen);
11064
11065 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11066 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11067 h_tx_bd->addr_lo, h_tx_bd->nbd);
11068
11069 /* now get a new data BD
11070 * (after the pbd) and fill it */
11071 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 11072 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
11073
11074 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11075 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11076
11077 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11078 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11079 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
11080
11081 /* this marks the BD as one that has no individual mapping */
11082 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11083
755735eb
EG
11084 DP(NETIF_MSG_TX_QUEUED,
11085 "TSO split data size is %d (%x:%x)\n",
11086 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11087
ca00392c
EG
11088 /* update tx_bd */
11089 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
11090
11091 return bd_prod;
11092}
11093
11094static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11095{
11096 if (fix > 0)
11097 csum = (u16) ~csum_fold(csum_sub(csum,
11098 csum_partial(t_header - fix, fix, 0)));
11099
11100 else if (fix < 0)
11101 csum = (u16) ~csum_fold(csum_add(csum,
11102 csum_partial(t_header, -fix, 0)));
11103
11104 return swab16(csum);
11105}
11106
11107static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11108{
11109 u32 rc;
11110
11111 if (skb->ip_summed != CHECKSUM_PARTIAL)
11112 rc = XMIT_PLAIN;
11113
11114 else {
4781bfad 11115 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
11116 rc = XMIT_CSUM_V6;
11117 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11118 rc |= XMIT_CSUM_TCP;
11119
11120 } else {
11121 rc = XMIT_CSUM_V4;
11122 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11123 rc |= XMIT_CSUM_TCP;
11124 }
11125 }
11126
11127 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
d6a2f98b 11128 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
755735eb
EG
11129
11130 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
d6a2f98b 11131 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
755735eb
EG
11132
11133 return rc;
11134}
11135
632da4d6 11136#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11137/* check if packet requires linearization (packet is too fragmented)
11138 no need to check fragmentation if page size > 8K (there will be no
11139 violation to FW restrictions) */
755735eb
EG
11140static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11141 u32 xmit_type)
11142{
11143 int to_copy = 0;
11144 int hlen = 0;
11145 int first_bd_sz = 0;
11146
11147 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11148 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11149
11150 if (xmit_type & XMIT_GSO) {
11151 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11152 /* Check if LSO packet needs to be copied:
11153 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11154 int wnd_size = MAX_FETCH_BD - 3;
33471629 11155 /* Number of windows to check */
755735eb
EG
11156 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11157 int wnd_idx = 0;
11158 int frag_idx = 0;
11159 u32 wnd_sum = 0;
11160
11161 /* Headers length */
11162 hlen = (int)(skb_transport_header(skb) - skb->data) +
11163 tcp_hdrlen(skb);
11164
11165 /* Amount of data (w/o headers) on linear part of SKB*/
11166 first_bd_sz = skb_headlen(skb) - hlen;
11167
11168 wnd_sum = first_bd_sz;
11169
11170 /* Calculate the first sum - it's special */
11171 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11172 wnd_sum +=
11173 skb_shinfo(skb)->frags[frag_idx].size;
11174
11175 /* If there was data on linear skb data - check it */
11176 if (first_bd_sz > 0) {
11177 if (unlikely(wnd_sum < lso_mss)) {
11178 to_copy = 1;
11179 goto exit_lbl;
11180 }
11181
11182 wnd_sum -= first_bd_sz;
11183 }
11184
11185 /* Others are easier: run through the frag list and
11186 check all windows */
11187 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11188 wnd_sum +=
11189 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11190
11191 if (unlikely(wnd_sum < lso_mss)) {
11192 to_copy = 1;
11193 break;
11194 }
11195 wnd_sum -=
11196 skb_shinfo(skb)->frags[wnd_idx].size;
11197 }
755735eb
EG
11198 } else {
11199 /* in non-LSO too fragmented packet should always
11200 be linearized */
11201 to_copy = 1;
11202 }
11203 }
11204
11205exit_lbl:
11206 if (unlikely(to_copy))
11207 DP(NETIF_MSG_TX_QUEUED,
11208 "Linearization IS REQUIRED for %s packet. "
11209 "num_frags %d hlen %d first_bd_sz %d\n",
11210 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11211 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11212
11213 return to_copy;
11214}
632da4d6 11215#endif
755735eb
EG
11216
11217/* called with netif_tx_lock
a2fbb9ea 11218 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 11219 * netif_wake_queue()
a2fbb9ea 11220 */
61357325 11221static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
11222{
11223 struct bnx2x *bp = netdev_priv(dev);
ca00392c 11224 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 11225 struct netdev_queue *txq;
a2fbb9ea 11226 struct sw_tx_bd *tx_buf;
ca00392c
EG
11227 struct eth_tx_start_bd *tx_start_bd;
11228 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
11229 struct eth_tx_parse_bd *pbd = NULL;
11230 u16 pkt_prod, bd_prod;
755735eb 11231 int nbd, fp_index;
a2fbb9ea 11232 dma_addr_t mapping;
755735eb 11233 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
11234 int i;
11235 u8 hlen = 0;
ca00392c 11236 __le16 pkt_size = 0;
a2fbb9ea
ET
11237
11238#ifdef BNX2X_STOP_ON_ERROR
11239 if (unlikely(bp->panic))
11240 return NETDEV_TX_BUSY;
11241#endif
11242
555f6c78
EG
11243 fp_index = skb_get_queue_mapping(skb);
11244 txq = netdev_get_tx_queue(dev, fp_index);
11245
ca00392c
EG
11246 fp = &bp->fp[fp_index + bp->num_rx_queues];
11247 fp_stat = &bp->fp[fp_index];
755735eb 11248
231fd58a 11249 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 11250 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 11251 netif_tx_stop_queue(txq);
a2fbb9ea
ET
11252 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11253 return NETDEV_TX_BUSY;
11254 }
11255
755735eb
EG
11256 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11257 " gso type %x xmit_type %x\n",
11258 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11259 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11260
632da4d6 11261#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11262 /* First, check if we need to linearize the skb (due to FW
11263 restrictions). No need to check fragmentation if page size > 8K
11264 (there will be no violation to FW restrictions) */
755735eb
EG
11265 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11266 /* Statistics of linearization */
11267 bp->lin_cnt++;
11268 if (skb_linearize(skb) != 0) {
11269 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11270 "silently dropping this SKB\n");
11271 dev_kfree_skb_any(skb);
da5a662a 11272 return NETDEV_TX_OK;
755735eb
EG
11273 }
11274 }
632da4d6 11275#endif
755735eb 11276
a2fbb9ea 11277 /*
755735eb 11278 Please read carefully. First we use one BD which we mark as start,
ca00392c 11279 then we have a parsing info BD (used for TSO or xsum),
755735eb 11280 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
11281 (don't forget to mark the last one as last,
11282 and to unmap only AFTER you write to the BD ...)
755735eb 11283 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
11284 */
11285
11286 pkt_prod = fp->tx_pkt_prod++;
755735eb 11287 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11288
755735eb 11289 /* get a tx_buf and first BD */
a2fbb9ea 11290 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11291 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11292
ca00392c
EG
11293 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11294 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11295 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11296 /* header nbd */
ca00392c 11297 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11298
755735eb
EG
11299 /* remember the first BD of the packet */
11300 tx_buf->first_bd = fp->tx_bd_prod;
11301 tx_buf->skb = skb;
ca00392c 11302 tx_buf->flags = 0;
a2fbb9ea
ET
11303
11304 DP(NETIF_MSG_TX_QUEUED,
11305 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11306 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11307
0c6671b0
EG
11308#ifdef BCM_VLAN
11309 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11310 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
11311 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11312 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11313 } else
0c6671b0 11314#endif
ca00392c 11315 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11316
ca00392c
EG
11317 /* turn on parsing and get a BD */
11318 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11319 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11320
ca00392c 11321 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11322
11323 if (xmit_type & XMIT_CSUM) {
ca00392c 11324 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11325
11326 /* for now NS flag is not used in Linux */
4781bfad
EG
11327 pbd->global_data =
11328 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11329 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11330
755735eb
EG
11331 pbd->ip_hlen = (skb_transport_header(skb) -
11332 skb_network_header(skb)) / 2;
11333
11334 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11335
755735eb 11336 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11337 hlen = hlen*2;
a2fbb9ea 11338
ca00392c 11339 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11340
11341 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11342 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11343 ETH_TX_BD_FLAGS_IP_CSUM;
11344 else
ca00392c
EG
11345 tx_start_bd->bd_flags.as_bitfield |=
11346 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11347
11348 if (xmit_type & XMIT_CSUM_TCP) {
11349 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11350
11351 } else {
11352 s8 fix = SKB_CS_OFF(skb); /* signed! */
11353
ca00392c 11354 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11355
755735eb 11356 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11357 "hlen %d fix %d csum before fix %x\n",
11358 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11359
11360 /* HW bug: fixup the CSUM */
11361 pbd->tcp_pseudo_csum =
11362 bnx2x_csum_fix(skb_transport_header(skb),
11363 SKB_CS(skb), fix);
11364
11365 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11366 pbd->tcp_pseudo_csum);
11367 }
a2fbb9ea
ET
11368 }
11369
11370 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11371 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11372
ca00392c
EG
11373 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11374 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11375 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11376 tx_start_bd->nbd = cpu_to_le16(nbd);
11377 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11378 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11379
11380 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11381 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11382 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11383 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11384 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11385
755735eb 11386 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11387
11388 DP(NETIF_MSG_TX_QUEUED,
11389 "TSO packet len %d hlen %d total len %d tso size %d\n",
11390 skb->len, hlen, skb_headlen(skb),
11391 skb_shinfo(skb)->gso_size);
11392
ca00392c 11393 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11394
755735eb 11395 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11396 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11397 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11398
11399 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11400 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11401 pbd->tcp_flags = pbd_tcp_flags(skb);
11402
11403 if (xmit_type & XMIT_GSO_V4) {
11404 pbd->ip_id = swab16(ip_hdr(skb)->id);
11405 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11406 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11407 ip_hdr(skb)->daddr,
11408 0, IPPROTO_TCP, 0));
755735eb
EG
11409
11410 } else
11411 pbd->tcp_pseudo_csum =
11412 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11413 &ipv6_hdr(skb)->daddr,
11414 0, IPPROTO_TCP, 0));
11415
a2fbb9ea
ET
11416 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11417 }
ca00392c 11418 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11419
755735eb
EG
11420 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11421 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11422
755735eb 11423 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11424 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11425 if (total_pkt_bd == NULL)
11426 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11427
755735eb
EG
11428 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11429 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11430
ca00392c
EG
11431 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11432 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11433 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11434 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11435
755735eb 11436 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11437 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11438 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11439 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11440 }
11441
ca00392c 11442 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11443
a2fbb9ea
ET
11444 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11445
755735eb 11446 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11447 * if the packet contains or ends with it
11448 */
11449 if (TX_BD_POFF(bd_prod) < nbd)
11450 nbd++;
11451
ca00392c
EG
11452 if (total_pkt_bd != NULL)
11453 total_pkt_bd->total_pkt_bytes = pkt_size;
11454
a2fbb9ea
ET
11455 if (pbd)
11456 DP(NETIF_MSG_TX_QUEUED,
11457 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11458 " tcp_flags %x xsum %x seq %u hlen %u\n",
11459 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11460 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11461 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11462
755735eb 11463 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11464
58f4c4cf
EG
11465 /*
11466 * Make sure that the BD data is updated before updating the producer
11467 * since FW might read the BD right after the producer is updated.
11468 * This is only applicable for weak-ordered memory model archs such
11469 * as IA-64. The following barrier is also mandatory since FW will
11470 * assumes packets must have BDs.
11471 */
11472 wmb();
11473
ca00392c
EG
11474 fp->tx_db.data.prod += nbd;
11475 barrier();
11476 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
11477
11478 mmiowb();
11479
755735eb 11480 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11481
11482 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11483 netif_tx_stop_queue(txq);
58f4c4cf
EG
11484 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11485 if we put Tx into XOFF state. */
11486 smp_mb();
ca00392c 11487 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11488 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11489 netif_tx_wake_queue(txq);
a2fbb9ea 11490 }
ca00392c 11491 fp_stat->tx_pkt++;
a2fbb9ea
ET
11492
11493 return NETDEV_TX_OK;
11494}
11495
bb2a0f7a 11496/* called with rtnl_lock */
a2fbb9ea
ET
11497static int bnx2x_open(struct net_device *dev)
11498{
11499 struct bnx2x *bp = netdev_priv(dev);
11500
6eccabb3
EG
11501 netif_carrier_off(dev);
11502
a2fbb9ea
ET
11503 bnx2x_set_power_state(bp, PCI_D0);
11504
bb2a0f7a 11505 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11506}
11507
bb2a0f7a 11508/* called with rtnl_lock */
a2fbb9ea
ET
11509static int bnx2x_close(struct net_device *dev)
11510{
a2fbb9ea
ET
11511 struct bnx2x *bp = netdev_priv(dev);
11512
11513 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11514 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11515 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11516 if (!CHIP_REV_IS_SLOW(bp))
11517 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11518
11519 return 0;
11520}
11521
f5372251 11522/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11523static void bnx2x_set_rx_mode(struct net_device *dev)
11524{
11525 struct bnx2x *bp = netdev_priv(dev);
11526 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11527 int port = BP_PORT(bp);
11528
11529 if (bp->state != BNX2X_STATE_OPEN) {
11530 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11531 return;
11532 }
11533
11534 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11535
11536 if (dev->flags & IFF_PROMISC)
11537 rx_mode = BNX2X_RX_MODE_PROMISC;
11538
11539 else if ((dev->flags & IFF_ALLMULTI) ||
11540 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11541 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11542
11543 else { /* some multicasts */
11544 if (CHIP_IS_E1(bp)) {
11545 int i, old, offset;
11546 struct dev_mc_list *mclist;
11547 struct mac_configuration_cmd *config =
11548 bnx2x_sp(bp, mcast_config);
11549
11550 for (i = 0, mclist = dev->mc_list;
11551 mclist && (i < dev->mc_count);
11552 i++, mclist = mclist->next) {
11553
11554 config->config_table[i].
11555 cam_entry.msb_mac_addr =
11556 swab16(*(u16 *)&mclist->dmi_addr[0]);
11557 config->config_table[i].
11558 cam_entry.middle_mac_addr =
11559 swab16(*(u16 *)&mclist->dmi_addr[2]);
11560 config->config_table[i].
11561 cam_entry.lsb_mac_addr =
11562 swab16(*(u16 *)&mclist->dmi_addr[4]);
11563 config->config_table[i].cam_entry.flags =
11564 cpu_to_le16(port);
11565 config->config_table[i].
11566 target_table_entry.flags = 0;
ca00392c
EG
11567 config->config_table[i].target_table_entry.
11568 clients_bit_vector =
11569 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11570 config->config_table[i].
11571 target_table_entry.vlan_id = 0;
11572
11573 DP(NETIF_MSG_IFUP,
11574 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11575 config->config_table[i].
11576 cam_entry.msb_mac_addr,
11577 config->config_table[i].
11578 cam_entry.middle_mac_addr,
11579 config->config_table[i].
11580 cam_entry.lsb_mac_addr);
11581 }
8d9c5f34 11582 old = config->hdr.length;
34f80b04
EG
11583 if (old > i) {
11584 for (; i < old; i++) {
11585 if (CAM_IS_INVALID(config->
11586 config_table[i])) {
af246401 11587 /* already invalidated */
34f80b04
EG
11588 break;
11589 }
11590 /* invalidate */
11591 CAM_INVALIDATE(config->
11592 config_table[i]);
11593 }
11594 }
11595
11596 if (CHIP_REV_IS_SLOW(bp))
11597 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11598 else
11599 offset = BNX2X_MAX_MULTICAST*(1 + port);
11600
8d9c5f34 11601 config->hdr.length = i;
34f80b04 11602 config->hdr.offset = offset;
8d9c5f34 11603 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11604 config->hdr.reserved1 = 0;
11605
e665bfda
MC
11606 bp->set_mac_pending++;
11607 smp_wmb();
11608
34f80b04
EG
11609 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11610 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11611 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11612 0);
11613 } else { /* E1H */
11614 /* Accept one or more multicasts */
11615 struct dev_mc_list *mclist;
11616 u32 mc_filter[MC_HASH_SIZE];
11617 u32 crc, bit, regidx;
11618 int i;
11619
11620 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11621
11622 for (i = 0, mclist = dev->mc_list;
11623 mclist && (i < dev->mc_count);
11624 i++, mclist = mclist->next) {
11625
7c510e4b
JB
11626 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11627 mclist->dmi_addr);
34f80b04
EG
11628
11629 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11630 bit = (crc >> 24) & 0xff;
11631 regidx = bit >> 5;
11632 bit &= 0x1f;
11633 mc_filter[regidx] |= (1 << bit);
11634 }
11635
11636 for (i = 0; i < MC_HASH_SIZE; i++)
11637 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11638 mc_filter[i]);
11639 }
11640 }
11641
11642 bp->rx_mode = rx_mode;
11643 bnx2x_set_storm_rx_mode(bp);
11644}
11645
11646/* called with rtnl_lock */
a2fbb9ea
ET
11647static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11648{
11649 struct sockaddr *addr = p;
11650 struct bnx2x *bp = netdev_priv(dev);
11651
34f80b04 11652 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11653 return -EINVAL;
11654
11655 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11656 if (netif_running(dev)) {
11657 if (CHIP_IS_E1(bp))
e665bfda 11658 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 11659 else
e665bfda 11660 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 11661 }
a2fbb9ea
ET
11662
11663 return 0;
11664}
11665
c18487ee 11666/* called with rtnl_lock */
01cd4528
EG
11667static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11668 int devad, u16 addr)
a2fbb9ea 11669{
01cd4528
EG
11670 struct bnx2x *bp = netdev_priv(netdev);
11671 u16 value;
11672 int rc;
11673 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11674
01cd4528
EG
11675 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11676 prtad, devad, addr);
a2fbb9ea 11677
01cd4528
EG
11678 if (prtad != bp->mdio.prtad) {
11679 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11680 prtad, bp->mdio.prtad);
11681 return -EINVAL;
11682 }
11683
11684 /* The HW expects different devad if CL22 is used */
11685 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11686
01cd4528
EG
11687 bnx2x_acquire_phy_lock(bp);
11688 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11689 devad, addr, &value);
11690 bnx2x_release_phy_lock(bp);
11691 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11692
01cd4528
EG
11693 if (!rc)
11694 rc = value;
11695 return rc;
11696}
a2fbb9ea 11697
01cd4528
EG
11698/* called with rtnl_lock */
11699static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11700 u16 addr, u16 value)
11701{
11702 struct bnx2x *bp = netdev_priv(netdev);
11703 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11704 int rc;
11705
11706 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11707 " value 0x%x\n", prtad, devad, addr, value);
11708
11709 if (prtad != bp->mdio.prtad) {
11710 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11711 prtad, bp->mdio.prtad);
11712 return -EINVAL;
a2fbb9ea
ET
11713 }
11714
01cd4528
EG
11715 /* The HW expects different devad if CL22 is used */
11716 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11717
01cd4528
EG
11718 bnx2x_acquire_phy_lock(bp);
11719 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11720 devad, addr, value);
11721 bnx2x_release_phy_lock(bp);
11722 return rc;
11723}
c18487ee 11724
01cd4528
EG
11725/* called with rtnl_lock */
11726static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11727{
11728 struct bnx2x *bp = netdev_priv(dev);
11729 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11730
01cd4528
EG
11731 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11732 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11733
01cd4528
EG
11734 if (!netif_running(dev))
11735 return -EAGAIN;
11736
11737 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11738}
11739
34f80b04 11740/* called with rtnl_lock */
a2fbb9ea
ET
11741static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11742{
11743 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11744 int rc = 0;
a2fbb9ea
ET
11745
11746 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11747 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11748 return -EINVAL;
11749
11750 /* This does not race with packet allocation
c14423fe 11751 * because the actual alloc size is
a2fbb9ea
ET
11752 * only updated as part of load
11753 */
11754 dev->mtu = new_mtu;
11755
11756 if (netif_running(dev)) {
34f80b04
EG
11757 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11758 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11759 }
34f80b04
EG
11760
11761 return rc;
a2fbb9ea
ET
11762}
11763
11764static void bnx2x_tx_timeout(struct net_device *dev)
11765{
11766 struct bnx2x *bp = netdev_priv(dev);
11767
11768#ifdef BNX2X_STOP_ON_ERROR
11769 if (!bp->panic)
11770 bnx2x_panic();
11771#endif
11772 /* This allows the netif to be shutdown gracefully before resetting */
11773 schedule_work(&bp->reset_task);
11774}
11775
11776#ifdef BCM_VLAN
34f80b04 11777/* called with rtnl_lock */
a2fbb9ea
ET
11778static void bnx2x_vlan_rx_register(struct net_device *dev,
11779 struct vlan_group *vlgrp)
11780{
11781 struct bnx2x *bp = netdev_priv(dev);
11782
11783 bp->vlgrp = vlgrp;
0c6671b0
EG
11784
11785 /* Set flags according to the required capabilities */
11786 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11787
11788 if (dev->features & NETIF_F_HW_VLAN_TX)
11789 bp->flags |= HW_VLAN_TX_FLAG;
11790
11791 if (dev->features & NETIF_F_HW_VLAN_RX)
11792 bp->flags |= HW_VLAN_RX_FLAG;
11793
a2fbb9ea 11794 if (netif_running(dev))
49d66772 11795 bnx2x_set_client_config(bp);
a2fbb9ea 11796}
34f80b04 11797
a2fbb9ea
ET
11798#endif
11799
11800#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11801static void poll_bnx2x(struct net_device *dev)
11802{
11803 struct bnx2x *bp = netdev_priv(dev);
11804
11805 disable_irq(bp->pdev->irq);
11806 bnx2x_interrupt(bp->pdev->irq, dev);
11807 enable_irq(bp->pdev->irq);
11808}
11809#endif
11810
c64213cd
SH
11811static const struct net_device_ops bnx2x_netdev_ops = {
11812 .ndo_open = bnx2x_open,
11813 .ndo_stop = bnx2x_close,
11814 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11815 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
11816 .ndo_set_mac_address = bnx2x_change_mac_addr,
11817 .ndo_validate_addr = eth_validate_addr,
11818 .ndo_do_ioctl = bnx2x_ioctl,
11819 .ndo_change_mtu = bnx2x_change_mtu,
11820 .ndo_tx_timeout = bnx2x_tx_timeout,
11821#ifdef BCM_VLAN
11822 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11823#endif
11824#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11825 .ndo_poll_controller = poll_bnx2x,
11826#endif
11827};
11828
34f80b04
EG
11829static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11830 struct net_device *dev)
a2fbb9ea
ET
11831{
11832 struct bnx2x *bp;
11833 int rc;
11834
11835 SET_NETDEV_DEV(dev, &pdev->dev);
11836 bp = netdev_priv(dev);
11837
34f80b04
EG
11838 bp->dev = dev;
11839 bp->pdev = pdev;
a2fbb9ea 11840 bp->flags = 0;
34f80b04 11841 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
11842
11843 rc = pci_enable_device(pdev);
11844 if (rc) {
11845 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11846 goto err_out;
11847 }
11848
11849 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11850 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11851 " aborting\n");
11852 rc = -ENODEV;
11853 goto err_out_disable;
11854 }
11855
11856 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11857 printk(KERN_ERR PFX "Cannot find second PCI device"
11858 " base address, aborting\n");
11859 rc = -ENODEV;
11860 goto err_out_disable;
11861 }
11862
34f80b04
EG
11863 if (atomic_read(&pdev->enable_cnt) == 1) {
11864 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11865 if (rc) {
11866 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11867 " aborting\n");
11868 goto err_out_disable;
11869 }
a2fbb9ea 11870
34f80b04
EG
11871 pci_set_master(pdev);
11872 pci_save_state(pdev);
11873 }
a2fbb9ea
ET
11874
11875 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11876 if (bp->pm_cap == 0) {
11877 printk(KERN_ERR PFX "Cannot find power management"
11878 " capability, aborting\n");
11879 rc = -EIO;
11880 goto err_out_release;
11881 }
11882
11883 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11884 if (bp->pcie_cap == 0) {
11885 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11886 " aborting\n");
11887 rc = -EIO;
11888 goto err_out_release;
11889 }
11890
6a35528a 11891 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11892 bp->flags |= USING_DAC_FLAG;
6a35528a 11893 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11894 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11895 " failed, aborting\n");
11896 rc = -EIO;
11897 goto err_out_release;
11898 }
11899
284901a9 11900 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea
ET
11901 printk(KERN_ERR PFX "System does not support DMA,"
11902 " aborting\n");
11903 rc = -EIO;
11904 goto err_out_release;
11905 }
11906
34f80b04
EG
11907 dev->mem_start = pci_resource_start(pdev, 0);
11908 dev->base_addr = dev->mem_start;
11909 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11910
11911 dev->irq = pdev->irq;
11912
275f165f 11913 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11914 if (!bp->regview) {
11915 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11916 rc = -ENOMEM;
11917 goto err_out_release;
11918 }
11919
34f80b04
EG
11920 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11921 min_t(u64, BNX2X_DB_SIZE,
11922 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11923 if (!bp->doorbells) {
11924 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11925 rc = -ENOMEM;
11926 goto err_out_unmap;
11927 }
11928
11929 bnx2x_set_power_state(bp, PCI_D0);
11930
34f80b04
EG
11931 /* clean indirect addresses */
11932 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11933 PCICFG_VENDOR_ID_OFFSET);
11934 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11935 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11936 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11937 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11938
34f80b04 11939 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11940
c64213cd 11941 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11942 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11943 dev->features |= NETIF_F_SG;
11944 dev->features |= NETIF_F_HW_CSUM;
11945 if (bp->flags & USING_DAC_FLAG)
11946 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11947 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11948 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11949#ifdef BCM_VLAN
11950 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11951 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11952
11953 dev->vlan_features |= NETIF_F_SG;
11954 dev->vlan_features |= NETIF_F_HW_CSUM;
11955 if (bp->flags & USING_DAC_FLAG)
11956 dev->vlan_features |= NETIF_F_HIGHDMA;
11957 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11958 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11959#endif
a2fbb9ea 11960
01cd4528
EG
11961 /* get_port_hwinfo() will set prtad and mmds properly */
11962 bp->mdio.prtad = MDIO_PRTAD_NONE;
11963 bp->mdio.mmds = 0;
11964 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11965 bp->mdio.dev = dev;
11966 bp->mdio.mdio_read = bnx2x_mdio_read;
11967 bp->mdio.mdio_write = bnx2x_mdio_write;
11968
a2fbb9ea
ET
11969 return 0;
11970
11971err_out_unmap:
11972 if (bp->regview) {
11973 iounmap(bp->regview);
11974 bp->regview = NULL;
11975 }
a2fbb9ea
ET
11976 if (bp->doorbells) {
11977 iounmap(bp->doorbells);
11978 bp->doorbells = NULL;
11979 }
11980
11981err_out_release:
34f80b04
EG
11982 if (atomic_read(&pdev->enable_cnt) == 1)
11983 pci_release_regions(pdev);
a2fbb9ea
ET
11984
11985err_out_disable:
11986 pci_disable_device(pdev);
11987 pci_set_drvdata(pdev, NULL);
11988
11989err_out:
11990 return rc;
11991}
11992
37f9ce62
EG
11993static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11994 int *width, int *speed)
25047950
ET
11995{
11996 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11997
37f9ce62 11998 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 11999
37f9ce62
EG
12000 /* return value of 1=2.5GHz 2=5GHz */
12001 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 12002}
37f9ce62 12003
94a78b79
VZ
12004static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12005{
37f9ce62 12006 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
12007 struct bnx2x_fw_file_hdr *fw_hdr;
12008 struct bnx2x_fw_file_section *sections;
94a78b79 12009 u32 offset, len, num_ops;
37f9ce62 12010 u16 *ops_offsets;
94a78b79 12011 int i;
37f9ce62 12012 const u8 *fw_ver;
94a78b79
VZ
12013
12014 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12015 return -EINVAL;
12016
12017 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12018 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12019
12020 /* Make sure none of the offsets and sizes make us read beyond
12021 * the end of the firmware data */
12022 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12023 offset = be32_to_cpu(sections[i].offset);
12024 len = be32_to_cpu(sections[i].len);
12025 if (offset + len > firmware->size) {
37f9ce62
EG
12026 printk(KERN_ERR PFX "Section %d length is out of "
12027 "bounds\n", i);
94a78b79
VZ
12028 return -EINVAL;
12029 }
12030 }
12031
12032 /* Likewise for the init_ops offsets */
12033 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12034 ops_offsets = (u16 *)(firmware->data + offset);
12035 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12036
12037 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12038 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
37f9ce62
EG
12039 printk(KERN_ERR PFX "Section offset %d is out of "
12040 "bounds\n", i);
94a78b79
VZ
12041 return -EINVAL;
12042 }
12043 }
12044
12045 /* Check FW version */
12046 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12047 fw_ver = firmware->data + offset;
12048 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12049 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12050 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12051 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12052 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
12053 " Should be %d.%d.%d.%d\n",
12054 fw_ver[0], fw_ver[1], fw_ver[2],
12055 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12056 BCM_5710_FW_MINOR_VERSION,
12057 BCM_5710_FW_REVISION_VERSION,
12058 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 12059 return -EINVAL;
94a78b79
VZ
12060 }
12061
12062 return 0;
12063}
12064
ab6ad5a4 12065static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12066{
ab6ad5a4
EG
12067 const __be32 *source = (const __be32 *)_source;
12068 u32 *target = (u32 *)_target;
94a78b79 12069 u32 i;
94a78b79
VZ
12070
12071 for (i = 0; i < n/4; i++)
12072 target[i] = be32_to_cpu(source[i]);
12073}
12074
12075/*
12076 Ops array is stored in the following format:
12077 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12078 */
ab6ad5a4 12079static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 12080{
ab6ad5a4
EG
12081 const __be32 *source = (const __be32 *)_source;
12082 struct raw_op *target = (struct raw_op *)_target;
94a78b79 12083 u32 i, j, tmp;
94a78b79 12084
ab6ad5a4 12085 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
12086 tmp = be32_to_cpu(source[j]);
12087 target[i].op = (tmp >> 24) & 0xff;
12088 target[i].offset = tmp & 0xffffff;
12089 target[i].raw_data = be32_to_cpu(source[j+1]);
12090 }
12091}
ab6ad5a4
EG
12092
12093static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12094{
ab6ad5a4
EG
12095 const __be16 *source = (const __be16 *)_source;
12096 u16 *target = (u16 *)_target;
94a78b79 12097 u32 i;
94a78b79
VZ
12098
12099 for (i = 0; i < n/2; i++)
12100 target[i] = be16_to_cpu(source[i]);
12101}
12102
12103#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
ab6ad5a4
EG
12104 do { \
12105 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12106 bp->arr = kmalloc(len, GFP_KERNEL); \
94a78b79 12107 if (!bp->arr) { \
ab6ad5a4
EG
12108 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12109 "for "#arr"\n", len); \
94a78b79
VZ
12110 goto lbl; \
12111 } \
ab6ad5a4
EG
12112 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12113 (u8 *)bp->arr, len); \
94a78b79
VZ
12114 } while (0)
12115
94a78b79
VZ
12116static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12117{
45229b42 12118 const char *fw_file_name;
94a78b79 12119 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 12120 int rc;
94a78b79 12121
94a78b79 12122 if (CHIP_IS_E1(bp))
45229b42 12123 fw_file_name = FW_FILE_NAME_E1;
94a78b79 12124 else
45229b42 12125 fw_file_name = FW_FILE_NAME_E1H;
94a78b79
VZ
12126
12127 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12128
12129 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12130 if (rc) {
ab6ad5a4
EG
12131 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12132 fw_file_name);
94a78b79
VZ
12133 goto request_firmware_exit;
12134 }
12135
12136 rc = bnx2x_check_firmware(bp);
12137 if (rc) {
12138 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12139 goto request_firmware_exit;
12140 }
12141
12142 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12143
12144 /* Initialize the pointers to the init arrays */
12145 /* Blob */
12146 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12147
12148 /* Opcodes */
12149 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12150
12151 /* Offsets */
ab6ad5a4
EG
12152 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12153 be16_to_cpu_n);
94a78b79
VZ
12154
12155 /* STORMs firmware */
573f2035
EG
12156 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12157 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12158 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12159 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12160 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12161 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12162 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12163 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12164 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12165 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12166 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12167 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12168 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12169 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12170 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12171 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
12172
12173 return 0;
ab6ad5a4 12174
94a78b79
VZ
12175init_offsets_alloc_err:
12176 kfree(bp->init_ops);
12177init_ops_alloc_err:
12178 kfree(bp->init_data);
12179request_firmware_exit:
12180 release_firmware(bp->firmware);
12181
12182 return rc;
12183}
12184
12185
a2fbb9ea
ET
12186static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12187 const struct pci_device_id *ent)
12188{
a2fbb9ea
ET
12189 struct net_device *dev = NULL;
12190 struct bnx2x *bp;
37f9ce62 12191 int pcie_width, pcie_speed;
25047950 12192 int rc;
a2fbb9ea 12193
a2fbb9ea 12194 /* dev zeroed in init_etherdev */
555f6c78 12195 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
12196 if (!dev) {
12197 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 12198 return -ENOMEM;
34f80b04 12199 }
a2fbb9ea 12200
a2fbb9ea
ET
12201 bp = netdev_priv(dev);
12202 bp->msglevel = debug;
12203
df4770de
EG
12204 pci_set_drvdata(pdev, dev);
12205
34f80b04 12206 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
12207 if (rc < 0) {
12208 free_netdev(dev);
12209 return rc;
12210 }
12211
34f80b04 12212 rc = bnx2x_init_bp(bp);
693fc0d1
EG
12213 if (rc)
12214 goto init_one_exit;
12215
94a78b79
VZ
12216 /* Set init arrays */
12217 rc = bnx2x_init_firmware(bp, &pdev->dev);
12218 if (rc) {
12219 printk(KERN_ERR PFX "Error loading firmware\n");
12220 goto init_one_exit;
12221 }
12222
693fc0d1 12223 rc = register_netdev(dev);
34f80b04 12224 if (rc) {
693fc0d1 12225 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
12226 goto init_one_exit;
12227 }
12228
37f9ce62 12229 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
25047950 12230 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 12231 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 12232 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
37f9ce62 12233 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
25047950 12234 dev->base_addr, bp->pdev->irq);
e174961c 12235 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 12236
a2fbb9ea 12237 return 0;
34f80b04
EG
12238
12239init_one_exit:
12240 if (bp->regview)
12241 iounmap(bp->regview);
12242
12243 if (bp->doorbells)
12244 iounmap(bp->doorbells);
12245
12246 free_netdev(dev);
12247
12248 if (atomic_read(&pdev->enable_cnt) == 1)
12249 pci_release_regions(pdev);
12250
12251 pci_disable_device(pdev);
12252 pci_set_drvdata(pdev, NULL);
12253
12254 return rc;
a2fbb9ea
ET
12255}
12256
12257static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12258{
12259 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
12260 struct bnx2x *bp;
12261
12262 if (!dev) {
228241eb
ET
12263 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12264 return;
12265 }
228241eb 12266 bp = netdev_priv(dev);
a2fbb9ea 12267
a2fbb9ea
ET
12268 unregister_netdev(dev);
12269
94a78b79
VZ
12270 kfree(bp->init_ops_offsets);
12271 kfree(bp->init_ops);
12272 kfree(bp->init_data);
12273 release_firmware(bp->firmware);
12274
a2fbb9ea
ET
12275 if (bp->regview)
12276 iounmap(bp->regview);
12277
12278 if (bp->doorbells)
12279 iounmap(bp->doorbells);
12280
12281 free_netdev(dev);
34f80b04
EG
12282
12283 if (atomic_read(&pdev->enable_cnt) == 1)
12284 pci_release_regions(pdev);
12285
a2fbb9ea
ET
12286 pci_disable_device(pdev);
12287 pci_set_drvdata(pdev, NULL);
12288}
12289
12290static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12291{
12292 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
12293 struct bnx2x *bp;
12294
34f80b04
EG
12295 if (!dev) {
12296 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12297 return -ENODEV;
12298 }
12299 bp = netdev_priv(dev);
a2fbb9ea 12300
34f80b04 12301 rtnl_lock();
a2fbb9ea 12302
34f80b04 12303 pci_save_state(pdev);
228241eb 12304
34f80b04
EG
12305 if (!netif_running(dev)) {
12306 rtnl_unlock();
12307 return 0;
12308 }
a2fbb9ea
ET
12309
12310 netif_device_detach(dev);
a2fbb9ea 12311
da5a662a 12312 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 12313
a2fbb9ea 12314 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 12315
34f80b04
EG
12316 rtnl_unlock();
12317
a2fbb9ea
ET
12318 return 0;
12319}
12320
12321static int bnx2x_resume(struct pci_dev *pdev)
12322{
12323 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 12324 struct bnx2x *bp;
a2fbb9ea
ET
12325 int rc;
12326
228241eb
ET
12327 if (!dev) {
12328 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12329 return -ENODEV;
12330 }
228241eb 12331 bp = netdev_priv(dev);
a2fbb9ea 12332
34f80b04
EG
12333 rtnl_lock();
12334
228241eb 12335 pci_restore_state(pdev);
34f80b04
EG
12336
12337 if (!netif_running(dev)) {
12338 rtnl_unlock();
12339 return 0;
12340 }
12341
a2fbb9ea
ET
12342 bnx2x_set_power_state(bp, PCI_D0);
12343 netif_device_attach(dev);
12344
da5a662a 12345 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 12346
34f80b04
EG
12347 rtnl_unlock();
12348
12349 return rc;
a2fbb9ea
ET
12350}
12351
f8ef6e44
YG
12352static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12353{
12354 int i;
12355
12356 bp->state = BNX2X_STATE_ERROR;
12357
12358 bp->rx_mode = BNX2X_RX_MODE_NONE;
12359
12360 bnx2x_netif_stop(bp, 0);
12361
12362 del_timer_sync(&bp->timer);
12363 bp->stats_state = STATS_STATE_DISABLED;
12364 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12365
12366 /* Release IRQs */
12367 bnx2x_free_irq(bp);
12368
12369 if (CHIP_IS_E1(bp)) {
12370 struct mac_configuration_cmd *config =
12371 bnx2x_sp(bp, mcast_config);
12372
8d9c5f34 12373 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
12374 CAM_INVALIDATE(config->config_table[i]);
12375 }
12376
12377 /* Free SKBs, SGEs, TPA pool and driver internals */
12378 bnx2x_free_skbs(bp);
555f6c78 12379 for_each_rx_queue(bp, i)
f8ef6e44 12380 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 12381 for_each_rx_queue(bp, i)
7cde1c8b 12382 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
12383 bnx2x_free_mem(bp);
12384
12385 bp->state = BNX2X_STATE_CLOSED;
12386
12387 netif_carrier_off(bp->dev);
12388
12389 return 0;
12390}
12391
12392static void bnx2x_eeh_recover(struct bnx2x *bp)
12393{
12394 u32 val;
12395
12396 mutex_init(&bp->port.phy_mutex);
12397
12398 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12399 bp->link_params.shmem_base = bp->common.shmem_base;
12400 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12401
12402 if (!bp->common.shmem_base ||
12403 (bp->common.shmem_base < 0xA0000) ||
12404 (bp->common.shmem_base >= 0xC0000)) {
12405 BNX2X_DEV_INFO("MCP not active\n");
12406 bp->flags |= NO_MCP_FLAG;
12407 return;
12408 }
12409
12410 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12411 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12412 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12413 BNX2X_ERR("BAD MCP validity signature\n");
12414
12415 if (!BP_NOMCP(bp)) {
12416 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12417 & DRV_MSG_SEQ_NUMBER_MASK);
12418 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12419 }
12420}
12421
493adb1f
WX
12422/**
12423 * bnx2x_io_error_detected - called when PCI error is detected
12424 * @pdev: Pointer to PCI device
12425 * @state: The current pci connection state
12426 *
12427 * This function is called after a PCI bus error affecting
12428 * this device has been detected.
12429 */
12430static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12431 pci_channel_state_t state)
12432{
12433 struct net_device *dev = pci_get_drvdata(pdev);
12434 struct bnx2x *bp = netdev_priv(dev);
12435
12436 rtnl_lock();
12437
12438 netif_device_detach(dev);
12439
07ce50e4
DN
12440 if (state == pci_channel_io_perm_failure) {
12441 rtnl_unlock();
12442 return PCI_ERS_RESULT_DISCONNECT;
12443 }
12444
493adb1f 12445 if (netif_running(dev))
f8ef6e44 12446 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
12447
12448 pci_disable_device(pdev);
12449
12450 rtnl_unlock();
12451
12452 /* Request a slot reset */
12453 return PCI_ERS_RESULT_NEED_RESET;
12454}
12455
12456/**
12457 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12458 * @pdev: Pointer to PCI device
12459 *
12460 * Restart the card from scratch, as if from a cold-boot.
12461 */
12462static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12463{
12464 struct net_device *dev = pci_get_drvdata(pdev);
12465 struct bnx2x *bp = netdev_priv(dev);
12466
12467 rtnl_lock();
12468
12469 if (pci_enable_device(pdev)) {
12470 dev_err(&pdev->dev,
12471 "Cannot re-enable PCI device after reset\n");
12472 rtnl_unlock();
12473 return PCI_ERS_RESULT_DISCONNECT;
12474 }
12475
12476 pci_set_master(pdev);
12477 pci_restore_state(pdev);
12478
12479 if (netif_running(dev))
12480 bnx2x_set_power_state(bp, PCI_D0);
12481
12482 rtnl_unlock();
12483
12484 return PCI_ERS_RESULT_RECOVERED;
12485}
12486
12487/**
12488 * bnx2x_io_resume - called when traffic can start flowing again
12489 * @pdev: Pointer to PCI device
12490 *
12491 * This callback is called when the error recovery driver tells us that
12492 * its OK to resume normal operation.
12493 */
12494static void bnx2x_io_resume(struct pci_dev *pdev)
12495{
12496 struct net_device *dev = pci_get_drvdata(pdev);
12497 struct bnx2x *bp = netdev_priv(dev);
12498
12499 rtnl_lock();
12500
f8ef6e44
YG
12501 bnx2x_eeh_recover(bp);
12502
493adb1f 12503 if (netif_running(dev))
f8ef6e44 12504 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
12505
12506 netif_device_attach(dev);
12507
12508 rtnl_unlock();
12509}
12510
12511static struct pci_error_handlers bnx2x_err_handler = {
12512 .error_detected = bnx2x_io_error_detected,
356e2385
EG
12513 .slot_reset = bnx2x_io_slot_reset,
12514 .resume = bnx2x_io_resume,
493adb1f
WX
12515};
12516
a2fbb9ea 12517static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
12518 .name = DRV_MODULE_NAME,
12519 .id_table = bnx2x_pci_tbl,
12520 .probe = bnx2x_init_one,
12521 .remove = __devexit_p(bnx2x_remove_one),
12522 .suspend = bnx2x_suspend,
12523 .resume = bnx2x_resume,
12524 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
12525};
12526
12527static int __init bnx2x_init(void)
12528{
dd21ca6d
SG
12529 int ret;
12530
938cf541
EG
12531 printk(KERN_INFO "%s", version);
12532
1cf167f2
EG
12533 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12534 if (bnx2x_wq == NULL) {
12535 printk(KERN_ERR PFX "Cannot create workqueue\n");
12536 return -ENOMEM;
12537 }
12538
dd21ca6d
SG
12539 ret = pci_register_driver(&bnx2x_pci_driver);
12540 if (ret) {
12541 printk(KERN_ERR PFX "Cannot register driver\n");
12542 destroy_workqueue(bnx2x_wq);
12543 }
12544 return ret;
a2fbb9ea
ET
12545}
12546
12547static void __exit bnx2x_cleanup(void)
12548{
12549 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
12550
12551 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
12552}
12553
12554module_init(bnx2x_init);
12555module_exit(bnx2x_cleanup);
12556
993ac7b5
MC
12557#ifdef BCM_CNIC
12558
12559/* count denotes the number of new completions we have seen */
12560static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12561{
12562 struct eth_spe *spe;
12563
12564#ifdef BNX2X_STOP_ON_ERROR
12565 if (unlikely(bp->panic))
12566 return;
12567#endif
12568
12569 spin_lock_bh(&bp->spq_lock);
12570 bp->cnic_spq_pending -= count;
12571
12572 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12573 bp->cnic_spq_pending++) {
12574
12575 if (!bp->cnic_kwq_pending)
12576 break;
12577
12578 spe = bnx2x_sp_get_next(bp);
12579 *spe = *bp->cnic_kwq_cons;
12580
12581 bp->cnic_kwq_pending--;
12582
12583 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12584 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12585
12586 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12587 bp->cnic_kwq_cons = bp->cnic_kwq;
12588 else
12589 bp->cnic_kwq_cons++;
12590 }
12591 bnx2x_sp_prod_update(bp);
12592 spin_unlock_bh(&bp->spq_lock);
12593}
12594
12595static int bnx2x_cnic_sp_queue(struct net_device *dev,
12596 struct kwqe_16 *kwqes[], u32 count)
12597{
12598 struct bnx2x *bp = netdev_priv(dev);
12599 int i;
12600
12601#ifdef BNX2X_STOP_ON_ERROR
12602 if (unlikely(bp->panic))
12603 return -EIO;
12604#endif
12605
12606 spin_lock_bh(&bp->spq_lock);
12607
12608 for (i = 0; i < count; i++) {
12609 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12610
12611 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12612 break;
12613
12614 *bp->cnic_kwq_prod = *spe;
12615
12616 bp->cnic_kwq_pending++;
12617
12618 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12619 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12620 spe->data.mac_config_addr.hi,
12621 spe->data.mac_config_addr.lo,
12622 bp->cnic_kwq_pending);
12623
12624 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12625 bp->cnic_kwq_prod = bp->cnic_kwq;
12626 else
12627 bp->cnic_kwq_prod++;
12628 }
12629
12630 spin_unlock_bh(&bp->spq_lock);
12631
12632 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12633 bnx2x_cnic_sp_post(bp, 0);
12634
12635 return i;
12636}
12637
12638static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12639{
12640 struct cnic_ops *c_ops;
12641 int rc = 0;
12642
12643 mutex_lock(&bp->cnic_mutex);
12644 c_ops = bp->cnic_ops;
12645 if (c_ops)
12646 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12647 mutex_unlock(&bp->cnic_mutex);
12648
12649 return rc;
12650}
12651
12652static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12653{
12654 struct cnic_ops *c_ops;
12655 int rc = 0;
12656
12657 rcu_read_lock();
12658 c_ops = rcu_dereference(bp->cnic_ops);
12659 if (c_ops)
12660 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12661 rcu_read_unlock();
12662
12663 return rc;
12664}
12665
12666/*
12667 * for commands that have no data
12668 */
12669static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12670{
12671 struct cnic_ctl_info ctl = {0};
12672
12673 ctl.cmd = cmd;
12674
12675 return bnx2x_cnic_ctl_send(bp, &ctl);
12676}
12677
12678static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12679{
12680 struct cnic_ctl_info ctl;
12681
12682 /* first we tell CNIC and only then we count this as a completion */
12683 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12684 ctl.data.comp.cid = cid;
12685
12686 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12687 bnx2x_cnic_sp_post(bp, 1);
12688}
12689
12690static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12691{
12692 struct bnx2x *bp = netdev_priv(dev);
12693 int rc = 0;
12694
12695 switch (ctl->cmd) {
12696 case DRV_CTL_CTXTBL_WR_CMD: {
12697 u32 index = ctl->data.io.offset;
12698 dma_addr_t addr = ctl->data.io.dma_addr;
12699
12700 bnx2x_ilt_wr(bp, index, addr);
12701 break;
12702 }
12703
12704 case DRV_CTL_COMPLETION_CMD: {
12705 int count = ctl->data.comp.comp_count;
12706
12707 bnx2x_cnic_sp_post(bp, count);
12708 break;
12709 }
12710
12711 /* rtnl_lock is held. */
12712 case DRV_CTL_START_L2_CMD: {
12713 u32 cli = ctl->data.ring.client_id;
12714
12715 bp->rx_mode_cl_mask |= (1 << cli);
12716 bnx2x_set_storm_rx_mode(bp);
12717 break;
12718 }
12719
12720 /* rtnl_lock is held. */
12721 case DRV_CTL_STOP_L2_CMD: {
12722 u32 cli = ctl->data.ring.client_id;
12723
12724 bp->rx_mode_cl_mask &= ~(1 << cli);
12725 bnx2x_set_storm_rx_mode(bp);
12726 break;
12727 }
12728
12729 default:
12730 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12731 rc = -EINVAL;
12732 }
12733
12734 return rc;
12735}
12736
12737static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12738{
12739 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12740
12741 if (bp->flags & USING_MSIX_FLAG) {
12742 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12743 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12744 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12745 } else {
12746 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12747 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12748 }
12749 cp->irq_arr[0].status_blk = bp->cnic_sb;
12750 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12751 cp->irq_arr[1].status_blk = bp->def_status_blk;
12752 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12753
12754 cp->num_irq = 2;
12755}
12756
12757static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12758 void *data)
12759{
12760 struct bnx2x *bp = netdev_priv(dev);
12761 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12762
12763 if (ops == NULL)
12764 return -EINVAL;
12765
12766 if (atomic_read(&bp->intr_sem) != 0)
12767 return -EBUSY;
12768
12769 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12770 if (!bp->cnic_kwq)
12771 return -ENOMEM;
12772
12773 bp->cnic_kwq_cons = bp->cnic_kwq;
12774 bp->cnic_kwq_prod = bp->cnic_kwq;
12775 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12776
12777 bp->cnic_spq_pending = 0;
12778 bp->cnic_kwq_pending = 0;
12779
12780 bp->cnic_data = data;
12781
12782 cp->num_irq = 0;
12783 cp->drv_state = CNIC_DRV_STATE_REGD;
12784
12785 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12786
12787 bnx2x_setup_cnic_irq_info(bp);
12788 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12789 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12790 rcu_assign_pointer(bp->cnic_ops, ops);
12791
12792 return 0;
12793}
12794
12795static int bnx2x_unregister_cnic(struct net_device *dev)
12796{
12797 struct bnx2x *bp = netdev_priv(dev);
12798 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12799
12800 mutex_lock(&bp->cnic_mutex);
12801 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12802 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12803 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12804 }
12805 cp->drv_state = 0;
12806 rcu_assign_pointer(bp->cnic_ops, NULL);
12807 mutex_unlock(&bp->cnic_mutex);
12808 synchronize_rcu();
12809 kfree(bp->cnic_kwq);
12810 bp->cnic_kwq = NULL;
12811
12812 return 0;
12813}
12814
12815struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12816{
12817 struct bnx2x *bp = netdev_priv(dev);
12818 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12819
12820 cp->drv_owner = THIS_MODULE;
12821 cp->chip_id = CHIP_ID(bp);
12822 cp->pdev = bp->pdev;
12823 cp->io_base = bp->regview;
12824 cp->io_base2 = bp->doorbells;
12825 cp->max_kwqe_pending = 8;
12826 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12827 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12828 cp->ctx_tbl_len = CNIC_ILT_LINES;
12829 cp->starting_cid = BCM_CNIC_CID_START;
12830 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12831 cp->drv_ctl = bnx2x_drv_ctl;
12832 cp->drv_register_cnic = bnx2x_register_cnic;
12833 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12834
12835 return cp;
12836}
12837EXPORT_SYMBOL(bnx2x_cnic_probe);
12838
12839#endif /* BCM_CNIC */
94a78b79 12840
This page took 1.282485 seconds and 5 git commands to generate.