/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1-6"
#define DRV_MODULE_RELDATE	"2010/02/16"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
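/* Note: the firmware file name is assembled at compile time from the
 * BCM_5710_FW_* version macros (pulled in via bnx2x_fw_file_hdr.h), giving
 * names of the form "bnx2x-e1-<major>.<minor>.<rev>.<eng>.fw"; the exact
 * numbers depend on the firmware headers this tree was built against.
 */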

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

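/* Read-side companion to bnx2x_reg_wr_ind() above: the GRC address/data
 * register pair in PCI config space acts as a window into chip registers,
 * usable before the BARs are ready. Rewriting PCICFG_GRC_ADDRESS with
 * PCICFG_VENDOR_ID_OFFSET afterwards presumably parks the window on a
 * harmless offset so stray config cycles cannot hit a live register.
 */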
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

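/* Quiesce interrupt processing: raise intr_sem so handlers bail out early,
 * optionally mask the HC, wait for handlers already running on other CPUs
 * via synchronize_irq(), then flush the slowpath work.
 */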
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

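/* Acknowledge a status block by writing an igu_ack_register dword to this
 * port's HC command register; op/update control whether the IGU interrupt
 * is re-enabled along with the new SB index.
 */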
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

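/* Free TX BD accounting: the prod/cons difference is taken in s16 so it
 * survives index wrap-around, and the NUM_TX_RINGS term reserves the
 * "next page" BDs that never carry packets.
 */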
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
	return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

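/* The last two SGE indices of each ring page address the "next page"
 * element rather than a real buffer; firmware never reports them, so their
 * mask bits are cleared up front to keep the producer arithmetic simple.
 */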
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

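/* TPA (HW LRO) start: the spare skb from the per-queue TPA pool takes over
 * the ring slot at prod, while the skb being aggregated is parked in the
 * pool until the matching TPA_END completion arrives.
 */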
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take the VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

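/* Publish the three RX producers (BD, CQE, SGE) to USTORM internal memory
 * as a series of 32-bit writes; the wmb() inside orders buffer setup
 * against the producer update so FW never sees a producer without buffers.
 */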
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

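/* Main RX loop (driven from NAPI poll): walks the completion queue up to
 * the budget, routing slowpath CQEs to bnx2x_sp_event(), TPA start/end
 * completions to the TPA helpers, and ordinary packets up the stack.
 */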
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
							  len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring,
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

1757static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1758{
555f6c78 1759 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1760 u16 status = bnx2x_ack_int(bp);
34f80b04 1761 u16 mask;
ca00392c 1762 int i;
a2fbb9ea 1763
34f80b04 1764 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1765 if (unlikely(status == 0)) {
1766 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1767 return IRQ_NONE;
1768 }
f5372251 1769 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1770
34f80b04 1771 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1772 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774 return IRQ_HANDLED;
1775 }
1776
3196a88a
EG
1777#ifdef BNX2X_STOP_ON_ERROR
1778 if (unlikely(bp->panic))
1779 return IRQ_HANDLED;
1780#endif
1781
ca00392c
EG
1782 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1784
ca00392c
EG
1785 mask = 0x2 << fp->sb_id;
1786 if (status & mask) {
54b9ddaa
VZ
1787 /* Handle Rx and Tx according to SB id */
1788 prefetch(fp->rx_cons_sb);
1789 prefetch(&fp->status_blk->u_status_block.
1790 status_block_index);
1791 prefetch(fp->tx_cons_sb);
1792 prefetch(&fp->status_blk->c_status_block.
1793 status_block_index);
1794 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1795 status &= ~mask;
1796 }
a2fbb9ea
ET
1797 }
1798
993ac7b5
MC
1799#ifdef BCM_CNIC
1800 mask = 0x2 << CNIC_SB_ID(bp);
1801 if (status & (mask | 0x1)) {
1802 struct cnic_ops *c_ops = NULL;
1803
1804 rcu_read_lock();
1805 c_ops = rcu_dereference(bp->cnic_ops);
1806 if (c_ops)
1807 c_ops->cnic_handler(bp->cnic_data, NULL);
1808 rcu_read_unlock();
1809
1810 status &= ~mask;
1811 }
1812#endif
a2fbb9ea 1813
34f80b04 1814 if (unlikely(status & 0x1)) {
1cf167f2 1815 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1816
1817 status &= ~0x1;
1818 if (!status)
1819 return IRQ_HANDLED;
1820 }
1821
34f80b04
EG
1822 if (status)
1823 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1824 status);
a2fbb9ea 1825
c18487ee 1826 return IRQ_HANDLED;
a2fbb9ea
ET
1827}
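/* Illustrative note (not in the original source): the acked status word
 * decodes as bit 0 = slowpath event and bit (1 + sb_id) = fastpath status
 * block sb_id, since the loop above tests (0x2 << fp->sb_id). For example,
 * status = 0x5 would mean slowpath work pending (bit 0) plus an event on
 * the fastpath whose sb_id is 1 (bit 2).
 */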
1828
c18487ee 1829/* end of fast path */
a2fbb9ea 1830
bb2a0f7a 1831static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1832
c18487ee
YR
1833/* Link */
1834
1835/*
1836 * General service functions
1837 */
a2fbb9ea 1838
4a37fb66 1839static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1840{
1841 u32 lock_status;
1842 u32 resource_bit = (1 << resource);
4a37fb66
YG
1843 int func = BP_FUNC(bp);
1844 u32 hw_lock_control_reg;
c18487ee 1845 int cnt;
a2fbb9ea 1846
c18487ee
YR
1847 /* Validating that the resource is within range */
1848 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849 DP(NETIF_MSG_HW,
1850 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852 return -EINVAL;
1853 }
a2fbb9ea 1854
4a37fb66
YG
1855 if (func <= 5) {
1856 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857 } else {
1858 hw_lock_control_reg =
1859 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860 }
1861
c18487ee 1862 /* Validating that the resource is not already taken */
4a37fb66 1863 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1864 if (lock_status & resource_bit) {
1865 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1866 lock_status, resource_bit);
1867 return -EEXIST;
1868 }
a2fbb9ea 1869
46230476
EG
 1870	/* Try for 5 seconds, polling every 5ms */
1871 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1872 /* Try to acquire the lock */
4a37fb66
YG
1873 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1875 if (lock_status & resource_bit)
1876 return 0;
a2fbb9ea 1877
c18487ee 1878 msleep(5);
a2fbb9ea 1879 }
c18487ee
YR
1880 DP(NETIF_MSG_HW, "Timeout\n");
1881 return -EAGAIN;
1882}
a2fbb9ea 1883
4a37fb66 1884static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1885{
1886 u32 lock_status;
1887 u32 resource_bit = (1 << resource);
4a37fb66
YG
1888 int func = BP_FUNC(bp);
1889 u32 hw_lock_control_reg;
a2fbb9ea 1890
c18487ee
YR
1891 /* Validating that the resource is within range */
1892 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893 DP(NETIF_MSG_HW,
1894 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896 return -EINVAL;
1897 }
1898
4a37fb66
YG
1899 if (func <= 5) {
1900 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901 } else {
1902 hw_lock_control_reg =
1903 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904 }
1905
c18487ee 1906 /* Validating that the resource is currently taken */
4a37fb66 1907 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1908 if (!(lock_status & resource_bit)) {
1909 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1910 lock_status, resource_bit);
1911 return -EFAULT;
a2fbb9ea
ET
1912 }
1913
4a37fb66 1914 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1915 return 0;
1916}
1917
1918/* HW Lock for shared dual port PHYs */
4a37fb66 1919static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1920{
34f80b04 1921 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1922
46c6a674
EG
1923 if (bp->port.need_hw_lock)
1924 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1925}
a2fbb9ea 1926
4a37fb66 1927static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1928{
46c6a674
EG
1929 if (bp->port.need_hw_lock)
1930 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1931
34f80b04 1932 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1933}
a2fbb9ea 1934
4acac6a5
EG
1935int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944 int value;
1945
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948 return -EINVAL;
1949 }
1950
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1956 value = 1;
1957 else
1958 value = 0;
1959
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1961
1962 return value;
1963}
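/* Illustrative sketch of the shift computed above (not in the original
 * source): with gpio_num = 2 on port 1 and both NIG swap registers set,
 * gpio_port = 1 ^ 1 = 0, so gpio_shift = 2 and gpio_mask = 1 << 2; with
 * the swap registers clear, gpio_port stays 1 and the pin is read at
 * bit (2 + MISC_REGISTERS_GPIO_PORT_SHIFT) instead.
 */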
1964
17de50b7 1965int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1966{
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1973 u32 gpio_reg;
a2fbb9ea 1974
c18487ee
YR
1975 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977 return -EINVAL;
1978 }
a2fbb9ea 1979
4a37fb66 1980 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1981 /* read GPIO and mask except the float bits */
1982 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1983
c18487ee
YR
1984 switch (mode) {
1985 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987 gpio_num, gpio_shift);
1988 /* clear FLOAT and set CLR */
1989 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991 break;
a2fbb9ea 1992
c18487ee
YR
1993 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995 gpio_num, gpio_shift);
1996 /* clear FLOAT and set SET */
1997 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999 break;
a2fbb9ea 2000
17de50b7 2001 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2002 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003 gpio_num, gpio_shift);
2004 /* set FLOAT */
2005 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006 break;
a2fbb9ea 2007
c18487ee
YR
2008 default:
2009 break;
a2fbb9ea
ET
2010 }
2011
c18487ee 2012 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2013 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2014
c18487ee 2015 return 0;
a2fbb9ea
ET
2016}
2017
4acac6a5
EG
2018int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019{
2020 /* The GPIO should be swapped if swap register is set and active */
2021 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023 int gpio_shift = gpio_num +
2024 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025 u32 gpio_mask = (1 << gpio_shift);
2026 u32 gpio_reg;
2027
2028 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030 return -EINVAL;
2031 }
2032
2033 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034 /* read GPIO int */
2035 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037 switch (mode) {
2038 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040 "output low\n", gpio_num, gpio_shift);
2041 /* clear SET and set CLR */
2042 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044 break;
2045
2046 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048 "output high\n", gpio_num, gpio_shift);
2049 /* clear CLR and set SET */
2050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052 break;
2053
2054 default:
2055 break;
2056 }
2057
2058 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061 return 0;
2062}
2063
c18487ee 2064static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2065{
c18487ee
YR
2066 u32 spio_mask = (1 << spio_num);
2067 u32 spio_reg;
a2fbb9ea 2068
c18487ee
YR
2069 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070 (spio_num > MISC_REGISTERS_SPIO_7)) {
2071 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072 return -EINVAL;
a2fbb9ea
ET
2073 }
2074
4a37fb66 2075 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2076 /* read SPIO and mask except the float bits */
2077 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2078
c18487ee 2079 switch (mode) {
6378c025 2080 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2081 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082 /* clear FLOAT and set CLR */
2083 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085 break;
a2fbb9ea 2086
6378c025 2087 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2088 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089 /* clear FLOAT and set SET */
2090 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092 break;
a2fbb9ea 2093
c18487ee
YR
2094 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096 /* set FLOAT */
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098 break;
a2fbb9ea 2099
c18487ee
YR
2100 default:
2101 break;
a2fbb9ea
ET
2102 }
2103
c18487ee 2104 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2105 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2106
a2fbb9ea
ET
2107 return 0;
2108}
2109
c18487ee 2110static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2111{
ad33ea3a
EG
2112 switch (bp->link_vars.ieee_fc &
2113 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2114 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2115 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2116 ADVERTISED_Pause);
2117 break;
356e2385 2118
c18487ee 2119 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2120 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2121 ADVERTISED_Pause);
2122 break;
356e2385 2123
c18487ee 2124 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2125 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2126 break;
356e2385 2127
c18487ee 2128 default:
34f80b04 2129 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2130 ADVERTISED_Pause);
2131 break;
2132 }
2133}
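/* Summary of the mapping above (illustrative, derived from the switch):
 *   ieee_fc advertisement    -> ethtool advertising bits
 *   PAUSE_NONE               -> clear Pause and Asym_Pause
 *   PAUSE_BOTH               -> set Pause and Asym_Pause
 *   PAUSE_ASYMMETRIC         -> set Asym_Pause only
 */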
f1410647 2134
c18487ee
YR
2135static void bnx2x_link_report(struct bnx2x *bp)
2136{
f34d28ea 2137 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2138 netif_carrier_off(bp->dev);
7995c64e 2139 netdev_err(bp->dev, "NIC Link is Down\n");
2691d51d
EG
2140 return;
2141 }
2142
c18487ee 2143 if (bp->link_vars.link_up) {
35c5f8fe
EG
2144 u16 line_speed;
2145
c18487ee
YR
2146 if (bp->state == BNX2X_STATE_OPEN)
2147 netif_carrier_on(bp->dev);
7995c64e 2148 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2149
35c5f8fe
EG
2150 line_speed = bp->link_vars.line_speed;
2151 if (IS_E1HMF(bp)) {
2152 u16 vn_max_rate;
2153
2154 vn_max_rate =
2155 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157 if (vn_max_rate < line_speed)
2158 line_speed = vn_max_rate;
2159 }
7995c64e 2160 pr_cont("%d Mbps ", line_speed);
f1410647 2161
c18487ee 2162 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2163 pr_cont("full duplex");
c18487ee 2164 else
7995c64e 2165 pr_cont("half duplex");
f1410647 2166
c0700f90
DM
2167 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2169 pr_cont(", receive ");
356e2385
EG
2170 if (bp->link_vars.flow_ctrl &
2171 BNX2X_FLOW_CTRL_TX)
7995c64e 2172 pr_cont("& transmit ");
c18487ee 2173 } else {
7995c64e 2174 pr_cont(", transmit ");
c18487ee 2175 }
7995c64e 2176 pr_cont("flow control ON");
c18487ee 2177 }
7995c64e 2178 pr_cont("\n");
f1410647 2179
c18487ee
YR
2180 } else { /* link_down */
2181 netif_carrier_off(bp->dev);
7995c64e 2182 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2183 }
c18487ee
YR
2184}
2185
b5bf9068 2186static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2187{
19680c48
EG
2188 if (!BP_NOMCP(bp)) {
2189 u8 rc;
a2fbb9ea 2190
19680c48 2191 /* Initialize link parameters structure variables */
8c99e7b0
YR
2192 /* It is recommended to turn off RX FC for jumbo frames
2193 for better performance */
0c593270 2194 if (bp->dev->mtu > 5000)
c0700f90 2195 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2196 else
c0700f90 2197 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2198
4a37fb66 2199 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2200
2201 if (load_mode == LOAD_DIAG)
2202 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
19680c48 2204 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2205
4a37fb66 2206 bnx2x_release_phy_lock(bp);
a2fbb9ea 2207
3c96c68b
EG
2208 bnx2x_calc_fc_adv(bp);
2209
b5bf9068
EG
2210 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2212 bnx2x_link_report(bp);
b5bf9068 2213 }
34f80b04 2214
19680c48
EG
2215 return rc;
2216 }
f5372251 2217 	BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
19680c48 2218 return -EINVAL;
a2fbb9ea
ET
2219}
2220
c18487ee 2221static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2222{
19680c48 2223 if (!BP_NOMCP(bp)) {
4a37fb66 2224 bnx2x_acquire_phy_lock(bp);
19680c48 2225 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2226 bnx2x_release_phy_lock(bp);
a2fbb9ea 2227
19680c48
EG
2228 bnx2x_calc_fc_adv(bp);
2229 } else
f5372251 2230 		BNX2X_ERR("Bootcode is missing - cannot set link\n");
c18487ee 2231}
a2fbb9ea 2232
c18487ee
YR
2233static void bnx2x__link_reset(struct bnx2x *bp)
2234{
19680c48 2235 if (!BP_NOMCP(bp)) {
4a37fb66 2236 bnx2x_acquire_phy_lock(bp);
589abe3a 2237 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2238 bnx2x_release_phy_lock(bp);
19680c48 2239 } else
f5372251 2240 		BNX2X_ERR("Bootcode is missing - cannot reset link\n");
c18487ee 2241}
a2fbb9ea 2242
c18487ee
YR
2243static u8 bnx2x_link_test(struct bnx2x *bp)
2244{
2245 u8 rc;
a2fbb9ea 2246
4a37fb66 2247 bnx2x_acquire_phy_lock(bp);
c18487ee 2248 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2249 bnx2x_release_phy_lock(bp);
a2fbb9ea 2250
c18487ee
YR
2251 return rc;
2252}
a2fbb9ea 2253
8a1c38d1 2254static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2255{
8a1c38d1
EG
2256 u32 r_param = bp->link_vars.line_speed / 8;
2257 u32 fair_periodic_timeout_usec;
2258 u32 t_fair;
34f80b04 2259
8a1c38d1
EG
2260 memset(&(bp->cmng.rs_vars), 0,
2261 sizeof(struct rate_shaping_vars_per_port));
2262 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2263
8a1c38d1
EG
2264 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2266
8a1c38d1
EG
 2267	/* this is the threshold below which no timer arming will occur.
 2268	   The 1.25 coefficient makes the threshold a little bigger
 2269	   than the real time, to compensate for timer inaccuracy */
2270 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2271 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
8a1c38d1
EG
2273 /* resolution of fairness timer */
2274 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2277
8a1c38d1
EG
2278 /* this is the threshold below which we won't arm the timer anymore */
2279 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2280
8a1c38d1
EG
 2281	/* multiplying the rate in Mbps by 1e3/8 gives bytes/msec.
 2282	   The accumulated credit must not exceed
 2283	   t_fair*FAIR_MEM (the algorithm resolution) */
2284 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285 /* since each tick is 4 usec */
2286 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2287}
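/* Worked example (illustrative, not in the original source), assuming a
 * 10G link and the 100 usec period noted above:
 *   r_param      = 10000 / 8            = 1250 bytes/usec
 *   rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes
 *   t_fair       = T_FAIR_COEF / 10000  = 1000 usec (per the comment above)
 *   fairness_timeout = (QM_ARB_BYTES / 1250) / 4 SDM ticks
 */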
2288
2691d51d
EG
2289/* Calculates the sum of vn_min_rates into bp->vn_weight_sum.
2290   It's needed for further normalizing of the min_rates.
2291   Min rates that are zero are counted as DEF_MIN_RATE.
2292   If all the min_rates are 0, the fairness algorithm
2293   is deactivated (CMNG_FLAGS_PER_PORT_FAIRNESS_VN is
2294   cleared); otherwise it is enabled.
2295 */
2298static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2299{
2300 int all_zero = 1;
2301 int port = BP_PORT(bp);
2302 int vn;
2303
2304 bp->vn_weight_sum = 0;
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306 int func = 2*vn + port;
2307 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2310
2311 /* Skip hidden vns */
2312 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313 continue;
2314
2315 /* If min rate is zero - set it to 1 */
2316 if (!vn_min_rate)
2317 vn_min_rate = DEF_MIN_RATE;
2318 else
2319 all_zero = 0;
2320
2321 bp->vn_weight_sum += vn_min_rate;
2322 }
2323
2324 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2325 if (all_zero) {
2326 bp->cmng.flags.cmng_enables &=
2327 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 2328		DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
 2329		   " fairness will be disabled\n");
2330 } else
2331 bp->cmng.flags.cmng_enables |=
2332 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2333}
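/* Worked example (illustrative): with four VNs whose raw min-BW fields
 * are 10, 0, 25 and 0, the computed rates are 1000, DEF_MIN_RATE, 2500
 * and DEF_MIN_RATE Mbps (zero entries are bumped as above), all_zero
 * ends up 0 and per-VN fairness stays enabled. Only if every field were
 * 0 would CMNG_FLAGS_PER_PORT_FAIRNESS_VN be cleared.
 */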
2334
8a1c38d1 2335static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2336{
2337 struct rate_shaping_vars_per_vn m_rs_vn;
2338 struct fairness_vars_per_vn m_fair_vn;
2339 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340 u16 vn_min_rate, vn_max_rate;
2341 int i;
2342
2343 /* If function is hidden - set min and max to zeroes */
2344 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345 vn_min_rate = 0;
2346 vn_max_rate = 0;
2347
2348 } else {
2349 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2351 /* If min rate is zero - set it to 1 */
2352 if (!vn_min_rate)
34f80b04
EG
2353 vn_min_rate = DEF_MIN_RATE;
2354 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2356 }
8a1c38d1 2357 DP(NETIF_MSG_IFUP,
b015e3d1 2358 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2359 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2360
2361 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2363
2364 /* global vn counter - maximal Mbps for this vn */
2365 m_rs_vn.vn_counter.rate = vn_max_rate;
2366
2367 /* quota - number of bytes transmitted in this period */
2368 m_rs_vn.vn_counter.quota =
2369 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2370
8a1c38d1 2371 if (bp->vn_weight_sum) {
34f80b04
EG
2372 /* credit for each period of the fairness algorithm:
 2373		   number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2374 vn_weight_sum should not be larger than 10000, thus
2375 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376 than zero */
34f80b04 2377 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2378 max((u32)(vn_min_rate * (T_FAIR_COEF /
2379 (8 * bp->vn_weight_sum))),
2380 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2381 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382 m_fair_vn.vn_credit_delta);
2383 }
2384
34f80b04
EG
2385 /* Store it to internal memory */
2386 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389 ((u32 *)(&m_rs_vn))[i]);
2390
2391 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394 ((u32 *)(&m_fair_vn))[i]);
2395}
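/* Worked example (illustrative): a VN with vn_max_rate = 2500 Mbps and
 * the 100 usec rate-shaping period gets
 *   quota = (2500 * 100) / 8 = 31250 bytes per period,
 * while its fairness credit delta is the larger of
 *   vn_min_rate * (T_FAIR_COEF / (8 * vn_weight_sum))
 * and 2 * fair_threshold, exactly as computed above.
 */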
2396
8a1c38d1 2397
c18487ee
YR
2398/* This function is called upon link interrupt */
2399static void bnx2x_link_attn(struct bnx2x *bp)
2400{
bb2a0f7a
YG
2401 /* Make sure that we are synced with the current statistics */
2402 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
c18487ee 2404 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2405
bb2a0f7a
YG
2406 if (bp->link_vars.link_up) {
2407
1c06328c 2408 /* dropless flow control */
a18f5128 2409 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2410 int port = BP_PORT(bp);
2411 u32 pause_enabled = 0;
2412
2413 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414 pause_enabled = 1;
2415
2416 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2417 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2418 pause_enabled);
2419 }
2420
bb2a0f7a
YG
2421 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422 struct host_port_stats *pstats;
2423
2424 pstats = bnx2x_sp(bp, port_stats);
2425 /* reset old bmac stats */
2426 memset(&(pstats->mac_stx[0]), 0,
2427 sizeof(struct mac_stx));
2428 }
f34d28ea 2429 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2430 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431 }
2432
c18487ee
YR
2433 /* indicate link status */
2434 bnx2x_link_report(bp);
34f80b04
EG
2435
2436 if (IS_E1HMF(bp)) {
8a1c38d1 2437 int port = BP_PORT(bp);
34f80b04 2438 int func;
8a1c38d1 2439 int vn;
34f80b04 2440
ab6ad5a4 2441 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2442 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443 if (vn == BP_E1HVN(bp))
2444 continue;
2445
8a1c38d1 2446 func = ((vn << 1) | port);
34f80b04
EG
2447 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449 }
34f80b04 2450
8a1c38d1
EG
2451 if (bp->link_vars.link_up) {
2452 int i;
2453
2454 /* Init rate shaping and fairness contexts */
2455 bnx2x_init_port_minmax(bp);
34f80b04 2456
34f80b04 2457 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2458 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460 /* Store it to internal memory */
2461 for (i = 0;
2462 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465 ((u32 *)(&bp->cmng))[i]);
2466 }
34f80b04 2467 }
c18487ee 2468}
a2fbb9ea 2469
c18487ee
YR
2470static void bnx2x__link_status_update(struct bnx2x *bp)
2471{
f34d28ea 2472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2473 return;
a2fbb9ea 2474
c18487ee 2475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2476
bb2a0f7a
YG
2477 if (bp->link_vars.link_up)
2478 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479 else
2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
2691d51d
EG
2482 bnx2x_calc_vn_weight_sum(bp);
2483
c18487ee
YR
2484 /* indicate link status */
2485 bnx2x_link_report(bp);
a2fbb9ea 2486}
a2fbb9ea 2487
34f80b04
EG
2488static void bnx2x_pmf_update(struct bnx2x *bp)
2489{
2490 int port = BP_PORT(bp);
2491 u32 val;
2492
2493 bp->port.pmf = 1;
2494 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496 /* enable nig attention */
2497 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2500
2501 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2502}
2503
c18487ee 2504/* end of Link */
a2fbb9ea
ET
2505
2506/* slow path */
2507
2508/*
2509 * General service functions
2510 */
2511
2691d51d
EG
2512/* send the MCP a request, block until there is a reply */
2513u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2514{
2515 int func = BP_FUNC(bp);
2516 u32 seq = ++bp->fw_seq;
2517 u32 rc = 0;
2518 u32 cnt = 1;
2519 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2520
c4ff7cbf 2521 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2522 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2524
2525 do {
 2526		/* let the FW do its magic ... */
2527 msleep(delay);
2528
2529 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2530
c4ff7cbf
EG
 2531	/* Give the FW up to 5 seconds (500*10ms) */
2532 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2533
2534 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535 cnt*delay, rc, seq);
2536
2537 /* is this a reply to our command? */
2538 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539 rc &= FW_MSG_CODE_MASK;
2540 else {
2541 /* FW BUG! */
2542 BNX2X_ERR("FW failed to respond!\n");
2543 bnx2x_fw_dump(bp);
2544 rc = 0;
2545 }
c4ff7cbf 2546 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2547
2548 return rc;
2549}
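/* Illustrative trace of the handshake above (values hypothetical): with
 * bp->fw_seq advancing to 0x0005, the driver writes (command | 0x0005)
 * to drv_mb_header and polls fw_mb_header until the FW echoes 0x0005 in
 * the FW_MSG_SEQ_NUMBER_MASK bits; only then is the reply accepted and
 * masked down to FW_MSG_CODE_MASK. A stale or missing echo is treated
 * as an FW failure and dumped.
 */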
2550
2551static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2552static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2553static void bnx2x_set_rx_mode(struct net_device *dev);
2554
2555static void bnx2x_e1h_disable(struct bnx2x *bp)
2556{
2557 int port = BP_PORT(bp);
2691d51d
EG
2558
2559 netif_tx_disable(bp->dev);
2691d51d
EG
2560
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2562
2691d51d
EG
2563 netif_carrier_off(bp->dev);
2564}
2565
2566static void bnx2x_e1h_enable(struct bnx2x *bp)
2567{
2568 int port = BP_PORT(bp);
2569
2570 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2571
2691d51d
EG
 2572	/* Tx queues only need to be re-enabled */
2573 netif_tx_wake_all_queues(bp->dev);
2574
061bc702
EG
2575 /*
 2576	 * Should not call netif_carrier_on since it will be called by the
 2577	 * link state check if the link is up
2578 */
2691d51d
EG
2579}
2580
2581static void bnx2x_update_min_max(struct bnx2x *bp)
2582{
2583 int port = BP_PORT(bp);
2584 int vn, i;
2585
2586 /* Init rate shaping and fairness contexts */
2587 bnx2x_init_port_minmax(bp);
2588
2589 bnx2x_calc_vn_weight_sum(bp);
2590
2591 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592 bnx2x_init_vn_minmax(bp, 2*vn + port);
2593
2594 if (bp->port.pmf) {
2595 int func;
2596
2597 /* Set the attention towards other drivers on the same port */
2598 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599 if (vn == BP_E1HVN(bp))
2600 continue;
2601
2602 func = ((vn << 1) | port);
2603 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2605 }
2606
2607 /* Store it to internal memory */
2608 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609 REG_WR(bp, BAR_XSTRORM_INTMEM +
2610 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611 ((u32 *)(&bp->cmng))[i]);
2612 }
2613}
2614
2615static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2616{
2691d51d 2617 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2618
2619 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2620
f34d28ea
EG
2621 /*
2622 * This is the only place besides the function initialization
 2623		 * where bp->flags can change, so it is done without any
 2624		 * locks
2625 */
2691d51d
EG
2626 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2628 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2629
2630 bnx2x_e1h_disable(bp);
2631 } else {
2632 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2633 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2634
2635 bnx2x_e1h_enable(bp);
2636 }
2637 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2638 }
2639 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2640
2641 bnx2x_update_min_max(bp);
2642 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2643 }
2644
2645 /* Report results to MCP */
2646 if (dcc_event)
2647 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2648 else
2649 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2650}
2651
28912902
MC
2652/* must be called under the spq lock */
2653static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654{
2655 struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657 if (bp->spq_prod_bd == bp->spq_last_bd) {
2658 bp->spq_prod_bd = bp->spq;
2659 bp->spq_prod_idx = 0;
2660 DP(NETIF_MSG_TIMER, "end of spq\n");
2661 } else {
2662 bp->spq_prod_bd++;
2663 bp->spq_prod_idx++;
2664 }
2665 return next_spe;
2666}
2667
2668/* must be called under the spq lock */
2669static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2670{
2671 int func = BP_FUNC(bp);
2672
2673 /* Make sure that BD data is updated before writing the producer */
2674 wmb();
2675
2676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677 bp->spq_prod_idx);
2678 mmiowb();
2679}
2680
a2fbb9ea
ET
2681/* the slow path queue is odd since completions arrive on the fastpath ring */
2682static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683 u32 data_hi, u32 data_lo, int common)
2684{
28912902 2685 struct eth_spe *spe;
a2fbb9ea 2686
34f80b04
EG
2687 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2689 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2692
2693#ifdef BNX2X_STOP_ON_ERROR
2694 if (unlikely(bp->panic))
2695 return -EIO;
2696#endif
2697
34f80b04 2698 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2699
2700 if (!bp->spq_left) {
2701 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2702 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2703 bnx2x_panic();
2704 return -EBUSY;
2705 }
f1410647 2706
28912902
MC
2707 spe = bnx2x_sp_get_next(bp);
2708
a2fbb9ea 2709 	/* CID needs port number to be encoded into it */
28912902 2710 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2711 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2712 HW_CID(bp, cid)));
28912902 2713 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2714 if (common)
28912902 2715 spe->hdr.type |=
a2fbb9ea
ET
2716 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2717
28912902
MC
2718 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2720
2721 bp->spq_left--;
2722
28912902 2723 bnx2x_sp_prod_update(bp);
34f80b04 2724 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2725 return 0;
2726}
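/* Illustrative usage (mirroring the stats path later in this file):
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 *
 * i.e. 64-bit ramrod data is passed as a hi/lo pair, and a nonzero
 * 'common' selects the common ramrod type in the SPE header.
 */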
2727
2728/* acquire split MCP access lock register */
4a37fb66 2729static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2730{
a2fbb9ea 2731 u32 i, j, val;
34f80b04 2732 int rc = 0;
a2fbb9ea
ET
2733
2734 might_sleep();
2735 i = 100;
2736 for (j = 0; j < i*10; j++) {
2737 val = (1UL << 31);
2738 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740 if (val & (1L << 31))
2741 break;
2742
2743 msleep(5);
2744 }
a2fbb9ea 2745 if (!(val & (1L << 31))) {
19680c48 2746 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2747 rc = -EBUSY;
2748 }
2749
2750 return rc;
2751}
2752
4a37fb66
YG
2753/* release split MCP access lock register */
2754static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2755{
2756 u32 val = 0;
2757
2758 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759}
2760
2761static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2762{
2763 struct host_def_status_block *def_sb = bp->def_status_blk;
2764 u16 rc = 0;
2765
2766 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2767 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769 rc |= 1;
2770 }
2771 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773 rc |= 2;
2774 }
2775 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777 rc |= 4;
2778 }
2779 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781 rc |= 8;
2782 }
2783 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785 rc |= 16;
2786 }
2787 return rc;
2788}
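/* Illustrative decode of the bitmask returned above (not in the original
 * source): bit 0 - attention bits index changed, bit 1 - CSTORM index,
 * bit 2 - USTORM index, bit 3 - XSTORM index, bit 4 - TSTORM index.
 * bnx2x_sp_task() below only tests bit 0 (HW attentions) explicitly.
 */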
2789
2790/*
2791 * slow path service functions
2792 */
2793
2794static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2795{
34f80b04 2796 int port = BP_PORT(bp);
5c862848
EG
2797 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2799 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2801 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2803 u32 aeu_mask;
87942b46 2804 u32 nig_mask = 0;
a2fbb9ea 2805
a2fbb9ea
ET
2806 if (bp->attn_state & asserted)
2807 BNX2X_ERR("IGU ERROR\n");
2808
3fcaf2e5
EG
2809 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810 aeu_mask = REG_RD(bp, aeu_addr);
2811
a2fbb9ea 2812 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2813 aeu_mask, asserted);
2814 aeu_mask &= ~(asserted & 0xff);
2815 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2816
3fcaf2e5
EG
2817 REG_WR(bp, aeu_addr, aeu_mask);
2818 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2819
3fcaf2e5 2820 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2821 bp->attn_state |= asserted;
3fcaf2e5 2822 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2823
2824 if (asserted & ATTN_HARD_WIRED_MASK) {
2825 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2826
a5e9a7cf
EG
2827 bnx2x_acquire_phy_lock(bp);
2828
877e9aa4 2829 /* save nig interrupt mask */
87942b46 2830 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2831 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2832
c18487ee 2833 bnx2x_link_attn(bp);
a2fbb9ea
ET
2834
2835 /* handle unicore attn? */
2836 }
2837 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2839
2840 if (asserted & GPIO_2_FUNC)
2841 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2842
2843 if (asserted & GPIO_3_FUNC)
2844 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2845
2846 if (asserted & GPIO_4_FUNC)
2847 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2848
2849 if (port == 0) {
2850 if (asserted & ATTN_GENERAL_ATTN_1) {
2851 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2853 }
2854 if (asserted & ATTN_GENERAL_ATTN_2) {
2855 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2857 }
2858 if (asserted & ATTN_GENERAL_ATTN_3) {
2859 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2861 }
2862 } else {
2863 if (asserted & ATTN_GENERAL_ATTN_4) {
2864 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2866 }
2867 if (asserted & ATTN_GENERAL_ATTN_5) {
2868 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2870 }
2871 if (asserted & ATTN_GENERAL_ATTN_6) {
2872 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2874 }
2875 }
2876
2877 } /* if hardwired */
2878
5c862848
EG
2879 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880 asserted, hc_addr);
2881 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2882
2883 /* now set back the mask */
a5e9a7cf 2884 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2885 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2886 bnx2x_release_phy_lock(bp);
2887 }
a2fbb9ea
ET
2888}
2889
fd4ef40d
EG
2890static inline void bnx2x_fan_failure(struct bnx2x *bp)
2891{
2892 int port = BP_PORT(bp);
2893
2894 /* mark the failure */
2895 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898 bp->link_params.ext_phy_config);
2899
2900 /* log the failure */
7995c64e
JP
 2901	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
2902 "Please contact Dell Support for assistance.\n");
fd4ef40d 2903}
ab6ad5a4 2904
877e9aa4 2905static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2906{
34f80b04 2907 int port = BP_PORT(bp);
877e9aa4 2908 int reg_offset;
4d295db0 2909 u32 val, swap_val, swap_override;
877e9aa4 2910
34f80b04
EG
2911 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2912 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2913
34f80b04 2914 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2915
2916 val = REG_RD(bp, reg_offset);
2917 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2918 REG_WR(bp, reg_offset, val);
2919
2920 BNX2X_ERR("SPIO5 hw attention\n");
2921
fd4ef40d 2922 /* Fan failure attention */
35b19ba5
EG
2923 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2924 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2925 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2926 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2927 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2928 /* The PHY reset is controlled by GPIO 1 */
2929 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2930 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2931 break;
2932
4d295db0
EG
2933 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2934 /* The PHY reset is controlled by GPIO 1 */
2935 /* fake the port number to cancel the swap done in
2936 set_gpio() */
2937 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2938 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2939 port = (swap_val && swap_override) ^ 1;
2940 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2941 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2942 break;
2943
877e9aa4
ET
2944 default:
2945 break;
2946 }
fd4ef40d 2947 bnx2x_fan_failure(bp);
877e9aa4 2948 }
34f80b04 2949
589abe3a
EG
2950 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2951 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2952 bnx2x_acquire_phy_lock(bp);
2953 bnx2x_handle_module_detect_int(&bp->link_params);
2954 bnx2x_release_phy_lock(bp);
2955 }
2956
34f80b04
EG
2957 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2958
2959 val = REG_RD(bp, reg_offset);
2960 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2961 REG_WR(bp, reg_offset, val);
2962
2963 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2964 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2965 bnx2x_panic();
2966 }
877e9aa4
ET
2967}
2968
2969static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2970{
2971 u32 val;
2972
0626b899 2973 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2974
2975 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2976 BNX2X_ERR("DB hw attention 0x%x\n", val);
2977 /* DORQ discard attention */
2978 if (val & 0x2)
2979 BNX2X_ERR("FATAL error from DORQ\n");
2980 }
34f80b04
EG
2981
2982 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2983
2984 int port = BP_PORT(bp);
2985 int reg_offset;
2986
2987 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2988 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2989
2990 val = REG_RD(bp, reg_offset);
2991 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2992 REG_WR(bp, reg_offset, val);
2993
2994 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2995 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2996 bnx2x_panic();
2997 }
877e9aa4
ET
2998}
2999
3000static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3001{
3002 u32 val;
3003
3004 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3005
3006 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3007 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3008 /* CFC error attention */
3009 if (val & 0x2)
3010 BNX2X_ERR("FATAL error from CFC\n");
3011 }
3012
3013 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3014
3015 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3016 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3017 /* RQ_USDMDP_FIFO_OVERFLOW */
3018 if (val & 0x18000)
3019 BNX2X_ERR("FATAL error from PXP\n");
3020 }
34f80b04
EG
3021
3022 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3023
3024 int port = BP_PORT(bp);
3025 int reg_offset;
3026
3027 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3028 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3029
3030 val = REG_RD(bp, reg_offset);
3031 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3032 REG_WR(bp, reg_offset, val);
3033
3034 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3035 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3036 bnx2x_panic();
3037 }
877e9aa4
ET
3038}
3039
3040static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3041{
34f80b04
EG
3042 u32 val;
3043
877e9aa4
ET
3044 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3045
34f80b04
EG
3046 if (attn & BNX2X_PMF_LINK_ASSERT) {
3047 int func = BP_FUNC(bp);
3048
3049 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3050 bp->mf_config = SHMEM_RD(bp,
3051 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3052 val = SHMEM_RD(bp, func_mb[func].drv_status);
3053 if (val & DRV_STATUS_DCC_EVENT_MASK)
3054 bnx2x_dcc_event(bp,
3055 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3056 bnx2x__link_status_update(bp);
2691d51d 3057 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3058 bnx2x_pmf_update(bp);
3059
3060 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3061
3062 BNX2X_ERR("MC assert!\n");
3063 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3064 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3067 bnx2x_panic();
3068
3069 } else if (attn & BNX2X_MCP_ASSERT) {
3070
3071 BNX2X_ERR("MCP assert!\n");
3072 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3073 bnx2x_fw_dump(bp);
877e9aa4
ET
3074
3075 } else
3076 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3077 }
3078
3079 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3080 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3081 if (attn & BNX2X_GRC_TIMEOUT) {
3082 val = CHIP_IS_E1H(bp) ?
3083 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3084 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3085 }
3086 if (attn & BNX2X_GRC_RSV) {
3087 val = CHIP_IS_E1H(bp) ?
3088 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3089 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3090 }
877e9aa4 3091 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3092 }
3093}
3094
3095static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3096{
a2fbb9ea
ET
3097 struct attn_route attn;
3098 struct attn_route group_mask;
34f80b04 3099 int port = BP_PORT(bp);
877e9aa4 3100 int index;
a2fbb9ea
ET
3101 u32 reg_addr;
3102 u32 val;
3fcaf2e5 3103 u32 aeu_mask;
a2fbb9ea
ET
3104
 3105	/* need to take HW lock because the MCP or the other port might also
 3106	   try to handle this event */
4a37fb66 3107 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3108
3109 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3110 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3111 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3112 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3113 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3114 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3115
3116 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3117 if (deasserted & (1 << index)) {
3118 group_mask = bp->attn_group[index];
3119
34f80b04
EG
3120 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3121 index, group_mask.sig[0], group_mask.sig[1],
3122 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3123
877e9aa4
ET
3124 bnx2x_attn_int_deasserted3(bp,
3125 attn.sig[3] & group_mask.sig[3]);
3126 bnx2x_attn_int_deasserted1(bp,
3127 attn.sig[1] & group_mask.sig[1]);
3128 bnx2x_attn_int_deasserted2(bp,
3129 attn.sig[2] & group_mask.sig[2]);
3130 bnx2x_attn_int_deasserted0(bp,
3131 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3132
a2fbb9ea
ET
3133 if ((attn.sig[0] & group_mask.sig[0] &
3134 HW_PRTY_ASSERT_SET_0) ||
3135 (attn.sig[1] & group_mask.sig[1] &
3136 HW_PRTY_ASSERT_SET_1) ||
3137 (attn.sig[2] & group_mask.sig[2] &
3138 HW_PRTY_ASSERT_SET_2))
6378c025 3139 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3140 }
3141 }
3142
4a37fb66 3143 bnx2x_release_alr(bp);
a2fbb9ea 3144
5c862848 3145 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3146
3147 val = ~deasserted;
3fcaf2e5
EG
3148 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3149 val, reg_addr);
5c862848 3150 REG_WR(bp, reg_addr, val);
a2fbb9ea 3151
a2fbb9ea 3152 if (~bp->attn_state & deasserted)
3fcaf2e5 3153 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3154
3155 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3156 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3157
3fcaf2e5
EG
3158 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3159 aeu_mask = REG_RD(bp, reg_addr);
3160
3161 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3162 aeu_mask, deasserted);
3163 aeu_mask |= (deasserted & 0xff);
3164 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3165
3fcaf2e5
EG
3166 REG_WR(bp, reg_addr, aeu_mask);
3167 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3168
3169 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3170 bp->attn_state &= ~deasserted;
3171 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3172}
3173
3174static void bnx2x_attn_int(struct bnx2x *bp)
3175{
3176 /* read local copy of bits */
68d59484
EG
3177 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3178 attn_bits);
3179 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180 attn_bits_ack);
a2fbb9ea
ET
3181 u32 attn_state = bp->attn_state;
3182
3183 /* look for changed bits */
3184 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3185 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3186
3187 DP(NETIF_MSG_HW,
3188 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3189 attn_bits, attn_ack, asserted, deasserted);
3190
3191 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3192 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3193
3194 /* handle bits that were raised */
3195 if (asserted)
3196 bnx2x_attn_int_asserted(bp, asserted);
3197
3198 if (deasserted)
3199 bnx2x_attn_int_deasserted(bp, deasserted);
3200}
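/* Illustrative bit example (values hypothetical): with attn_bits = 0x5,
 * attn_ack = 0x1 and attn_state = 0x1, asserted = 0x5 & ~0x1 & ~0x1 = 0x4
 * (bit 2 newly raised); conversely attn_bits = 0x4, attn_ack = 0x5 and
 * attn_state = 0x5 give deasserted = ~0x4 & 0x5 & 0x5 = 0x1 (bit 0
 * dropped). Each case is routed to the matching handler above.
 */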
3201
3202static void bnx2x_sp_task(struct work_struct *work)
3203{
1cf167f2 3204 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3205 u16 status;
3206
34f80b04 3207
a2fbb9ea
ET
3208 /* Return here if interrupt is disabled */
3209 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3210 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3211 return;
3212 }
3213
3214 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3215/* if (status == 0) */
3216/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3217
3196a88a 3218 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3219
877e9aa4
ET
3220 /* HW attentions */
3221 if (status & 0x1)
a2fbb9ea 3222 bnx2x_attn_int(bp);
a2fbb9ea 3223
68d59484 3224 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3225 IGU_INT_NOP, 1);
3226 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3227 IGU_INT_NOP, 1);
3228 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3229 IGU_INT_NOP, 1);
3230 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3231 IGU_INT_NOP, 1);
3232 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3233 IGU_INT_ENABLE, 1);
877e9aa4 3234
a2fbb9ea
ET
3235}
3236
3237static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3238{
3239 struct net_device *dev = dev_instance;
3240 struct bnx2x *bp = netdev_priv(dev);
3241
3242 /* Return here if interrupt is disabled */
3243 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3244 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3245 return IRQ_HANDLED;
3246 }
3247
8d9c5f34 3248 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3249
3250#ifdef BNX2X_STOP_ON_ERROR
3251 if (unlikely(bp->panic))
3252 return IRQ_HANDLED;
3253#endif
3254
993ac7b5
MC
3255#ifdef BCM_CNIC
3256 {
3257 struct cnic_ops *c_ops;
3258
3259 rcu_read_lock();
3260 c_ops = rcu_dereference(bp->cnic_ops);
3261 if (c_ops)
3262 c_ops->cnic_handler(bp->cnic_data, NULL);
3263 rcu_read_unlock();
3264 }
3265#endif
1cf167f2 3266 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3267
3268 return IRQ_HANDLED;
3269}
3270
3271/* end of slow path */
3272
3273/* Statistics */
3274
3275/****************************************************************************
3276* Macros
3277****************************************************************************/
3278
a2fbb9ea
ET
3279/* sum[hi:lo] += add[hi:lo] */
3280#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3281 do { \
3282 s_lo += a_lo; \
f5ba6772 3283 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3284 } while (0)
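/* Worked example (illustrative): adding 0x00000000_00000001 to
 * 0x00000001_FFFFFFFF wraps s_lo to 0, the (s_lo < a_lo) test supplies
 * the carry, and the result is 0x00000002_00000000.
 */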
3285
3286/* difference = minuend - subtrahend */
3287#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3288 do { \
bb2a0f7a
YG
3289 if (m_lo < s_lo) { \
3290 /* underflow */ \
a2fbb9ea 3291 d_hi = m_hi - s_hi; \
bb2a0f7a 3292 if (d_hi > 0) { \
6378c025 3293 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3294 d_hi--; \
3295 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3296 } else { \
6378c025 3297 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3298 d_hi = 0; \
3299 d_lo = 0; \
3300 } \
bb2a0f7a
YG
3301 } else { \
3302 /* m_lo >= s_lo */ \
a2fbb9ea 3303 if (m_hi < s_hi) { \
bb2a0f7a
YG
3304 d_hi = 0; \
3305 d_lo = 0; \
3306 } else { \
6378c025 3307 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3308 d_hi = m_hi - s_hi; \
3309 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3310 } \
3311 } \
3312 } while (0)
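/* Worked example (illustrative): 0x00000001_00000000 minus
 * 0x00000000_00000001 takes the underflow branch: d_hi = 1 is loaned
 * down to 0 and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xFFFFFFFF, i.e. the
 * expected 2^32 - 1.
 */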
3313
bb2a0f7a 3314#define UPDATE_STAT64(s, t) \
a2fbb9ea 3315 do { \
bb2a0f7a
YG
3316 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3317 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3318 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3319 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3320 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3321 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3322 } while (0)
3323
bb2a0f7a 3324#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3325 do { \
bb2a0f7a
YG
3326 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3327 diff.lo, new->s##_lo, old->s##_lo); \
3328 ADD_64(estats->t##_hi, diff.hi, \
3329 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3330 } while (0)
3331
3332/* sum[hi:lo] += add */
3333#define ADD_EXTEND_64(s_hi, s_lo, a) \
3334 do { \
3335 s_lo += a; \
3336 s_hi += (s_lo < a) ? 1 : 0; \
3337 } while (0)
3338
bb2a0f7a 3339#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3340 do { \
bb2a0f7a
YG
3341 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3342 pstats->mac_stx[1].s##_lo, \
3343 new->s); \
a2fbb9ea
ET
3344 } while (0)
3345
bb2a0f7a 3346#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3347 do { \
4781bfad
EG
3348 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3349 old_tclient->s = tclient->s; \
de832a55
EG
3350 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3351 } while (0)
3352
3353#define UPDATE_EXTEND_USTAT(s, t) \
3354 do { \
3355 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3356 old_uclient->s = uclient->s; \
3357 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3358 } while (0)
3359
3360#define UPDATE_EXTEND_XSTAT(s, t) \
3361 do { \
4781bfad
EG
3362 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3363 old_xclient->s = xclient->s; \
de832a55
EG
3364 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3365 } while (0)
3366
3367/* minuend -= subtrahend */
3368#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3369 do { \
3370 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3371 } while (0)
3372
3373/* minuend[hi:lo] -= subtrahend */
3374#define SUB_EXTEND_64(m_hi, m_lo, s) \
3375 do { \
3376 SUB_64(m_hi, 0, m_lo, s); \
3377 } while (0)
3378
3379#define SUB_EXTEND_USTAT(s, t) \
3380 do { \
3381 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3382 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3383 } while (0)
3384
3385/*
3386 * General service functions
3387 */
3388
3389static inline long bnx2x_hilo(u32 *hiref)
3390{
3391 u32 lo = *(hiref + 1);
3392#if (BITS_PER_LONG == 64)
3393 u32 hi = *hiref;
3394
3395 return HILO_U64(hi, lo);
3396#else
3397 return lo;
3398#endif
3399}
3400
3401/*
3402 * Init service functions
3403 */
3404
bb2a0f7a
YG
3405static void bnx2x_storm_stats_post(struct bnx2x *bp)
3406{
3407 if (!bp->stats_pending) {
3408 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3409 int i, rc;
bb2a0f7a
YG
3410
3411 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3412 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3413 for_each_queue(bp, i)
3414 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3415
3416 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3417 ((u32 *)&ramrod_data)[1],
3418 ((u32 *)&ramrod_data)[0], 0);
3419 if (rc == 0) {
 3420			/* stats ramrod has its own slot on the spq */
3421 bp->spq_left++;
3422 bp->stats_pending = 1;
3423 }
3424 }
3425}
3426
bb2a0f7a
YG
3427static void bnx2x_hw_stats_post(struct bnx2x *bp)
3428{
3429 struct dmae_command *dmae = &bp->stats_dmae;
3430 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3431
3432 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3433 if (CHIP_REV_IS_SLOW(bp))
3434 return;
bb2a0f7a
YG
3435
3436 /* loader */
3437 if (bp->executer_idx) {
3438 int loader_idx = PMF_DMAE_C(bp);
3439
3440 memset(dmae, 0, sizeof(struct dmae_command));
3441
3442 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3443 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3444 DMAE_CMD_DST_RESET |
3445#ifdef __BIG_ENDIAN
3446 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3447#else
3448 DMAE_CMD_ENDIANITY_DW_SWAP |
3449#endif
3450 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3451 DMAE_CMD_PORT_0) |
3452 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3453 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3454 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3455 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3456 sizeof(struct dmae_command) *
3457 (loader_idx + 1)) >> 2;
3458 dmae->dst_addr_hi = 0;
3459 dmae->len = sizeof(struct dmae_command) >> 2;
3460 if (CHIP_IS_E1(bp))
3461 dmae->len--;
3462 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3463 dmae->comp_addr_hi = 0;
3464 dmae->comp_val = 1;
3465
3466 *stats_comp = 0;
3467 bnx2x_post_dmae(bp, dmae, loader_idx);
3468
3469 } else if (bp->func_stx) {
3470 *stats_comp = 0;
3471 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3472 }
3473}
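/*
 * Illustrative note (not from the original source): when commands have
 * been queued (bp->executer_idx != 0), the command built above acts as a
 * "loader": it DMAEs the first queued command from dmae[0] in host memory
 * into the DMAE command memory of channel loader_idx + 1 and, by using
 * that channel's GO register as its completion address, starts it.  The
 * queued commands' own completions are likewise wired to DMAE GO
 * registers, so the sequence progresses in hardware and only the final
 * command reports into the stats_comp word.  With nothing queued,
 * bp->stats_dmae is simply posted on the INIT_DMAE_C channel.
 */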
3474
3475static int bnx2x_stats_comp(struct bnx2x *bp)
3476{
3477 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3478 int cnt = 10;
3479
3480 might_sleep();
3481 while (*stats_comp != DMAE_COMP_VAL) {
3482 if (!cnt) {
3483 BNX2X_ERR("timeout waiting for stats to finish\n");
3484 break;
3485 }
3486 cnt--;
12469401 3487 msleep(1);
3488 }
3489 return 1;
3490}
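/*
 * Illustrative note (not from the original source): this polls the
 * stats_comp word, which the last DMAE command of a chain sets to
 * DMAE_COMP_VAL, for at most 10 msleep(1) iterations.  It returns 1
 * unconditionally, so callers use it purely as a completion barrier;
 * a timeout is only logged.
 */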
3491
3492/*
3493 * Statistics service functions
3494 */
3495
3496static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3497{
3498 struct dmae_command *dmae;
3499 u32 opcode;
3500 int loader_idx = PMF_DMAE_C(bp);
3501 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3502
3503 /* sanity */
3504 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3505 BNX2X_ERR("BUG!\n");
3506 return;
3507 }
3508
3509 bp->executer_idx = 0;
3510
3511 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3512 DMAE_CMD_C_ENABLE |
3513 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3514#ifdef __BIG_ENDIAN
3515 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3516#else
3517 DMAE_CMD_ENDIANITY_DW_SWAP |
3518#endif
3519 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3520 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3521
3522 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3523 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3524 dmae->src_addr_lo = bp->port.port_stx >> 2;
3525 dmae->src_addr_hi = 0;
3526 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3527 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3528 dmae->len = DMAE_LEN32_RD_MAX;
3529 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3530 dmae->comp_addr_hi = 0;
3531 dmae->comp_val = 1;
3532
3533 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3534 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3535 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3536 dmae->src_addr_hi = 0;
3537 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3538 DMAE_LEN32_RD_MAX * 4);
3539 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3540 DMAE_LEN32_RD_MAX * 4);
3541 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3542 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3543 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3544 dmae->comp_val = DMAE_COMP_VAL;
3545
3546 *stats_comp = 0;
3547 bnx2x_hw_stats_post(bp);
3548 bnx2x_stats_comp(bp);
3549}
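/*
 * Illustrative note (not from the original source): a single DMAE
 * transfer is capped at DMAE_LEN32_RD_MAX 32-bit words, so the port
 * statistics block is read in two chunks: the first of exactly
 * DMAE_LEN32_RD_MAX words, the second of
 * (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX words at
 * byte offset DMAE_LEN32_RD_MAX * 4.  Only the second chunk completes
 * into stats_comp, so a completion implies both chunks have landed.
 */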
3550
3551static void bnx2x_port_stats_init(struct bnx2x *bp)
3552{
3553 struct dmae_command *dmae;
34f80b04 3554 int port = BP_PORT(bp);
bb2a0f7a 3555 int vn = BP_E1HVN(bp);
a2fbb9ea 3556 u32 opcode;
bb2a0f7a 3557 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3558 u32 mac_addr;
3559 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3560
3561 /* sanity */
3562 if (!bp->link_vars.link_up || !bp->port.pmf) {
3563 BNX2X_ERR("BUG!\n");
3564 return;
3565 }
3566
3567 bp->executer_idx = 0;
3568
3569 /* MCP */
3570 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3571 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3572 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3573#ifdef __BIG_ENDIAN
bb2a0f7a 3574 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3575#else
bb2a0f7a 3576 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3577#endif
3578 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3579 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3580
bb2a0f7a 3581 if (bp->port.port_stx) {
3582
3583 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3584 dmae->opcode = opcode;
3585 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3586 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3587 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3588 dmae->dst_addr_hi = 0;
3589 dmae->len = sizeof(struct host_port_stats) >> 2;
3590 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3591 dmae->comp_addr_hi = 0;
3592 dmae->comp_val = 1;
3593 }
3594
3595 if (bp->func_stx) {
3596
3597 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3598 dmae->opcode = opcode;
3599 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3600 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3601 dmae->dst_addr_lo = bp->func_stx >> 2;
3602 dmae->dst_addr_hi = 0;
3603 dmae->len = sizeof(struct host_func_stats) >> 2;
3604 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3605 dmae->comp_addr_hi = 0;
3606 dmae->comp_val = 1;
3607 }
3608
bb2a0f7a 3609 /* MAC */
3610 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3611 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3612 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3613#ifdef __BIG_ENDIAN
3614 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3615#else
3616 DMAE_CMD_ENDIANITY_DW_SWAP |
3617#endif
3618 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3619 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3620
c18487ee 3621 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3622
3623 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3624 NIG_REG_INGRESS_BMAC0_MEM);
3625
3626 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3627 BIGMAC_REGISTER_TX_STAT_GTBYT */
3628 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3629 dmae->opcode = opcode;
3630 dmae->src_addr_lo = (mac_addr +
3631 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3632 dmae->src_addr_hi = 0;
3633 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3634 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3635 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3636 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3637 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3638 dmae->comp_addr_hi = 0;
3639 dmae->comp_val = 1;
3640
3641 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3642 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3643 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3644 dmae->opcode = opcode;
3645 dmae->src_addr_lo = (mac_addr +
3646 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3647 dmae->src_addr_hi = 0;
3648 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3649 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3650 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3651 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3652 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3653 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3654 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3655 dmae->comp_addr_hi = 0;
3656 dmae->comp_val = 1;
3657
c18487ee 3658 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3659
3660 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3661
3662 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3663 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3664 dmae->opcode = opcode;
3665 dmae->src_addr_lo = (mac_addr +
3666 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3667 dmae->src_addr_hi = 0;
3668 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3669 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3670 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3671 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3672 dmae->comp_addr_hi = 0;
3673 dmae->comp_val = 1;
3674
3675 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3676 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3677 dmae->opcode = opcode;
3678 dmae->src_addr_lo = (mac_addr +
3679 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3680 dmae->src_addr_hi = 0;
3681 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3682 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3683 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3684 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3685 dmae->len = 1;
3686 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3687 dmae->comp_addr_hi = 0;
3688 dmae->comp_val = 1;
3689
3690 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3691 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3692 dmae->opcode = opcode;
3693 dmae->src_addr_lo = (mac_addr +
3694 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3695 dmae->src_addr_hi = 0;
3696 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3697 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3698 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3699 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3700 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3701 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3702 dmae->comp_addr_hi = 0;
3703 dmae->comp_val = 1;
3704 }
3705
3706 /* NIG */
3707 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3708 dmae->opcode = opcode;
3709 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3710 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3711 dmae->src_addr_hi = 0;
3712 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3713 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3714 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3715 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3716 dmae->comp_addr_hi = 0;
3717 dmae->comp_val = 1;
3718
3719 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3720 dmae->opcode = opcode;
3721 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3722 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3723 dmae->src_addr_hi = 0;
3724 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3725 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3726 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3727 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728 dmae->len = (2*sizeof(u32)) >> 2;
3729 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3730 dmae->comp_addr_hi = 0;
3731 dmae->comp_val = 1;
3732
3733 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3734 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3735 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3736 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3737#ifdef __BIG_ENDIAN
3738 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3739#else
3740 DMAE_CMD_ENDIANITY_DW_SWAP |
3741#endif
3742 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3743 (vn << DMAE_CMD_E1HVN_SHIFT));
3744 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3745 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3746 dmae->src_addr_hi = 0;
3747 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3748 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3749 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751 dmae->len = (2*sizeof(u32)) >> 2;
3752 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3753 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3754 dmae->comp_val = DMAE_COMP_VAL;
3755
3756 *stats_comp = 0;
3757}
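/*
 * Illustrative summary (not from the original source): the DMAE program
 * assembled above has three stages: PCI->GRC copies publishing the port
 * and function statistics to the management firmware (port_stx/func_stx),
 * GRC->PCI copies pulling the BMAC or EMAC hardware counters into
 * mac_stats, and GRC->PCI copies of the NIG counters.  Every command but
 * the last completes into a DMAE GO register; only the final NIG command
 * writes DMAE_COMP_VAL to stats_comp, giving the host a single word to
 * poll for the whole program.
 */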
3758
bb2a0f7a 3759static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3760{
3761 struct dmae_command *dmae = &bp->stats_dmae;
3762 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3763
3764 /* sanity */
3765 if (!bp->func_stx) {
3766 BNX2X_ERR("BUG!\n");
3767 return;
3768 }
a2fbb9ea 3769
3770 bp->executer_idx = 0;
3771 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3772
3773 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3774 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3775 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3776#ifdef __BIG_ENDIAN
3777 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3778#else
3779 DMAE_CMD_ENDIANITY_DW_SWAP |
3780#endif
3781 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3782 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3783 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3784 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3785 dmae->dst_addr_lo = bp->func_stx >> 2;
3786 dmae->dst_addr_hi = 0;
3787 dmae->len = sizeof(struct host_func_stats) >> 2;
3788 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3789 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3790 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3791
3792 *stats_comp = 0;
3793}
a2fbb9ea 3794
3795static void bnx2x_stats_start(struct bnx2x *bp)
3796{
3797 if (bp->port.pmf)
3798 bnx2x_port_stats_init(bp);
3799
3800 else if (bp->func_stx)
3801 bnx2x_func_stats_init(bp);
3802
3803 bnx2x_hw_stats_post(bp);
3804 bnx2x_storm_stats_post(bp);
3805}
3806
3807static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3808{
3809 bnx2x_stats_comp(bp);
3810 bnx2x_stats_pmf_update(bp);
3811 bnx2x_stats_start(bp);
3812}
3813
3814static void bnx2x_stats_restart(struct bnx2x *bp)
3815{
3816 bnx2x_stats_comp(bp);
3817 bnx2x_stats_start(bp);
3818}
3819
3820static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3821{
3822 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3823 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3824 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3825 struct {
3826 u32 lo;
3827 u32 hi;
3828 } diff;
3829
3830 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3831 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3832 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3833 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3834 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3835 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3836 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3837 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3838 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3839 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3840 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3841 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3842 UPDATE_STAT64(tx_stat_gt127,
3843 tx_stat_etherstatspkts65octetsto127octets);
3844 UPDATE_STAT64(tx_stat_gt255,
3845 tx_stat_etherstatspkts128octetsto255octets);
3846 UPDATE_STAT64(tx_stat_gt511,
3847 tx_stat_etherstatspkts256octetsto511octets);
3848 UPDATE_STAT64(tx_stat_gt1023,
3849 tx_stat_etherstatspkts512octetsto1023octets);
3850 UPDATE_STAT64(tx_stat_gt1518,
3851 tx_stat_etherstatspkts1024octetsto1522octets);
3852 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3853 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3854 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3855 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3856 UPDATE_STAT64(tx_stat_gterr,
3857 tx_stat_dot3statsinternalmactransmiterrors);
3858 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3859
3860 estats->pause_frames_received_hi =
3861 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3862 estats->pause_frames_received_lo =
3863 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3864
3865 estats->pause_frames_sent_hi =
3866 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3867 estats->pause_frames_sent_lo =
3868 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3869}
3870
3871static void bnx2x_emac_stats_update(struct bnx2x *bp)
3872{
3873 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3874 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3875 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3876
3877 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3878 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3879 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3880 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3881 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3882 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3883 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3884 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3885 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3886 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3887 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3888 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3889 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3890 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3891 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3892 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3893 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3894 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3895 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3896 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3897 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3898 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3899 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3900 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3901 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3902 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3903 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3904 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3905 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3906 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3907 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3908
3909 estats->pause_frames_received_hi =
3910 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3911 estats->pause_frames_received_lo =
3912 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3913 ADD_64(estats->pause_frames_received_hi,
3914 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3915 estats->pause_frames_received_lo,
3916 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3917
3918 estats->pause_frames_sent_hi =
3919 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3920 estats->pause_frames_sent_lo =
3921 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3922 ADD_64(estats->pause_frames_sent_hi,
3923 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3924 estats->pause_frames_sent_lo,
3925 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3926}
3927
3928static int bnx2x_hw_stats_update(struct bnx2x *bp)
3929{
3930 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3931 struct nig_stats *old = &(bp->port.old_nig_stats);
3932 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3933 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3934 struct {
3935 u32 lo;
3936 u32 hi;
3937 } diff;
de832a55 3938 u32 nig_timer_max;
3939
3940 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3941 bnx2x_bmac_stats_update(bp);
3942
3943 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3944 bnx2x_emac_stats_update(bp);
3945
3946 else { /* unreached */
c3eefaf6 3947 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3948 return -1;
3949 }
a2fbb9ea 3950
3951 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3952 new->brb_discard - old->brb_discard);
3953 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3954 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3955
3956 UPDATE_STAT64_NIG(egress_mac_pkt0,
3957 etherstatspkts1024octetsto1522octets);
3958 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3959
bb2a0f7a 3960 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3961
3962 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3963 sizeof(struct mac_stx));
3964 estats->brb_drop_hi = pstats->brb_drop_hi;
3965 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3966
bb2a0f7a 3967 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3968
3969 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3970 if (nig_timer_max != estats->nig_timer_max) {
3971 estats->nig_timer_max = nig_timer_max;
3972 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3973 }
3974
bb2a0f7a 3975 return 0;
3976}
3977
bb2a0f7a 3978static int bnx2x_storm_stats_update(struct bnx2x *bp)
3979{
3980 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3981 struct tstorm_per_port_stats *tport =
de832a55 3982 &stats->tstorm_common.port_statistics;
3983 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3984 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3985 int i;
3986
3987 memcpy(&(fstats->total_bytes_received_hi),
3988 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3989 sizeof(struct host_func_stats) - 2*sizeof(u32));
3990 estats->error_bytes_received_hi = 0;
3991 estats->error_bytes_received_lo = 0;
3992 estats->etherstatsoverrsizepkts_hi = 0;
3993 estats->etherstatsoverrsizepkts_lo = 0;
3994 estats->no_buff_discard_hi = 0;
3995 estats->no_buff_discard_lo = 0;
a2fbb9ea 3996
54b9ddaa 3997 for_each_queue(bp, i) {
3998 struct bnx2x_fastpath *fp = &bp->fp[i];
3999 int cl_id = fp->cl_id;
4000 struct tstorm_per_client_stats *tclient =
4001 &stats->tstorm_common.client_statistics[cl_id];
4002 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4003 struct ustorm_per_client_stats *uclient =
4004 &stats->ustorm_common.client_statistics[cl_id];
4005 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4006 struct xstorm_per_client_stats *xclient =
4007 &stats->xstorm_common.client_statistics[cl_id];
4008 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4009 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4010 u32 diff;
4011
4012 /* are storm stats valid? */
4013 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4014 bp->stats_counter) {
4015 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4016 " xstorm counter (%d) != stats_counter (%d)\n",
4017 i, xclient->stats_counter, bp->stats_counter);
4018 return -1;
4019 }
4020 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4021 bp->stats_counter) {
4022 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4023 " tstorm counter (%d) != stats_counter (%d)\n",
4024 i, tclient->stats_counter, bp->stats_counter);
4025 return -2;
4026 }
4027 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4028 bp->stats_counter) {
4029 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4030 " ustorm counter (%d) != stats_counter (%d)\n",
4031 i, uclient->stats_counter, bp->stats_counter);
4032 return -4;
4033 }
a2fbb9ea 4034
de832a55 4035 qstats->total_bytes_received_hi =
ca00392c 4036 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4037 qstats->total_bytes_received_lo =
4038 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4039
4040 ADD_64(qstats->total_bytes_received_hi,
4041 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4042 qstats->total_bytes_received_lo,
4043 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4044
4045 ADD_64(qstats->total_bytes_received_hi,
4046 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4047 qstats->total_bytes_received_lo,
4048 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4049
4050 qstats->valid_bytes_received_hi =
4051 qstats->total_bytes_received_hi;
de832a55 4052 qstats->valid_bytes_received_lo =
ca00392c 4053 qstats->total_bytes_received_lo;
bb2a0f7a 4054
de832a55 4055 qstats->error_bytes_received_hi =
bb2a0f7a 4056 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4057 qstats->error_bytes_received_lo =
bb2a0f7a 4058 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4059
4060 ADD_64(qstats->total_bytes_received_hi,
4061 qstats->error_bytes_received_hi,
4062 qstats->total_bytes_received_lo,
4063 qstats->error_bytes_received_lo);
4064
4065 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4066 total_unicast_packets_received);
4067 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4068 total_multicast_packets_received);
4069 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4070 total_broadcast_packets_received);
4071 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4072 etherstatsoverrsizepkts);
4073 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4074
4075 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4076 total_unicast_packets_received);
4077 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4078 total_multicast_packets_received);
4079 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4080 total_broadcast_packets_received);
4081 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4082 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4083 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4084
4085 qstats->total_bytes_transmitted_hi =
ca00392c 4086 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4087 qstats->total_bytes_transmitted_lo =
4088 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4089
4090 ADD_64(qstats->total_bytes_transmitted_hi,
4091 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4092 qstats->total_bytes_transmitted_lo,
4093 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4094
4095 ADD_64(qstats->total_bytes_transmitted_hi,
4096 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4097 qstats->total_bytes_transmitted_lo,
4098 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4099
4100 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4101 total_unicast_packets_transmitted);
4102 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4103 total_multicast_packets_transmitted);
4104 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4105 total_broadcast_packets_transmitted);
4106
4107 old_tclient->checksum_discard = tclient->checksum_discard;
4108 old_tclient->ttl0_discard = tclient->ttl0_discard;
4109
4110 ADD_64(fstats->total_bytes_received_hi,
4111 qstats->total_bytes_received_hi,
4112 fstats->total_bytes_received_lo,
4113 qstats->total_bytes_received_lo);
4114 ADD_64(fstats->total_bytes_transmitted_hi,
4115 qstats->total_bytes_transmitted_hi,
4116 fstats->total_bytes_transmitted_lo,
4117 qstats->total_bytes_transmitted_lo);
4118 ADD_64(fstats->total_unicast_packets_received_hi,
4119 qstats->total_unicast_packets_received_hi,
4120 fstats->total_unicast_packets_received_lo,
4121 qstats->total_unicast_packets_received_lo);
4122 ADD_64(fstats->total_multicast_packets_received_hi,
4123 qstats->total_multicast_packets_received_hi,
4124 fstats->total_multicast_packets_received_lo,
4125 qstats->total_multicast_packets_received_lo);
4126 ADD_64(fstats->total_broadcast_packets_received_hi,
4127 qstats->total_broadcast_packets_received_hi,
4128 fstats->total_broadcast_packets_received_lo,
4129 qstats->total_broadcast_packets_received_lo);
4130 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4131 qstats->total_unicast_packets_transmitted_hi,
4132 fstats->total_unicast_packets_transmitted_lo,
4133 qstats->total_unicast_packets_transmitted_lo);
4134 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4135 qstats->total_multicast_packets_transmitted_hi,
4136 fstats->total_multicast_packets_transmitted_lo,
4137 qstats->total_multicast_packets_transmitted_lo);
4138 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4139 qstats->total_broadcast_packets_transmitted_hi,
4140 fstats->total_broadcast_packets_transmitted_lo,
4141 qstats->total_broadcast_packets_transmitted_lo);
4142 ADD_64(fstats->valid_bytes_received_hi,
4143 qstats->valid_bytes_received_hi,
4144 fstats->valid_bytes_received_lo,
4145 qstats->valid_bytes_received_lo);
4146
4147 ADD_64(estats->error_bytes_received_hi,
4148 qstats->error_bytes_received_hi,
4149 estats->error_bytes_received_lo,
4150 qstats->error_bytes_received_lo);
4151 ADD_64(estats->etherstatsoverrsizepkts_hi,
4152 qstats->etherstatsoverrsizepkts_hi,
4153 estats->etherstatsoverrsizepkts_lo,
4154 qstats->etherstatsoverrsizepkts_lo);
4155 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4156 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4157 }
4158
4159 ADD_64(fstats->total_bytes_received_hi,
4160 estats->rx_stat_ifhcinbadoctets_hi,
4161 fstats->total_bytes_received_lo,
4162 estats->rx_stat_ifhcinbadoctets_lo);
4163
4164 memcpy(estats, &(fstats->total_bytes_received_hi),
4165 sizeof(struct host_func_stats) - 2*sizeof(u32));
4166
4167 ADD_64(estats->etherstatsoverrsizepkts_hi,
4168 estats->rx_stat_dot3statsframestoolong_hi,
4169 estats->etherstatsoverrsizepkts_lo,
4170 estats->rx_stat_dot3statsframestoolong_lo);
4171 ADD_64(estats->error_bytes_received_hi,
4172 estats->rx_stat_ifhcinbadoctets_hi,
4173 estats->error_bytes_received_lo,
4174 estats->rx_stat_ifhcinbadoctets_lo);
4175
4176 if (bp->port.pmf) {
4177 estats->mac_filter_discard =
4178 le32_to_cpu(tport->mac_filter_discard);
4179 estats->xxoverflow_discard =
4180 le32_to_cpu(tport->xxoverflow_discard);
4181 estats->brb_truncate_discard =
bb2a0f7a 4182 le32_to_cpu(tport->brb_truncate_discard);
4183 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4184 }
4185
4186 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4187
4188 bp->stats_pending = 0;
4189
4190 return 0;
4191}
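/*
 * Illustrative note (not from the original source): the validity tests
 * above rely on u16 wraparound.  bnx2x_storm_stats_post() sends
 * drv_counter = bp->stats_counter and then increments it, and each storm
 * echoes the counter it serviced; a client entry is current only if
 * storm_counter + 1 == bp->stats_counter (mod 2^16), e.g. a storm value
 * of 0xffff is still valid once the driver counter has wrapped to 0.
 */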
4192
bb2a0f7a 4193static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4194{
bb2a0f7a 4195 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4196 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4197 int i;
4198
4199 nstats->rx_packets =
4200 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4201 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4202 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4203
4204 nstats->tx_packets =
4205 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4206 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4207 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4208
de832a55 4209 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4210
0e39e645 4211 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4212
de832a55 4213 nstats->rx_dropped = estats->mac_discard;
54b9ddaa 4214 for_each_queue(bp, i)
4215 nstats->rx_dropped +=
4216 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4217
4218 nstats->tx_dropped = 0;
4219
4220 nstats->multicast =
de832a55 4221 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4222
bb2a0f7a 4223 nstats->collisions =
de832a55 4224 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4225
4226 nstats->rx_length_errors =
4227 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4228 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4229 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4230 bnx2x_hilo(&estats->brb_truncate_hi);
4231 nstats->rx_crc_errors =
4232 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4233 nstats->rx_frame_errors =
4234 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4235 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4236 nstats->rx_missed_errors = estats->xxoverflow_discard;
4237
4238 nstats->rx_errors = nstats->rx_length_errors +
4239 nstats->rx_over_errors +
4240 nstats->rx_crc_errors +
4241 nstats->rx_frame_errors +
4242 nstats->rx_fifo_errors +
4243 nstats->rx_missed_errors;
a2fbb9ea 4244
bb2a0f7a 4245 nstats->tx_aborted_errors =
4246 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4247 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4248 nstats->tx_carrier_errors =
4249 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4250 nstats->tx_fifo_errors = 0;
4251 nstats->tx_heartbeat_errors = 0;
4252 nstats->tx_window_errors = 0;
4253
4254 nstats->tx_errors = nstats->tx_aborted_errors +
4255 nstats->tx_carrier_errors +
4256 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4257}
4258
4259static void bnx2x_drv_stats_update(struct bnx2x *bp)
4260{
4261 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4262 int i;
4263
4264 estats->driver_xoff = 0;
4265 estats->rx_err_discard_pkt = 0;
4266 estats->rx_skb_alloc_failed = 0;
4267 estats->hw_csum_err = 0;
54b9ddaa 4268 for_each_queue(bp, i) {
4269 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4270
4271 estats->driver_xoff += qstats->driver_xoff;
4272 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4273 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4274 estats->hw_csum_err += qstats->hw_csum_err;
4275 }
4276}
4277
bb2a0f7a 4278static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4279{
bb2a0f7a 4280 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4281
4282 if (*stats_comp != DMAE_COMP_VAL)
4283 return;
4284
4285 if (bp->port.pmf)
de832a55 4286 bnx2x_hw_stats_update(bp);
a2fbb9ea 4287
4288 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4289 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4290 bnx2x_panic();
4291 return;
a2fbb9ea
ET
4292 }
4293
4294 bnx2x_net_stats_update(bp);
4295 bnx2x_drv_stats_update(bp);
4296
7995c64e 4297 if (netif_msg_timer(bp)) {
ca00392c 4298 struct bnx2x_fastpath *fp0_rx = bp->fp;
54b9ddaa 4299 struct bnx2x_fastpath *fp0_tx = bp->fp;
4300 struct tstorm_per_client_stats *old_tclient =
4301 &bp->fp->old_tclient;
4302 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4303 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4304 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4305 int i;
a2fbb9ea 4306
7995c64e 4307 netdev_printk(KERN_DEBUG, bp->dev, "\n");
4308 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4309 " tx pkt (%lx)\n",
4310 bnx2x_tx_avail(fp0_tx),
4311 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4312 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4313 " rx pkt (%lx)\n",
4314 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4315 fp0_rx->rx_comp_cons),
4316 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4317 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4318 "brb truncate %u\n",
4319 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4320 qstats->driver_xoff,
4321 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4322 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4323 "packets_too_big_discard %lu no_buff_discard %lu "
4324 "mac_discard %u mac_filter_discard %u "
4325 "xxovrflow_discard %u brb_truncate_discard %u "
4326 "ttl0_discard %u\n",
4781bfad 4327 le32_to_cpu(old_tclient->checksum_discard),
4328 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4329 bnx2x_hilo(&qstats->no_buff_discard_hi),
4330 estats->mac_discard, estats->mac_filter_discard,
4331 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4332 le32_to_cpu(old_tclient->ttl0_discard));
4333
4334 for_each_queue(bp, i) {
4335 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4336 bnx2x_fp(bp, i, tx_pkt),
4337 bnx2x_fp(bp, i, rx_pkt),
4338 bnx2x_fp(bp, i, rx_calls));
4339 }
4340 }
4341
4342 bnx2x_hw_stats_post(bp);
4343 bnx2x_storm_stats_post(bp);
4344}
a2fbb9ea 4345
4346static void bnx2x_port_stats_stop(struct bnx2x *bp)
4347{
4348 struct dmae_command *dmae;
4349 u32 opcode;
4350 int loader_idx = PMF_DMAE_C(bp);
4351 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4352
bb2a0f7a 4353 bp->executer_idx = 0;
a2fbb9ea 4354
4355 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4356 DMAE_CMD_C_ENABLE |
4357 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4358#ifdef __BIG_ENDIAN
bb2a0f7a 4359 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4360#else
bb2a0f7a 4361 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4362#endif
4363 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4364 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4365
4366 if (bp->port.port_stx) {
4367
4368 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4369 if (bp->func_stx)
4370 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4371 else
4372 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4373 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4374 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4375 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4376 dmae->dst_addr_hi = 0;
4377 dmae->len = sizeof(struct host_port_stats) >> 2;
4378 if (bp->func_stx) {
4379 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4380 dmae->comp_addr_hi = 0;
4381 dmae->comp_val = 1;
4382 } else {
4383 dmae->comp_addr_lo =
4384 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4385 dmae->comp_addr_hi =
4386 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4387 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4388
4389 *stats_comp = 0;
4390 }
4391 }
4392
4393 if (bp->func_stx) {
4394
4395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4396 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4397 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4398 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4399 dmae->dst_addr_lo = bp->func_stx >> 2;
4400 dmae->dst_addr_hi = 0;
4401 dmae->len = sizeof(struct host_func_stats) >> 2;
4402 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4403 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4404 dmae->comp_val = DMAE_COMP_VAL;
4405
4406 *stats_comp = 0;
a2fbb9ea 4407 }
4408}
4409
4410static void bnx2x_stats_stop(struct bnx2x *bp)
4411{
4412 int update = 0;
4413
4414 bnx2x_stats_comp(bp);
4415
4416 if (bp->port.pmf)
4417 update = (bnx2x_hw_stats_update(bp) == 0);
4418
4419 update |= (bnx2x_storm_stats_update(bp) == 0);
4420
4421 if (update) {
4422 bnx2x_net_stats_update(bp);
a2fbb9ea 4423
4424 if (bp->port.pmf)
4425 bnx2x_port_stats_stop(bp);
4426
4427 bnx2x_hw_stats_post(bp);
4428 bnx2x_stats_comp(bp);
4429 }
4430}
4431
4432static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4433{
4434}
4435
4436static const struct {
4437 void (*action)(struct bnx2x *bp);
4438 enum bnx2x_stats_state next_state;
4439} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4440/* state event */
4441{
4442/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4443/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4444/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4445/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4446},
4447{
4448/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4449/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4450/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4451/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4452}
4453};
4454
4455static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4456{
4457 enum bnx2x_stats_state state = bp->stats_state;
4458
4459 bnx2x_stats_stm[state][event].action(bp);
4460 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4461
4462 /* Make sure the state has been "changed" */
4463 smp_wmb();
4464
7995c64e 4465 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4466 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4467 state, event, bp->stats_state);
4468}
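/*
 * Illustrative walk-through (not from the original source; event names
 * assumed to follow the table comments above): bringing a port up and
 * down drives the 2x4 table like this:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *		DISABLED + LINK_UP -> bnx2x_stats_start(),  ENABLED
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *		ENABLED  + UPDATE  -> bnx2x_stats_update(), ENABLED
 *	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 *		ENABLED  + STOP    -> bnx2x_stats_stop(),   DISABLED
 */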
4469
4470static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4471{
4472 struct dmae_command *dmae;
4473 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4474
4475 /* sanity */
4476 if (!bp->port.pmf || !bp->port.port_stx) {
4477 BNX2X_ERR("BUG!\n");
4478 return;
4479 }
4480
4481 bp->executer_idx = 0;
4482
4483 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4484 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4485 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4486 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4487#ifdef __BIG_ENDIAN
4488 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4489#else
4490 DMAE_CMD_ENDIANITY_DW_SWAP |
4491#endif
4492 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4493 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4494 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4495 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4496 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4497 dmae->dst_addr_hi = 0;
4498 dmae->len = sizeof(struct host_port_stats) >> 2;
4499 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4500 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4501 dmae->comp_val = DMAE_COMP_VAL;
4502
4503 *stats_comp = 0;
4504 bnx2x_hw_stats_post(bp);
4505 bnx2x_stats_comp(bp);
4506}
4507
4508static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4509{
4510 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4511 int port = BP_PORT(bp);
4512 int func;
4513 u32 func_stx;
4514
4515 /* sanity */
4516 if (!bp->port.pmf || !bp->func_stx) {
4517 BNX2X_ERR("BUG!\n");
4518 return;
4519 }
4520
4521 /* save our func_stx */
4522 func_stx = bp->func_stx;
4523
4524 for (vn = VN_0; vn < vn_max; vn++) {
4525 func = 2*vn + port;
4526
4527 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4528 bnx2x_func_stats_init(bp);
4529 bnx2x_hw_stats_post(bp);
4530 bnx2x_stats_comp(bp);
4531 }
4532
4533 /* restore our func_stx */
4534 bp->func_stx = func_stx;
4535}
4536
4537static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4538{
4539 struct dmae_command *dmae = &bp->stats_dmae;
4540 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4541
4542 /* sanity */
4543 if (!bp->func_stx) {
4544 BNX2X_ERR("BUG!\n");
4545 return;
4546 }
4547
4548 bp->executer_idx = 0;
4549 memset(dmae, 0, sizeof(struct dmae_command));
4550
4551 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4552 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4553 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4554#ifdef __BIG_ENDIAN
4555 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4556#else
4557 DMAE_CMD_ENDIANITY_DW_SWAP |
4558#endif
4559 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4560 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4561 dmae->src_addr_lo = bp->func_stx >> 2;
4562 dmae->src_addr_hi = 0;
4563 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4564 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4565 dmae->len = sizeof(struct host_func_stats) >> 2;
4566 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4567 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4568 dmae->comp_val = DMAE_COMP_VAL;
4569
4570 *stats_comp = 0;
4571 bnx2x_hw_stats_post(bp);
4572 bnx2x_stats_comp(bp);
4573}
4574
4575static void bnx2x_stats_init(struct bnx2x *bp)
4576{
4577 int port = BP_PORT(bp);
4578 int func = BP_FUNC(bp);
4579 int i;
4580
4581 bp->stats_pending = 0;
4582 bp->executer_idx = 0;
4583 bp->stats_counter = 0;
4584
4585 /* port and func stats for management */
4586 if (!BP_NOMCP(bp)) {
4587 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4588 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4589
4590 } else {
4591 bp->port.port_stx = 0;
4592 bp->func_stx = 0;
4593 }
4594 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4595 bp->port.port_stx, bp->func_stx);
4596
4597 /* port stats */
4598 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4599 bp->port.old_nig_stats.brb_discard =
4600 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4601 bp->port.old_nig_stats.brb_truncate =
4602 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4603 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4604 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4605 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4606 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4607
4608 /* function stats */
4609 for_each_queue(bp, i) {
4610 struct bnx2x_fastpath *fp = &bp->fp[i];
4611
4612 memset(&fp->old_tclient, 0,
4613 sizeof(struct tstorm_per_client_stats));
4614 memset(&fp->old_uclient, 0,
4615 sizeof(struct ustorm_per_client_stats));
4616 memset(&fp->old_xclient, 0,
4617 sizeof(struct xstorm_per_client_stats));
4618 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4619 }
4620
4621 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4622 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4623
4624 bp->stats_state = STATS_STATE_DISABLED;
4625
4626 if (bp->port.pmf) {
4627 if (bp->port.port_stx)
4628 bnx2x_port_stats_base_init(bp);
4629
4630 if (bp->func_stx)
4631 bnx2x_func_stats_base_init(bp);
4632
4633 } else if (bp->func_stx)
4634 bnx2x_func_stats_base_update(bp);
4635}
4636
4637static void bnx2x_timer(unsigned long data)
4638{
4639 struct bnx2x *bp = (struct bnx2x *) data;
4640
4641 if (!netif_running(bp->dev))
4642 return;
4643
4644 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4645 goto timer_restart;
4646
4647 if (poll) {
4648 struct bnx2x_fastpath *fp = &bp->fp[0];
4649 int rc;
4650
7961f791 4651 bnx2x_tx_int(fp);
4652 rc = bnx2x_rx_int(fp, 1000);
4653 }
4654
4655 if (!BP_NOMCP(bp)) {
4656 int func = BP_FUNC(bp);
4657 u32 drv_pulse;
4658 u32 mcp_pulse;
4659
4660 ++bp->fw_drv_pulse_wr_seq;
4661 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4662 /* TBD - add SYSTEM_TIME */
4663 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4664 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4665
34f80b04 4666 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4667 MCP_PULSE_SEQ_MASK);
4668 /* The delta between driver pulse and mcp response
4669 * should be 1 (before mcp response) or 0 (after mcp response)
4670 */
4671 if ((drv_pulse != mcp_pulse) &&
4672 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4673 /* someone lost a heartbeat... */
4674 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4675 drv_pulse, mcp_pulse);
4676 }
4677 }
4678
f34d28ea 4679 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 4680 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4681
f1410647 4682timer_restart:
4683 mod_timer(&bp->timer, jiffies + bp->current_interval);
4684}
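/*
 * Illustrative note (not from the original source): the heartbeat check
 * above tolerates exactly one step of skew.  If the driver has written
 * pulse N, the MCP echoes N once it has seen it, so a healthy link shows
 * either drv_pulse == mcp_pulse (MCP already answered) or
 * drv_pulse == mcp_pulse + 1 modulo MCP_PULSE_SEQ_MASK + 1 (answer still
 * pending); any other delta means a missed heartbeat.
 */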
4685
4686/* end of Statistics */
4687
4688/* nic init */
4689
4690/*
4691 * nic init service functions
4692 */
4693
34f80b04 4694static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4695{
4696 int port = BP_PORT(bp);
4697
4698 /* "CSTORM" */
4699 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4700 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4701 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4702 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4703 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4704 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4705}
4706
4707static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4708 dma_addr_t mapping, int sb_id)
4709{
4710 int port = BP_PORT(bp);
bb2a0f7a 4711 int func = BP_FUNC(bp);
a2fbb9ea 4712 int index;
34f80b04 4713 u64 section;
4714
4715 /* USTORM */
4716 section = ((u64)mapping) + offsetof(struct host_status_block,
4717 u_status_block);
34f80b04 4718 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4719
4720 REG_WR(bp, BAR_CSTRORM_INTMEM +
4721 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4722 REG_WR(bp, BAR_CSTRORM_INTMEM +
4723 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4724 U64_HI(section));
4725 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4726 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4727
4728 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4729 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4730 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4731
4732 /* CSTORM */
4733 section = ((u64)mapping) + offsetof(struct host_status_block,
4734 c_status_block);
34f80b04 4735 sb->c_status_block.status_block_id = sb_id;
4736
4737 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4738 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4739 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4740 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4741 U64_HI(section));
7a9b2557 4742 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4743 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4744
4745 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4746 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4747 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4748
4749 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4750}
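/*
 * Illustrative note (not from the original source): although the status
 * block keeps separate USTORM and CSTORM sections, both are hosted by
 * the CSTORM on this firmware - hence the quoted "CSTORM" comment in
 * bnx2x_zero_sb() and the BAR_CSTRORM_INTMEM writes for both sections
 * above.  Each HC_DISABLE word starts out as 1 (index disabled);
 * bnx2x_update_coalesce() later re-enables the rx/tx CQ indices it
 * programs timeouts for.
 */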
4751
4752static void bnx2x_zero_def_sb(struct bnx2x *bp)
4753{
4754 int func = BP_FUNC(bp);
a2fbb9ea 4755
ca00392c 4756 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4757 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4758 sizeof(struct tstorm_def_status_block)/4);
4759 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4760 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4761 sizeof(struct cstorm_def_status_block_u)/4);
4762 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4763 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4764 sizeof(struct cstorm_def_status_block_c)/4);
4765 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4766 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4767 sizeof(struct xstorm_def_status_block)/4);
4768}
4769
4770static void bnx2x_init_def_sb(struct bnx2x *bp,
4771 struct host_def_status_block *def_sb,
34f80b04 4772 dma_addr_t mapping, int sb_id)
a2fbb9ea 4773{
4774 int port = BP_PORT(bp);
4775 int func = BP_FUNC(bp);
4776 int index, val, reg_offset;
4777 u64 section;
4778
4779 /* ATTN */
4780 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4781 atten_status_block);
34f80b04 4782 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4783
4784 bp->attn_state = 0;
4785
4786 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4787 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4788
34f80b04 4789 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4790 bp->attn_group[index].sig[0] = REG_RD(bp,
4791 reg_offset + 0x10*index);
4792 bp->attn_group[index].sig[1] = REG_RD(bp,
4793 reg_offset + 0x4 + 0x10*index);
4794 bp->attn_group[index].sig[2] = REG_RD(bp,
4795 reg_offset + 0x8 + 0x10*index);
4796 bp->attn_group[index].sig[3] = REG_RD(bp,
4797 reg_offset + 0xc + 0x10*index);
4798 }
4799
4800 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4801 HC_REG_ATTN_MSG0_ADDR_L);
4802
4803 REG_WR(bp, reg_offset, U64_LO(section));
4804 REG_WR(bp, reg_offset + 4, U64_HI(section));
4805
4806 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4807
4808 val = REG_RD(bp, reg_offset);
34f80b04 4809 val |= sb_id;
a2fbb9ea
ET
4810 REG_WR(bp, reg_offset, val);
4811
4812 /* USTORM */
4813 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4814 u_def_status_block);
34f80b04 4815 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4816
4817 REG_WR(bp, BAR_CSTRORM_INTMEM +
4818 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4819 REG_WR(bp, BAR_CSTRORM_INTMEM +
4820 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4821 U64_HI(section));
4822 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4823 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4824
4825 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4826 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4827 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4828
4829 /* CSTORM */
4830 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4831 c_def_status_block);
34f80b04 4832 def_sb->c_def_status_block.status_block_id = sb_id;
4833
4834 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4835 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4836 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4837 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4838 U64_HI(section));
5c862848 4839 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4840 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4841
4842 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4843 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4844 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4845
4846 /* TSTORM */
4847 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4848 t_def_status_block);
34f80b04 4849 def_sb->t_def_status_block.status_block_id = sb_id;
4850
4851 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4852 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4853 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4854 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4855 U64_HI(section));
5c862848 4856 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4857 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4858
4859 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4860 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4861 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4862
4863 /* XSTORM */
4864 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4865 x_def_status_block);
34f80b04 4866 def_sb->x_def_status_block.status_block_id = sb_id;
4867
4868 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4869 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4870 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4871 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4872 U64_HI(section));
5c862848 4873 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4874 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4875
4876 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4877 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4878 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4879
bb2a0f7a 4880 bp->stats_pending = 0;
66e855f3 4881 bp->set_mac_pending = 0;
bb2a0f7a 4882
34f80b04 4883 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4884}
4885
4886static void bnx2x_update_coalesce(struct bnx2x *bp)
4887{
34f80b04 4888 int port = BP_PORT(bp);
4889 int i;
4890
4891 for_each_queue(bp, i) {
34f80b04 4892 int sb_id = bp->fp[i].sb_id;
4893
4894 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4895 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4896 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4897 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 4898 bp->rx_ticks/(4 * BNX2X_BTR));
4899 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4900 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4901 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 4902 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4903
4904 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4905 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4906 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4907 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 4908 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 4909 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4910 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4911 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 4912 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4913 }
4914}
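/*
 * Illustrative note (not from the original source): per queue, the HC
 * timeout register receives bp->rx_ticks (or tx_ticks) scaled down by
 * 4 * BNX2X_BTR, and the matching SB_HC_DISABLE field is derived from
 * the same expression - a tick value small enough to scale to 0 disables
 * coalescing on that status-block index instead of programming a zero
 * timeout.
 */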
4915
4916static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4917 struct bnx2x_fastpath *fp, int last)
4918{
4919 int i;
4920
4921 for (i = 0; i < last; i++) {
4922 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4923 struct sk_buff *skb = rx_buf->skb;
4924
4925 if (skb == NULL) {
4926 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4927 continue;
4928 }
4929
4930 if (fp->tpa_state[i] == BNX2X_TPA_START)
4931 pci_unmap_single(bp->pdev,
4932 pci_unmap_addr(rx_buf, mapping),
356e2385 4933 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4934
4935 dev_kfree_skb(skb);
4936 rx_buf->skb = NULL;
4937 }
4938}
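/*
 * Illustrative note (not from the original source): only pool entries in
 * the BNX2X_TPA_START state are PCI-unmapped here; an entry in
 * BNX2X_TPA_STOP holds an skb that is not currently DMA-mapped (its
 * mapping is cleared at init), so that skb is simply freed.
 */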
4939
4940static void bnx2x_init_rx_rings(struct bnx2x *bp)
4941{
7a9b2557 4942 int func = BP_FUNC(bp);
4943 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4944 ETH_MAX_AGGREGATION_QUEUES_E1H;
4945 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4946 int i, j;
a2fbb9ea 4947
87942b46 4948 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4949 DP(NETIF_MSG_IFUP,
4950 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4951
7a9b2557 4952 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4953
54b9ddaa 4954 for_each_queue(bp, j) {
32626230 4955 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4956
32626230 4957 for (i = 0; i < max_agg_queues; i++) {
4958 fp->tpa_pool[i].skb =
4959 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4960 if (!fp->tpa_pool[i].skb) {
4961 BNX2X_ERR("Failed to allocate TPA "
4962 "skb pool for queue[%d] - "
4963 "disabling TPA on this "
4964 "queue!\n", j);
4965 bnx2x_free_tpa_pool(bp, fp, i);
4966 fp->disable_tpa = 1;
4967 break;
4968 }
4969 pci_unmap_addr_set((struct sw_rx_bd *)
4970 &bp->fp->tpa_pool[i],
4971 mapping, 0);
4972 fp->tpa_state[i] = BNX2X_TPA_STOP;
4973 }
4974 }
4975 }
4976
54b9ddaa 4977 for_each_queue(bp, j) {
a2fbb9ea
ET
4978 struct bnx2x_fastpath *fp = &bp->fp[j];
4979
4980 fp->rx_bd_cons = 0;
4981 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4982 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4983
4984 /* "next page" elements initialization */
4985 /* SGE ring */
4986 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4987 struct eth_rx_sge *sge;
4988
4989 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4990 sge->addr_hi =
4991 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4992 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4993 sge->addr_lo =
4994 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4995 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4996 }
4997
4998 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4999
7a9b2557 5000 /* RX BD ring */
a2fbb9ea
ET
5001 for (i = 1; i <= NUM_RX_RINGS; i++) {
5002 struct eth_rx_bd *rx_bd;
5003
5004 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5005 rx_bd->addr_hi =
5006 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5007 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5008 rx_bd->addr_lo =
5009 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5010 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5011 }
5012
34f80b04 5013 /* CQ ring */
a2fbb9ea
ET
5014 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5015 struct eth_rx_cqe_next_page *nextpg;
5016
5017 nextpg = (struct eth_rx_cqe_next_page *)
5018 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5019 nextpg->addr_hi =
5020 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5021 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5022 nextpg->addr_lo =
5023 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5024 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5025 }
5026
7a9b2557
VZ
5027 /* Allocate SGEs and initialize the ring elements */
5028 for (i = 0, ring_prod = 0;
5029 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5030
7a9b2557
VZ
5031 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5032 BNX2X_ERR("was only able to allocate "
5033 "%d rx sges\n", i);
5034 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5035 /* Cleanup already allocated elements */
5036 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5037 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5038 fp->disable_tpa = 1;
5039 ring_prod = 0;
5040 break;
5041 }
5042 ring_prod = NEXT_SGE_IDX(ring_prod);
5043 }
5044 fp->rx_sge_prod = ring_prod;
5045
5046 /* Allocate BDs and initialize BD ring */
66e855f3 5047 fp->rx_comp_cons = 0;
7a9b2557 5048 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5049 for (i = 0; i < bp->rx_ring_size; i++) {
5050 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5051 BNX2X_ERR("was only able to allocate "
de832a55
EG
5052 "%d rx skbs on queue[%d]\n", i, j);
5053 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5054 break;
5055 }
5056 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5057 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5058 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5059 }
5060
7a9b2557
VZ
5061 fp->rx_bd_prod = ring_prod;
5062 /* must not have more available CQEs than BDs */
5063 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5064 cqe_ring_prod);
a2fbb9ea
ET
5065 fp->rx_pkt = fp->rx_calls = 0;
5066
7a9b2557
VZ
5067 /* Warning!
5068 * this will generate an interrupt (to the TSTORM)
5069 * must only be done after chip is initialized
5070 */
5071 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5072 fp->rx_sge_prod);
a2fbb9ea
ET
5073 if (j != 0)
5074 continue;
5075
5076 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5077 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5078 U64_LO(fp->rx_comp_mapping));
5079 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5080 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5081 U64_HI(fp->rx_comp_mapping));
5082 }
5083}
5084
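/*
 * Tx ring init: only the "next page" BD of each page needs chaining;
 * the doorbell data and all producer/consumer indices start at zero.
 */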
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

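/*
 * Per-connection context init: the ustorm side of each context
 * describes the Rx BD/SGE rings (plus TPA parameters when enabled),
 * while the cstorm/xstorm side binds the Tx ring to its status block
 * and statistics client.
 */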
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}

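/*
 * Per-client Tstorm configuration: MTU, statistics counter and
 * (optionally) VLAN tag removal, written once for every fastpath
 * client of this function.
 */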
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

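/*
 * Internal memory init is split by scope: "common" runs once per chip,
 * "port" once per port and "func" once per PCI function, matching the
 * load codes returned by the MCP (see bnx2x_init_internal() below).
 */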
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

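/*
 * Top-level NIC init: per-queue status blocks first, then the default
 * status block, rings, contexts and internal memory; interrupts are
 * enabled only after everything else is in place.
 */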
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

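/*
 * The firmware images handled by bnx2x_gunzip() below are
 * gzip-compressed; a DMA-coherent buffer of FW_BUF_SIZE and a zlib
 * stream are allocated once and reused for all inflate operations.
 */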
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

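/*
 * The self test below pushes NIG loopback packets through the parser
 * (PRS) and buffer block (BRB) and compares the NIG/PRS packet
 * counters against expected values; each failing stage is reported
 * with its own negative return code.
 */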
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}


static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
				SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

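/*
 * Common (per-chip) HW init: take the blocks out of reset, bring up
 * PXP/DMAE, zero the storm memories and, on an E1 first power-up, run
 * the internal memory self test.  Only the function that received
 * FW_MSG_CODE_DRV_LOAD_COMMON from the MCP executes this.
 */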
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		pr_alert("please adjust the size of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a 1=valid bit is
   added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif

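/*
 * Worked example (illustrative address only): for addr = 0x1234567000,
 * ONCHIP_ADDR1() yields 0x01234567 (bits 12-43) and ONCHIP_ADDR2()
 * yields 0x00100000 (bits 44-52 are zero here, plus the valid bit at
 * position 20 of the high word, i.e. bit 52 overall), which
 * bnx2x_ilt_wr() below writes as one wide-bus pair.
 */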
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

6518static int bnx2x_init_func(struct bnx2x *bp)
6519{
6520 int port = BP_PORT(bp);
6521 int func = BP_FUNC(bp);
8badd27a 6522 u32 addr, val;
34f80b04
EG
6523 int i;
6524
6525 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6526
8badd27a
EG
6527 /* set MSI reconfigure capability */
6528 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6529 val = REG_RD(bp, addr);
6530 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6531 REG_WR(bp, addr, val);
6532
34f80b04
EG
6533 i = FUNC_ILT_BASE(func);
6534
6535 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6536 if (CHIP_IS_E1H(bp)) {
6537 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6538 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6539 } else /* E1 */
6540 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6541 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6542
37b091ba
MC
6543#ifdef BCM_CNIC
6544 i += 1 + CNIC_ILT_LINES;
6545 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6546 if (CHIP_IS_E1(bp))
6547 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6548 else {
6549 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6550 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6551 }
6552
6553 i++;
6554 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6555 if (CHIP_IS_E1(bp))
6556 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6557 else {
6558 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6559 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6560 }
6561
6562 i++;
6563 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6564 if (CHIP_IS_E1(bp))
6565 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6566 else {
6567 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6568 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6569 }
6570
6571 /* tell the searcher where the T2 table is */
6572 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6573
6574 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6575 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6576
6577 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6578 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6579 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6580
6581 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6582#endif
34f80b04
EG
6583
6584 if (CHIP_IS_E1H(bp)) {
573f2035
EG
6585 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6586 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6587 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6588 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6589 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6590 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6591 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6592 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6593 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
6594
6595 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6596 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6597 }
6598
6599 /* HC init per function */
6600 if (CHIP_IS_E1H(bp)) {
6601 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6602
6603 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6604 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6605 }
94a78b79 6606 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6607
c14423fe 6608 /* Reset PCIE errors for debug */
6609 REG_WR(bp, 0x2114, 0xffffffff);
6610 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6611
6612 return 0;
6613}
6614
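/* Note: the switch in bnx2x_init_hw() below deliberately falls through
 * (see the "no break" markers), so a COMMON load also performs PORT and
 * FUNCTION init, and a PORT load also performs FUNCTION init.
 */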
6615static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6616{
6617 int i, rc = 0;
a2fbb9ea 6618
6619 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6620 BP_FUNC(bp), load_code);
a2fbb9ea 6621
6622 bp->dmae_ready = 0;
6623 mutex_init(&bp->dmae_mutex);
6624 rc = bnx2x_gunzip_init(bp);
6625 if (rc)
6626 return rc;
a2fbb9ea 6627
6628 switch (load_code) {
6629 case FW_MSG_CODE_DRV_LOAD_COMMON:
6630 rc = bnx2x_init_common(bp);
6631 if (rc)
6632 goto init_hw_err;
6633 /* no break */
6634
6635 case FW_MSG_CODE_DRV_LOAD_PORT:
6636 bp->dmae_ready = 1;
6637 rc = bnx2x_init_port(bp);
6638 if (rc)
6639 goto init_hw_err;
6640 /* no break */
6641
6642 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6643 bp->dmae_ready = 1;
6644 rc = bnx2x_init_func(bp);
6645 if (rc)
6646 goto init_hw_err;
6647 break;
6648
6649 default:
6650 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6651 break;
6652 }
6653
6654 if (!BP_NOMCP(bp)) {
6655 int func = BP_FUNC(bp);
6656
6657 bp->fw_drv_pulse_wr_seq =
34f80b04 6658 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6659 DRV_PULSE_SEQ_MASK);
6660 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6661 }
a2fbb9ea 6662
6663 /* this needs to be done before gunzip end */
6664 bnx2x_zero_def_sb(bp);
6665 for_each_queue(bp, i)
6666 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6667#ifdef BCM_CNIC
6668 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6669#endif
6670
6671init_hw_err:
6672 bnx2x_gunzip_end(bp);
6673
6674 return rc;
6675}
6676
6677static void bnx2x_free_mem(struct bnx2x *bp)
6678{
6679
6680#define BNX2X_PCI_FREE(x, y, size) \
6681 do { \
6682 if (x) { \
6683 pci_free_consistent(bp->pdev, size, x, y); \
6684 x = NULL; \
6685 y = 0; \
6686 } \
6687 } while (0)
6688
6689#define BNX2X_FREE(x) \
6690 do { \
6691 if (x) { \
6692 vfree(x); \
6693 x = NULL; \
6694 } \
6695 } while (0)
6696
6697 int i;
6698
6699 /* fastpath */
555f6c78 6700 /* Common */
6701 for_each_queue(bp, i) {
6702
555f6c78 6703 /* status blocks */
6704 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6705 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6706 sizeof(struct host_status_block));
6707 }
6708 /* Rx */
54b9ddaa 6709 for_each_queue(bp, i) {
a2fbb9ea 6710
555f6c78 6711 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6712 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6713 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6714 bnx2x_fp(bp, i, rx_desc_mapping),
6715 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6716
6717 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6718 bnx2x_fp(bp, i, rx_comp_mapping),
6719 sizeof(struct eth_fast_path_rx_cqe) *
6720 NUM_RCQ_BD);
a2fbb9ea 6721
7a9b2557 6722 /* SGE ring */
32626230 6723 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6724 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6725 bnx2x_fp(bp, i, rx_sge_mapping),
6726 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6727 }
555f6c78 6728 /* Tx */
54b9ddaa 6729 for_each_queue(bp, i) {
6730
6731 /* fastpath tx rings: tx_buf tx_desc */
6732 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6733 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6734 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6735 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6736 }
6737 /* end of fastpath */
6738
6739 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6740 sizeof(struct host_def_status_block));
6741
6742 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6743 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6744
37b091ba 6745#ifdef BCM_CNIC
6746 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6747 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6748 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6749 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6750 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6751 sizeof(struct host_status_block));
a2fbb9ea 6752#endif
7a9b2557 6753 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6754
6755#undef BNX2X_PCI_FREE
6756#undef BNX2X_FREE
6757}
6758
6759static int bnx2x_alloc_mem(struct bnx2x *bp)
6760{
6761
6762#define BNX2X_PCI_ALLOC(x, y, size) \
6763 do { \
6764 x = pci_alloc_consistent(bp->pdev, size, y); \
6765 if (x == NULL) \
6766 goto alloc_mem_err; \
6767 memset(x, 0, size); \
6768 } while (0)
6769
6770#define BNX2X_ALLOC(x, size) \
6771 do { \
6772 x = vmalloc(size); \
6773 if (x == NULL) \
6774 goto alloc_mem_err; \
6775 memset(x, 0, size); \
6776 } while (0)
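/* Note: both allocation macros zero the buffer and jump to alloc_mem_err
 * on failure, so a partially completed bnx2x_alloc_mem() can always be
 * unwound by bnx2x_free_mem(), whose free macros are NULL-safe.
 */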
6777
6778 int i;
6779
6780 /* fastpath */
555f6c78 6781 /* Common */
6782 for_each_queue(bp, i) {
6783 bnx2x_fp(bp, i, bp) = bp;
6784
555f6c78 6785 /* status blocks */
6786 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6787 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6788 sizeof(struct host_status_block));
6789 }
6790 /* Rx */
54b9ddaa 6791 for_each_queue(bp, i) {
a2fbb9ea 6792
555f6c78 6793 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6794 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6795 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6796 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6797 &bnx2x_fp(bp, i, rx_desc_mapping),
6798 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6799
6800 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6801 &bnx2x_fp(bp, i, rx_comp_mapping),
6802 sizeof(struct eth_fast_path_rx_cqe) *
6803 NUM_RCQ_BD);
6804
6805 /* SGE ring */
6806 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6807 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6808 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6809 &bnx2x_fp(bp, i, rx_sge_mapping),
6810 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6811 }
555f6c78 6812 /* Tx */
54b9ddaa 6813 for_each_queue(bp, i) {
555f6c78 6814
6815 /* fastpath tx rings: tx_buf tx_desc */
6816 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6817 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6818 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6819 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6820 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6821 }
6822 /* end of fastpath */
6823
6824 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6825 sizeof(struct host_def_status_block));
6826
6827 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6828 sizeof(struct bnx2x_slowpath));
6829
37b091ba 6830#ifdef BCM_CNIC
6831 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6832
 6833 /* allocate searcher T2 table
 6834 we allocate 1/4 of the connection count for T2
 6835 (T2 is not entered into the ILT) */
6836 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6837
37b091ba 6838 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 6839 for (i = 0; i < 16*1024; i += 64)
37b091ba 6840 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
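	/* The loop above stores, in the last 8 bytes of each 64-byte T2
	 * element, the physical address of the next element - linking all
	 * 256 (16K/64) entries into the searcher's free list. */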
a2fbb9ea 6841
37b091ba 6842 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6843 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6844
6845 /* QM queues (128*MAX_CONN) */
6846 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6847
6848 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6849 sizeof(struct host_status_block));
6850#endif
6851
6852 /* Slow path ring */
6853 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6854
6855 return 0;
6856
6857alloc_mem_err:
6858 bnx2x_free_mem(bp);
6859 return -ENOMEM;
6860
6861#undef BNX2X_PCI_ALLOC
6862#undef BNX2X_ALLOC
6863}
6864
6865static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6866{
6867 int i;
6868
54b9ddaa 6869 for_each_queue(bp, i) {
6870 struct bnx2x_fastpath *fp = &bp->fp[i];
6871
6872 u16 bd_cons = fp->tx_bd_cons;
6873 u16 sw_prod = fp->tx_pkt_prod;
6874 u16 sw_cons = fp->tx_pkt_cons;
6875
6876 while (sw_cons != sw_prod) {
6877 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6878 sw_cons++;
6879 }
6880 }
6881}
6882
6883static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6884{
6885 int i, j;
6886
54b9ddaa 6887 for_each_queue(bp, j) {
6888 struct bnx2x_fastpath *fp = &bp->fp[j];
6889
6890 for (i = 0; i < NUM_RX_BD; i++) {
6891 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6892 struct sk_buff *skb = rx_buf->skb;
6893
6894 if (skb == NULL)
6895 continue;
6896
6897 pci_unmap_single(bp->pdev,
6898 pci_unmap_addr(rx_buf, mapping),
356e2385 6899 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6900
6901 rx_buf->skb = NULL;
6902 dev_kfree_skb(skb);
6903 }
7a9b2557 6904 if (!fp->disable_tpa)
6905 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6906 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6907 ETH_MAX_AGGREGATION_QUEUES_E1H);
6908 }
6909}
6910
6911static void bnx2x_free_skbs(struct bnx2x *bp)
6912{
6913 bnx2x_free_tx_skbs(bp);
6914 bnx2x_free_rx_skbs(bp);
6915}
6916
6917static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6918{
34f80b04 6919 int i, offset = 1;
6920
6921 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6922 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6923 bp->msix_table[0].vector);
6924
6925#ifdef BCM_CNIC
6926 offset++;
6927#endif
a2fbb9ea 6928 for_each_queue(bp, i) {
c14423fe 6929 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6930 "state %x\n", i, bp->msix_table[i + offset].vector,
6931 bnx2x_fp(bp, i, state));
6932
34f80b04 6933 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6934 }
6935}
6936
6cbe5065 6937static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
a2fbb9ea 6938{
a2fbb9ea 6939 if (bp->flags & USING_MSIX_FLAG) {
6940 if (!disable_only)
6941 bnx2x_free_msix_irqs(bp);
a2fbb9ea 6942 pci_disable_msix(bp->pdev);
6943 bp->flags &= ~USING_MSIX_FLAG;
6944
8badd27a 6945 } else if (bp->flags & USING_MSI_FLAG) {
6946 if (!disable_only)
6947 free_irq(bp->pdev->irq, bp->dev);
6948 pci_disable_msi(bp->pdev);
6949 bp->flags &= ~USING_MSI_FLAG;
6950
6cbe5065 6951 } else if (!disable_only)
6952 free_irq(bp->pdev->irq, bp->dev);
6953}
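/* Note: disable_only skips free_irq() and only tears down the MSI/MSI-X
 * state; the load error paths use it when vectors were enabled but the
 * IRQs were never actually requested.
 */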
6954
6955static int bnx2x_enable_msix(struct bnx2x *bp)
6956{
6957 int i, rc, offset = 1;
6958 int igu_vec = 0;
a2fbb9ea 6959
6960 bp->msix_table[0].entry = igu_vec;
6961 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6962
6963#ifdef BCM_CNIC
6964 igu_vec = BP_L_ID(bp) + offset;
6965 bp->msix_table[1].entry = igu_vec;
6966 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6967 offset++;
6968#endif
34f80b04 6969 for_each_queue(bp, i) {
8badd27a 6970 igu_vec = BP_L_ID(bp) + offset + i;
6971 bp->msix_table[i + offset].entry = igu_vec;
6972 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6973 "(fastpath #%u)\n", i + offset, igu_vec, i);
6974 }
6975
34f80b04 6976 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6977 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6978 if (rc) {
6979 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6980 return rc;
34f80b04 6981 }
8badd27a 6982
6983 bp->flags |= USING_MSIX_FLAG;
6984
6985 return 0;
6986}
6987
6988static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6989{
34f80b04 6990 int i, rc, offset = 1;
a2fbb9ea 6991
6992 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6993 bp->dev->name, bp->dev);
6994 if (rc) {
6995 BNX2X_ERR("request sp irq failed\n");
6996 return -EBUSY;
6997 }
6998
6999#ifdef BCM_CNIC
7000 offset++;
7001#endif
a2fbb9ea 7002 for_each_queue(bp, i) {
555f6c78 7003 struct bnx2x_fastpath *fp = &bp->fp[i];
7004 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7005 bp->dev->name, i);
ca00392c 7006
34f80b04 7007 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7008 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7009 if (rc) {
555f6c78 7010 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7011 bnx2x_free_msix_irqs(bp);
7012 return -EBUSY;
7013 }
7014
555f6c78 7015 fp->state = BNX2X_FP_STATE_IRQ;
7016 }
7017
555f6c78 7018 i = BNX2X_NUM_QUEUES(bp);
7019 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
7020 bp->msix_table[0].vector,
7021 0, bp->msix_table[offset].vector,
7022 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7023
a2fbb9ea 7024 return 0;
7025}
7026
7027static int bnx2x_enable_msi(struct bnx2x *bp)
7028{
7029 int rc;
7030
7031 rc = pci_enable_msi(bp->pdev);
7032 if (rc) {
7033 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7034 return -1;
7035 }
7036 bp->flags |= USING_MSI_FLAG;
7037
7038 return 0;
7039}
7040
7041static int bnx2x_req_irq(struct bnx2x *bp)
7042{
8badd27a 7043 unsigned long flags;
34f80b04 7044 int rc;
a2fbb9ea 7045
7046 if (bp->flags & USING_MSI_FLAG)
7047 flags = 0;
7048 else
7049 flags = IRQF_SHARED;
7050
7051 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7052 bp->dev->name, bp->dev);
7053 if (!rc)
7054 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7055
7056 return rc;
7057}
7058
7059static void bnx2x_napi_enable(struct bnx2x *bp)
7060{
7061 int i;
7062
54b9ddaa 7063 for_each_queue(bp, i)
7064 napi_enable(&bnx2x_fp(bp, i, napi));
7065}
7066
7067static void bnx2x_napi_disable(struct bnx2x *bp)
7068{
7069 int i;
7070
54b9ddaa 7071 for_each_queue(bp, i)
7072 napi_disable(&bnx2x_fp(bp, i, napi));
7073}
7074
7075static void bnx2x_netif_start(struct bnx2x *bp)
7076{
7077 int intr_sem;
7078
7079 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7080 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7081
7082 if (intr_sem) {
65abd74d 7083 if (netif_running(bp->dev)) {
7084 bnx2x_napi_enable(bp);
7085 bnx2x_int_enable(bp);
7086 if (bp->state == BNX2X_STATE_OPEN)
7087 netif_tx_wake_all_queues(bp->dev);
7088 }
7089 }
7090}
7091
f8ef6e44 7092static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7093{
f8ef6e44 7094 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7095 bnx2x_napi_disable(bp);
762d5f6c 7096 netif_tx_disable(bp->dev);
7097}
7098
7099/*
7100 * Init service functions
7101 */
7102
7103/**
7104 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7105 *
7106 * @param bp driver descriptor
7107 * @param set set or clear an entry (1 or 0)
7108 * @param mac pointer to a buffer containing a MAC
7109 * @param cl_bit_vec bit vector of clients to register a MAC for
7110 * @param cam_offset offset in a CAM to use
7111 * @param with_bcast set broadcast MAC as well
7112 */
7113static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7114 u32 cl_bit_vec, u8 cam_offset,
7115 u8 with_bcast)
7116{
7117 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7118 int port = BP_PORT(bp);
7119
7120 /* CAM allocation
7121 * unicasts 0-31:port0 32-63:port1
7122 * multicast 64-127:port0 128-191:port1
7123 */
7124 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7125 config->hdr.offset = cam_offset;
7126 config->hdr.client_id = 0xff;
7127 config->hdr.reserved1 = 0;
7128
7129 /* primary MAC */
7130 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7131 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7132 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7133 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7134 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7135 swab16(*(u16 *)&mac[4]);
34f80b04 7136 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7137 if (set)
7138 config->config_table[0].target_table_entry.flags = 0;
7139 else
7140 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7141 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7142 cpu_to_le32(cl_bit_vec);
7143 config->config_table[0].target_table_entry.vlan_id = 0;
7144
7145 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7146 (set ? "setting" : "clearing"),
7147 config->config_table[0].cam_entry.msb_mac_addr,
7148 config->config_table[0].cam_entry.middle_mac_addr,
7149 config->config_table[0].cam_entry.lsb_mac_addr);
7150
7151 /* broadcast */
7152 if (with_bcast) {
7153 config->config_table[1].cam_entry.msb_mac_addr =
7154 cpu_to_le16(0xffff);
7155 config->config_table[1].cam_entry.middle_mac_addr =
7156 cpu_to_le16(0xffff);
7157 config->config_table[1].cam_entry.lsb_mac_addr =
7158 cpu_to_le16(0xffff);
7159 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7160 if (set)
7161 config->config_table[1].target_table_entry.flags =
7162 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7163 else
7164 CAM_INVALIDATE(config->config_table[1]);
7165 config->config_table[1].target_table_entry.clients_bit_vector =
7166 cpu_to_le32(cl_bit_vec);
7167 config->config_table[1].target_table_entry.vlan_id = 0;
7168 }
7169
7170 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7171 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7172 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7173}
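/* Usage sketch - mirrors bnx2x_set_eth_mac_addr_e1() below: program the
 * device MAC plus the broadcast entry for client 0 in the port's CAM range.
 *
 *	bnx2x_set_mac_addr_e1_gen(bp, 1, bp->dev->dev_addr,
 *				  (1 << bp->fp->cl_id),
 *				  (BP_PORT(bp) ? 32 : 0), 1);
 */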
7174
7175/**
7176 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7177 *
7178 * @param bp driver descriptor
7179 * @param set set or clear an entry (1 or 0)
7180 * @param mac pointer to a buffer containing a MAC
7181 * @param cl_bit_vec bit vector of clients to register a MAC for
7182 * @param cam_offset offset in a CAM to use
7183 */
7184static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7185 u32 cl_bit_vec, u8 cam_offset)
7186{
7187 struct mac_configuration_cmd_e1h *config =
7188 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7189
8d9c5f34 7190 config->hdr.length = 1;
7191 config->hdr.offset = cam_offset;
7192 config->hdr.client_id = 0xff;
7193 config->hdr.reserved1 = 0;
7194
7195 /* primary MAC */
7196 config->config_table[0].msb_mac_addr =
e665bfda 7197 swab16(*(u16 *)&mac[0]);
34f80b04 7198 config->config_table[0].middle_mac_addr =
e665bfda 7199 swab16(*(u16 *)&mac[2]);
34f80b04 7200 config->config_table[0].lsb_mac_addr =
e665bfda 7201 swab16(*(u16 *)&mac[4]);
ca00392c 7202 config->config_table[0].clients_bit_vector =
e665bfda 7203 cpu_to_le32(cl_bit_vec);
7204 config->config_table[0].vlan_id = 0;
7205 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7206 if (set)
7207 config->config_table[0].flags = BP_PORT(bp);
7208 else
7209 config->config_table[0].flags =
7210 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7211
e665bfda 7212 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7213 (set ? "setting" : "clearing"),
7214 config->config_table[0].msb_mac_addr,
7215 config->config_table[0].middle_mac_addr,
e665bfda 7216 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7217
7218 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7219 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7220 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7221}
7222
7223static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7224 int *state_p, int poll)
7225{
7226 /* can take a while if any port is running */
8b3a0f0b 7227 int cnt = 5000;
a2fbb9ea 7228
7229 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7230 poll ? "polling" : "waiting", state, idx);
7231
7232 might_sleep();
34f80b04 7233 while (cnt--) {
7234 if (poll) {
7235 bnx2x_rx_int(bp->fp, 10);
7236 /* if index is different from 0
7237 * the reply for some commands will
3101c2bc 7238 * be on the non default queue
7239 */
7240 if (idx)
7241 bnx2x_rx_int(&bp->fp[idx], 10);
7242 }
a2fbb9ea 7243
3101c2bc 7244 mb(); /* state is changed by bnx2x_sp_event() */
7245 if (*state_p == state) {
7246#ifdef BNX2X_STOP_ON_ERROR
7247 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7248#endif
a2fbb9ea 7249 return 0;
8b3a0f0b 7250 }
a2fbb9ea 7251
a2fbb9ea 7252 msleep(1);
7253
7254 if (bp->panic)
7255 return -EIO;
7256 }
7257
a2fbb9ea 7258 /* timeout! */
7259 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7260 poll ? "polling" : "waiting", state, idx);
7261#ifdef BNX2X_STOP_ON_ERROR
7262 bnx2x_panic();
7263#endif
a2fbb9ea 7264
49d66772 7265 return -EBUSY;
7266}
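/* Note on bnx2x_wait_ramrod() above: in interrupt mode *state_p is
 * advanced by bnx2x_sp_event() from the completion path; in poll mode the
 * function drives the RX completion ring itself, which is why it takes
 * the queue index to poll.
 */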
7267
7268static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7269{
7270 bp->set_mac_pending++;
7271 smp_wmb();
7272
7273 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7274 (1 << bp->fp->cl_id), BP_FUNC(bp));
7275
7276 /* Wait for a completion */
7277 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7278}
7279
7280static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7281{
7282 bp->set_mac_pending++;
7283 smp_wmb();
7284
7285 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7286 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7287 1);
7288
7289 /* Wait for a completion */
7290 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7291}
7292
7293#ifdef BCM_CNIC
7294/**
7295 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7296 * MAC(s). This function will wait until the ramrod completion
7297 * returns.
7298 *
7299 * @param bp driver handle
7300 * @param set set or clear the CAM entry
7301 *
7302 * @return 0 on success, -ENODEV if ramrod doesn't return.
7303 */
7304static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7305{
7306 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7307
7308 bp->set_mac_pending++;
7309 smp_wmb();
7310
7311 /* Send a SET_MAC ramrod */
7312 if (CHIP_IS_E1(bp))
7313 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7314 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7315 1);
7316 else
7317 /* CAM allocation for E1H
7318 * unicasts: by func number
7319 * multicast: 20+FUNC*20, 20 each
7320 */
7321 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7322 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7323
7324 /* Wait for a completion when setting */
7325 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7326
7327 return 0;
7328}
7329#endif
7330
7331static int bnx2x_setup_leading(struct bnx2x *bp)
7332{
34f80b04 7333 int rc;
a2fbb9ea 7334
c14423fe 7335 /* reset IGU state */
34f80b04 7336 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7337
7338 /* SETUP ramrod */
7339 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7340
7341 /* Wait for completion */
7342 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7343
34f80b04 7344 return rc;
7345}
7346
7347static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7348{
7349 struct bnx2x_fastpath *fp = &bp->fp[index];
7350
a2fbb9ea 7351 /* reset IGU state */
555f6c78 7352 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7353
228241eb 7354 /* SETUP ramrod */
7355 fp->state = BNX2X_FP_STATE_OPENING;
7356 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7357 fp->cl_id, 0);
7358
7359 /* Wait for completion */
7360 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7361 &(fp->state), 0);
7362}
7363
a2fbb9ea 7364static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7365
54b9ddaa 7366static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 7367{
7368
7369 switch (bp->multi_mode) {
7370 case ETH_RSS_MODE_DISABLED:
54b9ddaa 7371 bp->num_queues = 1;
7372 break;
7373
7374 case ETH_RSS_MODE_REGULAR:
7375 if (num_queues)
7376 bp->num_queues = min_t(u32, num_queues,
7377 BNX2X_MAX_QUEUES(bp));
ca00392c 7378 else
7379 bp->num_queues = min_t(u32, num_online_cpus(),
7380 BNX2X_MAX_QUEUES(bp));
7381 break;
7382
7383
7384 default:
54b9ddaa 7385 bp->num_queues = 1;
7386 break;
7387 }
7388}
7389
54b9ddaa 7390static int bnx2x_set_num_queues(struct bnx2x *bp)
a2fbb9ea 7391{
ca00392c 7392 int rc = 0;
a2fbb9ea 7393
7394 switch (int_mode) {
7395 case INT_MODE_INTx:
7396 case INT_MODE_MSI:
54b9ddaa 7397 bp->num_queues = 1;
ca00392c 7398 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7399 break;
7400
7401 case INT_MODE_MSIX:
7402 default:
7403 /* Set number of queues according to bp->multi_mode value */
7404 bnx2x_set_num_queues_msix(bp);
ca00392c 7405
7406 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7407 bp->num_queues);
ca00392c 7408
7409 /* if we can't use MSI-X we only need one fp,
7410 * so try to enable MSI-X with the requested number of fp's
7411 * and fallback to MSI or legacy INTx with one fp
7412 */
ca00392c 7413 rc = bnx2x_enable_msix(bp);
54b9ddaa 7414 if (rc)
34f80b04 7415 /* failed to enable MSI-X */
54b9ddaa 7416 bp->num_queues = 1;
8badd27a 7417 break;
a2fbb9ea 7418 }
54b9ddaa 7419 bp->dev->real_num_tx_queues = bp->num_queues;
ca00392c 7420 return rc;
7421}
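/* Note: when MSI-X cannot be enabled, the driver falls back to a single
 * queue here; bnx2x_nic_load() then tries MSI and finally legacy INTx.
 */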
7422
7423#ifdef BCM_CNIC
7424static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7425static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7426#endif
7427
7428/* must be called with rtnl_lock */
7429static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7430{
7431 u32 load_code;
7432 int i, rc;
7433
8badd27a 7434#ifdef BNX2X_STOP_ON_ERROR
7435 if (unlikely(bp->panic))
7436 return -EPERM;
7437#endif
7438
7439 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7440
54b9ddaa 7441 rc = bnx2x_set_num_queues(bp);
c14423fe 7442
7443 if (bnx2x_alloc_mem(bp)) {
7444 bnx2x_free_irq(bp, true);
a2fbb9ea 7445 return -ENOMEM;
6cbe5065 7446 }
a2fbb9ea 7447
54b9ddaa 7448 for_each_queue(bp, i)
7449 bnx2x_fp(bp, i, disable_tpa) =
7450 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7451
54b9ddaa 7452 for_each_queue(bp, i)
7453 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7454 bnx2x_poll, 128);
7455
7456 bnx2x_napi_enable(bp);
7457
7458 if (bp->flags & USING_MSIX_FLAG) {
7459 rc = bnx2x_req_msix_irqs(bp);
7460 if (rc) {
6cbe5065 7461 bnx2x_free_irq(bp, true);
2dfe0e1f 7462 goto load_error1;
7463 }
7464 } else {
ca00392c 7465 /* Fall to INTx if failed to enable MSI-X due to lack of
54b9ddaa 7466 memory (in bnx2x_set_num_queues()) */
7467 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7468 bnx2x_enable_msi(bp);
7469 bnx2x_ack_int(bp);
7470 rc = bnx2x_req_irq(bp);
7471 if (rc) {
2dfe0e1f 7472 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6cbe5065 7473 bnx2x_free_irq(bp, true);
2dfe0e1f 7474 goto load_error1;
a2fbb9ea 7475 }
7476 if (bp->flags & USING_MSI_FLAG) {
7477 bp->dev->irq = bp->pdev->irq;
7478 netdev_info(bp->dev, "using MSI IRQ %d\n",
7479 bp->pdev->irq);
8badd27a 7480 }
7481 }
7482
7483 /* Send LOAD_REQUEST command to MCP
7484 Returns the type of LOAD command:
7485 if it is the first port to be initialized
7486 common blocks should be initialized, otherwise - not
7487 */
7488 if (!BP_NOMCP(bp)) {
7489 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7490 if (!load_code) {
7491 BNX2X_ERR("MCP response failure, aborting\n");
7492 rc = -EBUSY;
7493 goto load_error2;
7494 }
7495 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7496 rc = -EBUSY; /* other port in diagnostic mode */
7497 goto load_error2;
7498 }
7499
7500 } else {
7501 int port = BP_PORT(bp);
7502
f5372251 7503 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7504 load_count[0], load_count[1], load_count[2]);
7505 load_count[0]++;
7506 load_count[1 + port]++;
f5372251 7507 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7508 load_count[0], load_count[1], load_count[2]);
7509 if (load_count[0] == 1)
7510 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7511 else if (load_count[1 + port] == 1)
7512 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7513 else
7514 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7515 }
7516
7517 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7518 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7519 bp->port.pmf = 1;
7520 else
7521 bp->port.pmf = 0;
7522 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7523
a2fbb9ea 7524 /* Initialize HW */
7525 rc = bnx2x_init_hw(bp, load_code);
7526 if (rc) {
a2fbb9ea 7527 BNX2X_ERR("HW init failed, aborting\n");
7528 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7529 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7530 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
2dfe0e1f 7531 goto load_error2;
7532 }
7533
a2fbb9ea 7534 /* Setup NIC internals and enable interrupts */
471de716 7535 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7536
7537 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7538 (bp->common.shmem2_base))
7539 SHMEM2_WR(bp, dcc_support,
7540 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7541 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7542
a2fbb9ea 7543 /* Send LOAD_DONE command to MCP */
34f80b04 7544 if (!BP_NOMCP(bp)) {
7545 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7546 if (!load_code) {
da5a662a 7547 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7548 rc = -EBUSY;
2dfe0e1f 7549 goto load_error3;
7550 }
7551 }
7552
7553 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7554
7555 rc = bnx2x_setup_leading(bp);
7556 if (rc) {
da5a662a 7557 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7558#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7559 goto load_error3;
7560#else
7561 bp->panic = 1;
7562 return -EBUSY;
7563#endif
34f80b04 7564 }
a2fbb9ea 7565
7566 if (CHIP_IS_E1H(bp))
7567 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7568 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 7569 bp->flags |= MF_FUNC_DIS;
34f80b04 7570 }
a2fbb9ea 7571
ca00392c 7572 if (bp->state == BNX2X_STATE_OPEN) {
7573#ifdef BCM_CNIC
7574 /* Enable Timer scan */
7575 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7576#endif
7577 for_each_nondefault_queue(bp, i) {
7578 rc = bnx2x_setup_multi(bp, i);
7579 if (rc)
7580#ifdef BCM_CNIC
7581 goto load_error4;
7582#else
2dfe0e1f 7583 goto load_error3;
37b091ba 7584#endif
34f80b04 7585 }
a2fbb9ea 7586
ca00392c 7587 if (CHIP_IS_E1(bp))
e665bfda 7588 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 7589 else
e665bfda 7590 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7591#ifdef BCM_CNIC
7592 /* Set iSCSI L2 MAC */
7593 mutex_lock(&bp->cnic_mutex);
7594 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7595 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7596 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7597 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7598 CNIC_SB_ID(bp));
7599 }
7600 mutex_unlock(&bp->cnic_mutex);
7601#endif
ca00392c 7602 }
7603
7604 if (bp->port.pmf)
b5bf9068 7605 bnx2x_initial_phy_init(bp, load_mode);
7606
7607 /* Start fast path */
7608 switch (load_mode) {
7609 case LOAD_NORMAL:
7610 if (bp->state == BNX2X_STATE_OPEN) {
7611 /* Tx queues should only be re-enabled */
7612 netif_tx_wake_all_queues(bp->dev);
7613 }
2dfe0e1f 7614 /* Initialize the receive filter. */
7615 bnx2x_set_rx_mode(bp->dev);
7616 break;
7617
7618 case LOAD_OPEN:
555f6c78 7619 netif_tx_start_all_queues(bp->dev);
7620 if (bp->state != BNX2X_STATE_OPEN)
7621 netif_tx_disable(bp->dev);
2dfe0e1f 7622 /* Initialize the receive filter. */
34f80b04 7623 bnx2x_set_rx_mode(bp->dev);
34f80b04 7624 break;
a2fbb9ea 7625
34f80b04 7626 case LOAD_DIAG:
2dfe0e1f 7627 /* Initialize the receive filter. */
a2fbb9ea 7628 bnx2x_set_rx_mode(bp->dev);
7629 bp->state = BNX2X_STATE_DIAG;
7630 break;
7631
7632 default:
7633 break;
7634 }
7635
7636 if (!bp->port.pmf)
7637 bnx2x__link_status_update(bp);
7638
7639 /* start the timer */
7640 mod_timer(&bp->timer, jiffies + bp->current_interval);
7641
7642#ifdef BCM_CNIC
7643 bnx2x_setup_cnic_irq_info(bp);
7644 if (bp->state == BNX2X_STATE_OPEN)
7645 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7646#endif
34f80b04 7647
7648 return 0;
7649
7650#ifdef BCM_CNIC
7651load_error4:
7652 /* Disable Timer scan */
7653 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7654#endif
7655load_error3:
7656 bnx2x_int_disable_sync(bp, 1);
7657 if (!BP_NOMCP(bp)) {
7658 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7659 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7660 }
7661 bp->port.pmf = 0;
7662 /* Free SKBs, SGEs, TPA pool and driver internals */
7663 bnx2x_free_skbs(bp);
54b9ddaa 7664 for_each_queue(bp, i)
3196a88a 7665 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7666load_error2:
d1014634 7667 /* Release IRQs */
6cbe5065 7668 bnx2x_free_irq(bp, false);
7669load_error1:
7670 bnx2x_napi_disable(bp);
54b9ddaa 7671 for_each_queue(bp, i)
7cde1c8b 7672 netif_napi_del(&bnx2x_fp(bp, i, napi));
7673 bnx2x_free_mem(bp);
7674
34f80b04 7675 return rc;
7676}
7677
7678static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7679{
555f6c78 7680 struct bnx2x_fastpath *fp = &bp->fp[index];
7681 int rc;
7682
c14423fe 7683 /* halt the connection */
7684 fp->state = BNX2X_FP_STATE_HALTING;
7685 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7686
34f80b04 7687 /* Wait for completion */
a2fbb9ea 7688 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7689 &(fp->state), 1);
c14423fe 7690 if (rc) /* timeout */
7691 return rc;
7692
7693 /* delete cfc entry */
7694 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7695
7696 /* Wait for completion */
7697 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7698 &(fp->state), 1);
34f80b04 7699 return rc;
7700}
7701
da5a662a 7702static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7703{
4781bfad 7704 __le16 dsb_sp_prod_idx;
c14423fe 7705 /* if the other port is handling traffic,
a2fbb9ea 7706 this can take a lot of time */
7707 int cnt = 500;
7708 int rc;
7709
7710 might_sleep();
7711
7712 /* Send HALT ramrod */
7713 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7714 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7715
7716 /* Wait for completion */
7717 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7718 &(bp->fp[0].state), 1);
7719 if (rc) /* timeout */
da5a662a 7720 return rc;
a2fbb9ea 7721
49d66772 7722 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7723
228241eb 7724 /* Send PORT_DELETE ramrod */
7725 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7726
49d66772 7727 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7728 we are going to reset the chip anyway
7729 so there is not much to do if this times out
7730 */
34f80b04 7731 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7732 if (!cnt) {
7733 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7734 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7735 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7736#ifdef BNX2X_STOP_ON_ERROR
7737 bnx2x_panic();
7738#endif
36e552ab 7739 rc = -EBUSY;
7740 break;
7741 }
7742 cnt--;
da5a662a 7743 msleep(1);
5650d9d4 7744 rmb(); /* Refresh the dsb_sp_prod */
7745 }
7746 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7747 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7748
7749 return rc;
7750}
7751
7752static void bnx2x_reset_func(struct bnx2x *bp)
7753{
7754 int port = BP_PORT(bp);
7755 int func = BP_FUNC(bp);
7756 int base, i;
7757
7758 /* Configure IGU */
7759 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7760 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7761
7762#ifdef BCM_CNIC
7763 /* Disable Timer scan */
7764 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7765 /*
7766 * Wait for at least 10ms and up to 2 second for the timers scan to
7767 * complete
7768 */
7769 for (i = 0; i < 200; i++) {
7770 msleep(10);
7771 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7772 break;
7773 }
7774#endif
7775 /* Clear ILT */
7776 base = FUNC_ILT_BASE(func);
7777 for (i = base; i < base + ILT_PER_FUNC; i++)
7778 bnx2x_ilt_wr(bp, i, 0);
7779}
7780
7781static void bnx2x_reset_port(struct bnx2x *bp)
7782{
7783 int port = BP_PORT(bp);
7784 u32 val;
7785
7786 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7787
7788 /* Do not rcv packets to BRB */
7789 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7790 /* Do not direct rcv packets that are not for MCP to the BRB */
7791 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7792 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7793
7794 /* Configure AEU */
7795 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7796
7797 msleep(100);
7798 /* Check for BRB port occupancy */
7799 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7800 if (val)
7801 DP(NETIF_MSG_IFDOWN,
33471629 7802 "BRB1 is not empty, %d blocks are occupied\n", val);
7803
7804 /* TODO: Close Doorbell port? */
7805}
7806
7807static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7808{
7809 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7810 BP_FUNC(bp), reset_code);
7811
7812 switch (reset_code) {
7813 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7814 bnx2x_reset_port(bp);
7815 bnx2x_reset_func(bp);
7816 bnx2x_reset_common(bp);
7817 break;
7818
7819 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7820 bnx2x_reset_port(bp);
7821 bnx2x_reset_func(bp);
7822 break;
7823
7824 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7825 bnx2x_reset_func(bp);
7826 break;
49d66772 7827
7828 default:
7829 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7830 break;
7831 }
7832}
7833
33471629 7834/* must be called with rtnl_lock */
34f80b04 7835static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7836{
da5a662a 7837 int port = BP_PORT(bp);
a2fbb9ea 7838 u32 reset_code = 0;
da5a662a 7839 int i, cnt, rc;
a2fbb9ea 7840
7841#ifdef BCM_CNIC
7842 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7843#endif
7844 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7845
ab6ad5a4 7846 /* Set "drop all" */
7847 bp->rx_mode = BNX2X_RX_MODE_NONE;
7848 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7849
ab6ad5a4 7850 /* Disable HW interrupts, NAPI and Tx */
f8ef6e44 7851 bnx2x_netif_stop(bp, 1);
e94d8af3 7852
7853 del_timer_sync(&bp->timer);
7854 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7855 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7856 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7857
70b9986c 7858 /* Release IRQs */
6cbe5065 7859 bnx2x_free_irq(bp, false);
70b9986c 7860
555f6c78 7861 /* Wait until tx fastpath tasks complete */
54b9ddaa 7862 for_each_queue(bp, i) {
7863 struct bnx2x_fastpath *fp = &bp->fp[i];
7864
34f80b04 7865 cnt = 1000;
e8b5fc51 7866 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7867
7961f791 7868 bnx2x_tx_int(fp);
7869 if (!cnt) {
7870 BNX2X_ERR("timeout waiting for queue[%d]\n",
7871 i);
7872#ifdef BNX2X_STOP_ON_ERROR
7873 bnx2x_panic();
7874 return -EBUSY;
7875#else
7876 break;
7877#endif
7878 }
7879 cnt--;
da5a662a 7880 msleep(1);
34f80b04 7881 }
228241eb 7882 }
7883 /* Give HW time to discard old tx messages */
7884 msleep(1);
a2fbb9ea 7885
7886 if (CHIP_IS_E1(bp)) {
7887 struct mac_configuration_cmd *config =
7888 bnx2x_sp(bp, mcast_config);
7889
e665bfda 7890 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 7891
8d9c5f34 7892 for (i = 0; i < config->hdr.length; i++)
7893 CAM_INVALIDATE(config->config_table[i]);
7894
8d9c5f34 7895 config->hdr.length = i;
7896 if (CHIP_REV_IS_SLOW(bp))
7897 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7898 else
7899 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7900 config->hdr.client_id = bp->fp->cl_id;
7901 config->hdr.reserved1 = 0;
7902
7903 bp->set_mac_pending++;
7904 smp_wmb();
7905
7906 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7907 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7908 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7909
7910 } else { /* E1H */
7911 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7912
e665bfda 7913 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7914
7915 for (i = 0; i < MC_HASH_SIZE; i++)
7916 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7917
7918 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 7919 }
7920#ifdef BCM_CNIC
7921 /* Clear iSCSI L2 MAC */
7922 mutex_lock(&bp->cnic_mutex);
7923 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7924 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7925 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7926 }
7927 mutex_unlock(&bp->cnic_mutex);
7928#endif
3101c2bc 7929
7930 if (unload_mode == UNLOAD_NORMAL)
7931 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7932
7d0446c2 7933 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7934 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7935
7d0446c2 7936 else if (bp->wol) {
7937 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7938 u8 *mac_addr = bp->dev->dev_addr;
7939 u32 val;
7940 /* The mac address is written to entries 1-4 to
7941 preserve entry 0 which is used by the PMF */
7942 u8 entry = (BP_E1HVN(bp) + 1)*8;
7943
7944 val = (mac_addr[0] << 8) | mac_addr[1];
7945 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7946
7947 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7948 (mac_addr[4] << 8) | mac_addr[5];
7949 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7950
7951 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7952
7953 } else
7954 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7955
7956 /* Close multi and leading connections
7957 Completions for ramrods are collected in a synchronous way */
7958 for_each_nondefault_queue(bp, i)
7959 if (bnx2x_stop_multi(bp, i))
228241eb 7960 goto unload_error;
a2fbb9ea 7961
7962 rc = bnx2x_stop_leading(bp);
7963 if (rc) {
34f80b04 7964 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7965#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7966 return -EBUSY;
7967#else
7968 goto unload_error;
34f80b04 7969#endif
7970 }
7971
7972unload_error:
34f80b04 7973 if (!BP_NOMCP(bp))
228241eb 7974 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7975 else {
f5372251 7976 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7977 load_count[0], load_count[1], load_count[2]);
7978 load_count[0]--;
da5a662a 7979 load_count[1 + port]--;
f5372251 7980 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7981 load_count[0], load_count[1], load_count[2]);
7982 if (load_count[0] == 0)
7983 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7984 else if (load_count[1 + port] == 0)
7985 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7986 else
7987 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7988 }
a2fbb9ea 7989
7990 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7991 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7992 bnx2x__link_reset(bp);
7993
7994 /* Reset the chip */
228241eb 7995 bnx2x_reset_chip(bp, reset_code);
7996
7997 /* Report UNLOAD_DONE to MCP */
34f80b04 7998 if (!BP_NOMCP(bp))
a2fbb9ea 7999 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8000
9a035440 8001 bp->port.pmf = 0;
a2fbb9ea 8002
7a9b2557 8003 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8004 bnx2x_free_skbs(bp);
54b9ddaa 8005 for_each_queue(bp, i)
3196a88a 8006 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 8007 for_each_queue(bp, i)
7cde1c8b 8008 netif_napi_del(&bnx2x_fp(bp, i, napi));
8009 bnx2x_free_mem(bp);
8010
8011 bp->state = BNX2X_STATE_CLOSED;
228241eb 8012
8013 netif_carrier_off(bp->dev);
8014
8015 return 0;
8016}
8017
8018static void bnx2x_reset_task(struct work_struct *work)
8019{
8020 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8021
8022#ifdef BNX2X_STOP_ON_ERROR
8023 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8024 " so reset not done to allow debug dump,\n"
ad361c98 8025 " you will need to reboot when done\n");
8026 return;
8027#endif
8028
8029 rtnl_lock();
8030
8031 if (!netif_running(bp->dev))
8032 goto reset_task_exit;
8033
8034 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8035 bnx2x_nic_load(bp, LOAD_NORMAL);
8036
8037reset_task_exit:
8038 rtnl_unlock();
8039}
8040
8041/* end of nic load/unload */
8042
8043/* ethtool_ops */
8044
8045/*
8046 * Init service functions
8047 */
8048
8049static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8050{
8051 switch (func) {
8052 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8053 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8054 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8055 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8056 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8057 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8058 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8059 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8060 default:
8061 BNX2X_ERR("Unsupported function index: %d\n", func);
8062 return (u32)(-1);
8063 }
8064}
8065
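/* A note on the "pretend" mechanism used below: writing a function number
 * to the pretend register makes subsequent GRC accesses appear to come
 * from that function; bnx2x_undi_int_disable_e1h() pretends to be
 * function 0 while disabling the UNDI interrupts, then restores itself.
 */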
8066static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8067{
8068 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8069
8070 /* Flush all outstanding writes */
8071 mmiowb();
8072
8073 /* Pretend to be function 0 */
8074 REG_WR(bp, reg, 0);
8075 /* Flush the GRC transaction (in the chip) */
8076 new_val = REG_RD(bp, reg);
8077 if (new_val != 0) {
8078 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8079 new_val);
8080 BUG();
8081 }
8082
8083 /* From now we are in the "like-E1" mode */
8084 bnx2x_int_disable(bp);
8085
8086 /* Flush all outstanding writes */
8087 mmiowb();
8088
8089 /* Restore the original function settings */
8090 REG_WR(bp, reg, orig_func);
8091 new_val = REG_RD(bp, reg);
8092 if (new_val != orig_func) {
8093 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8094 orig_func, new_val);
8095 BUG();
8096 }
8097}
8098
8099static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8100{
8101 if (CHIP_IS_E1H(bp))
8102 bnx2x_undi_int_disable_e1h(bp, func);
8103 else
8104 bnx2x_int_disable(bp);
8105}
8106
8107static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8108{
8109 u32 val;
8110
8111 /* Check if there is any driver already loaded */
8112 val = REG_RD(bp, MISC_REG_UNPREPARED);
8113 if (val == 0x1) {
8114 /* Check if it is the UNDI driver
8115 * UNDI driver initializes CID offset for normal doorbell to 0x7
8116 */
4a37fb66 8117 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8118 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8119 if (val == 0x7) {
8120 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8121 /* save our func */
34f80b04 8122 int func = BP_FUNC(bp);
8123 u32 swap_en;
8124 u32 swap_val;
34f80b04 8125
8126 /* clear the UNDI indication */
8127 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8128
8129 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8130
8131 /* try unload UNDI on port 0 */
8132 bp->func = 0;
8133 bp->fw_seq =
8134 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8135 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 8136 reset_code = bnx2x_fw_command(bp, reset_code);
8137
8138 /* if UNDI is loaded on the other port */
8139 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8140
8141 /* send "DONE" for previous unload */
8142 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8143
8144 /* unload UNDI on port 1 */
34f80b04 8145 bp->func = 1;
da5a662a
VZ
8146 bp->fw_seq =
8147 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8148 DRV_MSG_SEQ_NUMBER_MASK);
8149 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8150
8151 bnx2x_fw_command(bp, reset_code);
8152 }
8153
8154 /* now it's safe to release the lock */
8155 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8156
f1ef27ef 8157 bnx2x_undi_int_disable(bp, func);
8158
8159 /* close input traffic and wait for it */
8160 /* Do not rcv packets to BRB */
8161 REG_WR(bp,
8162 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8163 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8164 /* Do not direct rcv packets that are not for MCP to
8165 * the BRB */
8166 REG_WR(bp,
8167 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8168 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8169 /* clear AEU */
8170 REG_WR(bp,
8171 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8172 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8173 msleep(10);
8174
8175 /* save NIG port swap info */
8176 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8177 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8178 /* reset device */
8179 REG_WR(bp,
8180 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 8181 0xd3ffffff);
8182 REG_WR(bp,
8183 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8184 0x1403);
8185 /* take the NIG out of reset and restore swap values */
8186 REG_WR(bp,
8187 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8188 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8189 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8190 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8191
8192 /* send unload done to the MCP */
8193 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8194
8195 /* restore our func and fw_seq */
8196 bp->func = func;
8197 bp->fw_seq =
8198 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8199 DRV_MSG_SEQ_NUMBER_MASK);
8200
8201 } else
8202 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8203 }
8204}
8205
8206static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8207{
8208 u32 val, val2, val3, val4, id;
72ce58c3 8209 u16 pmc;
34f80b04
EG
8210
8211 /* Get the chip revision id and number. */
8212 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8213 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8214 id = ((val & 0xffff) << 16);
8215 val = REG_RD(bp, MISC_REG_CHIP_REV);
8216 id |= ((val & 0xf) << 12);
8217 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8218 id |= ((val & 0xff) << 4);
5a40e08e 8219 val = REG_RD(bp, MISC_REG_BOND_ID);
8220 id |= (val & 0xf);
8221 bp->common.chip_id = id;
8222 bp->link_params.chip_id = bp->common.chip_id;
8223 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8224
8225 val = (REG_RD(bp, 0x2874) & 0x55);
8226 if ((bp->common.chip_id & 0x1) ||
8227 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8228 bp->flags |= ONE_PORT_FLAG;
8229 BNX2X_DEV_INFO("single port device\n");
8230 }
8231
34f80b04
EG
8232 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8233 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8234 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8235 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8236 bp->common.flash_size, bp->common.flash_size);
8237
8238 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8239 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8240 bp->link_params.shmem_base = bp->common.shmem_base;
8241 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8242 bp->common.shmem_base, bp->common.shmem2_base);
8243
8244 if (!bp->common.shmem_base ||
8245 (bp->common.shmem_base < 0xA0000) ||
8246 (bp->common.shmem_base >= 0xC0000)) {
8247 BNX2X_DEV_INFO("MCP not active\n");
8248 bp->flags |= NO_MCP_FLAG;
8249 return;
8250 }
8251
8252 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8253 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8254 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8255 BNX2X_ERR("BAD MCP validity signature\n");
8256
8257 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8258 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8259
8260 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8261 SHARED_HW_CFG_LED_MODE_MASK) >>
8262 SHARED_HW_CFG_LED_MODE_SHIFT);
8263
8264 bp->link_params.feature_config_flags = 0;
8265 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8266 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8267 bp->link_params.feature_config_flags |=
8268 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8269 else
8270 bp->link_params.feature_config_flags &=
8271 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8272
8273 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8274 bp->common.bc_ver = val;
8275 BNX2X_DEV_INFO("bc_ver %X\n", val);
8276 if (val < BNX2X_BC_VER) {
8277 /* for now only warn
8278 * later we might need to enforce this */
8279 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8280 " please upgrade BC\n", BNX2X_BC_VER, val);
8281 }
8282 bp->link_params.feature_config_flags |=
8283 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8284 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8285
8286 if (BP_E1HVN(bp) == 0) {
8287 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8288 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8289 } else {
8290 /* no WOL capability for E1HVN != 0 */
8291 bp->flags |= NO_WOL_FLAG;
8292 }
8293 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8294 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8295
8296 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8297 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8298 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8299 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8300
7995c64e 8301 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
8302}
8303
8304static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8305 u32 switch_cfg)
a2fbb9ea 8306{
34f80b04 8307 int port = BP_PORT(bp);
8308 u32 ext_phy_type;
8309
8310 switch (switch_cfg) {
8311 case SWITCH_CFG_1G:
8312 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8313
8314 ext_phy_type =
8315 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8316 switch (ext_phy_type) {
8317 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8318 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8319 ext_phy_type);
8320
8321 bp->port.supported |= (SUPPORTED_10baseT_Half |
8322 SUPPORTED_10baseT_Full |
8323 SUPPORTED_100baseT_Half |
8324 SUPPORTED_100baseT_Full |
8325 SUPPORTED_1000baseT_Full |
8326 SUPPORTED_2500baseX_Full |
8327 SUPPORTED_TP |
8328 SUPPORTED_FIBRE |
8329 SUPPORTED_Autoneg |
8330 SUPPORTED_Pause |
8331 SUPPORTED_Asym_Pause);
8332 break;
8333
8334 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8335 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8336 ext_phy_type);
8337
8338 bp->port.supported |= (SUPPORTED_10baseT_Half |
8339 SUPPORTED_10baseT_Full |
8340 SUPPORTED_100baseT_Half |
8341 SUPPORTED_100baseT_Full |
8342 SUPPORTED_1000baseT_Full |
8343 SUPPORTED_TP |
8344 SUPPORTED_FIBRE |
8345 SUPPORTED_Autoneg |
8346 SUPPORTED_Pause |
8347 SUPPORTED_Asym_Pause);
8348 break;
8349
8350 default:
8351 BNX2X_ERR("NVRAM config error. "
8352 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8353 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8354 return;
8355 }
8356
8357 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8358 port*0x10);
8359 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8360 break;
8361
8362 case SWITCH_CFG_10G:
8363 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8364
8365 ext_phy_type =
8366 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8367 switch (ext_phy_type) {
8368 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8369 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8370 ext_phy_type);
8371
8372 bp->port.supported |= (SUPPORTED_10baseT_Half |
8373 SUPPORTED_10baseT_Full |
8374 SUPPORTED_100baseT_Half |
8375 SUPPORTED_100baseT_Full |
8376 SUPPORTED_1000baseT_Full |
8377 SUPPORTED_2500baseX_Full |
8378 SUPPORTED_10000baseT_Full |
8379 SUPPORTED_TP |
8380 SUPPORTED_FIBRE |
8381 SUPPORTED_Autoneg |
8382 SUPPORTED_Pause |
8383 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8384 break;
8385
589abe3a
EG
8386 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8387 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8388 ext_phy_type);
f1410647 8389
34f80b04 8390 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8391 SUPPORTED_1000baseT_Full |
34f80b04 8392 SUPPORTED_FIBRE |
589abe3a 8393 SUPPORTED_Autoneg |
34f80b04
EG
8394 SUPPORTED_Pause |
8395 SUPPORTED_Asym_Pause);
f1410647
ET
8396 break;
8397
589abe3a
EG
8398 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8399 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
8400 ext_phy_type);
8401
34f80b04 8402 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8403 SUPPORTED_2500baseX_Full |
34f80b04 8404 SUPPORTED_1000baseT_Full |
589abe3a
EG
8405 SUPPORTED_FIBRE |
8406 SUPPORTED_Autoneg |
8407 SUPPORTED_Pause |
8408 SUPPORTED_Asym_Pause);
8409 break;
8410
8411 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8412 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8413 ext_phy_type);
8414
8415 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
8416 SUPPORTED_FIBRE |
8417 SUPPORTED_Pause |
8418 SUPPORTED_Asym_Pause);
f1410647
ET
8419 break;
8420
589abe3a
EG
8421 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8422 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
8423 ext_phy_type);
8424
34f80b04
EG
8425 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8426 SUPPORTED_1000baseT_Full |
8427 SUPPORTED_FIBRE |
34f80b04
EG
8428 SUPPORTED_Pause |
8429 SUPPORTED_Asym_Pause);
f1410647
ET
8430 break;
8431
589abe3a
EG
8432 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8433 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
8434 ext_phy_type);
8435
34f80b04 8436 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8437 SUPPORTED_1000baseT_Full |
34f80b04 8438 SUPPORTED_Autoneg |
589abe3a 8439 SUPPORTED_FIBRE |
34f80b04
EG
8440 SUPPORTED_Pause |
8441 SUPPORTED_Asym_Pause);
c18487ee
YR
8442 break;
8443
4d295db0
EG
8444 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8445 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8446 ext_phy_type);
8447
8448 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8449 SUPPORTED_1000baseT_Full |
8450 SUPPORTED_Autoneg |
8451 SUPPORTED_FIBRE |
8452 SUPPORTED_Pause |
8453 SUPPORTED_Asym_Pause);
8454 break;
8455
f1410647
ET
8456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8457 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8458 ext_phy_type);
8459
34f80b04
EG
8460 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8461 SUPPORTED_TP |
8462 SUPPORTED_Autoneg |
8463 SUPPORTED_Pause |
8464 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8465 break;
8466
28577185
EG
8467 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8468 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8469 ext_phy_type);
8470
8471 bp->port.supported |= (SUPPORTED_10baseT_Half |
8472 SUPPORTED_10baseT_Full |
8473 SUPPORTED_100baseT_Half |
8474 SUPPORTED_100baseT_Full |
8475 SUPPORTED_1000baseT_Full |
8476 SUPPORTED_10000baseT_Full |
8477 SUPPORTED_TP |
8478 SUPPORTED_Autoneg |
8479 SUPPORTED_Pause |
8480 SUPPORTED_Asym_Pause);
8481 break;
8482
c18487ee
YR
8483 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8484 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8485 bp->link_params.ext_phy_config);
8486 break;
8487
a2fbb9ea
ET
8488 default:
8489 BNX2X_ERR("NVRAM config error. "
8490 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8491 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8492 return;
8493 }
8494
34f80b04
EG
8495 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8496 port*0x18);
8497 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8498
a2fbb9ea
ET
8499 break;
8500
8501 default:
8502 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8503 bp->port.link_config);
a2fbb9ea
ET
8504 return;
8505 }
34f80b04 8506 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
8507
8508 /* mask what we support according to speed_cap_mask */
c18487ee
YR
8509 if (!(bp->link_params.speed_cap_mask &
8510 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8511 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8512
c18487ee
YR
8513 if (!(bp->link_params.speed_cap_mask &
8514 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8515 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8516
c18487ee
YR
8517 if (!(bp->link_params.speed_cap_mask &
8518 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8519 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8520
c18487ee
YR
8521 if (!(bp->link_params.speed_cap_mask &
8522 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8523 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8524
c18487ee
YR
8525 if (!(bp->link_params.speed_cap_mask &
8526 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
8527 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8528 SUPPORTED_1000baseT_Full);
a2fbb9ea 8529
c18487ee
YR
8530 if (!(bp->link_params.speed_cap_mask &
8531 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8532 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8533
c18487ee
YR
8534 if (!(bp->link_params.speed_cap_mask &
8535 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8536 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8537
34f80b04 8538 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
8539}
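
/*
 * Worked example of the masking above: a direct-attached XGXS port starts
 * out with 10/100/1000/2500/10000 set in bp->port.supported; if the NVRAM
 * speed_capability_mask has only the D0_10G bit set, every speed bit except
 * SUPPORTED_10000baseT_Full is stripped, while the media, autoneg and pause
 * bits are left untouched (they are never masked here).
 */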

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
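
/*
 * Byte-layout example for bnx2x_set_mac_buf(): with mac_hi = 0x001a and
 * mac_lo = 0xc9123456 (the values shmem hands us in CPU order), the
 * big-endian conversions above yield mac_buf[] = { 0x00, 0x1a, 0xc9, 0x12,
 * 0x34, 0x56 }, i.e. the canonical on-wire MAC byte order expected in
 * dev_addr.
 */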

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! VN %d in single function mode,"
					  " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
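
/*
 * The per-function MAC above is split across two shmem words: mac_upper
 * carries bytes 0-1 in its low 16 bits and mac_lower carries bytes 2-5,
 * so e.g. val2 = 0x001a, val = 0xc9123456 unpacks to 00:1a:c9:12:34:56.
 */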

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		pr_err("FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		pr_err("MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		pr_err("Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
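
/*
 * The tx_ticks/rx_ticks expressions in bnx2x_init_bp() round the 50us/25us
 * defaults down to a multiple of the 4*BNX2X_BTR granularity:
 * (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR) is integer division followed by
 * multiplication, so the result is the largest granule-aligned value that
 * does not exceed 50.
 */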

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
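
/*
 * vn_max_rate example: FUNC_MF_CFG_MAX_BW encodes the per-VN bandwidth cap
 * in units of 100 Mbps, so a field value of 25 yields 2500 above and a 10G
 * link is then reported to ethtool as 2500 Mbps for that function.
 */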

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
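
/*
 * bnx2x_get_regs_len() counts 32-bit registers: plain entries contribute
 * "size" words and wide-bus entries "size * (1 + read_regs_count)" words,
 * hence the final "regdump_len *= 4" to convert words to bytes before the
 * dump header is added.
 */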

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
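
/*
 * NVRAM read flow: a 12-byte read at a dword-aligned offset issues three
 * bnx2x_nvram_read_dword() calls - the first with MCPR_NVM_COMMAND_FIRST,
 * the middle one with no flags, and the last with MCPR_NVM_COMMAND_LAST -
 * all under a single acquire/release of the NVRAM lock.
 */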

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
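
/*
 * Single-byte write example: for offset 0x1032, align_offset is 0x1030 and
 * BYTE_OFFSET(0x1032) is 16, so bnx2x_nvram_write1() reads the dword at
 * 0x1030, clears bits 23:16, ORs the new byte into that lane and writes the
 * dword back - a read-modify-write, since the NVRAM interface here only
 * transfers whole dwords.
 */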

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
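
/*
 * BNX2X_MAX_COALES_TOUT is 0xf0 * 12 = 2880us, so e.g. an ethtool request
 * of rx-usecs 5000 is silently clamped to 2880 by the checks above.
 */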

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
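
/*
 * Pause parameter mapping in bnx2x_set_pauseparam(): rx_pause/tx_pause set
 * BNX2X_FLOW_CTRL_RX/TX in req_flow_ctrl; if neither is set the value still
 * equals BNX2X_FLOW_CTRL_AUTO and is converted to BNX2X_FLOW_CTRL_NONE,
 * while autoneg=1 on an autoneg-capable port with autonegotiated speed
 * restores BNX2X_FLOW_CTRL_AUTO.
 */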

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
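
/*
 * The register self-test pattern above: each table entry is walked twice
 * (writing all-zeros, then all-ones), only the bits in "mask" are compared
 * back, offset1 is the per-port stride (port 1 registers sit offset1 bytes
 * above their port 0 twins), and the original value is always restored.
 */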

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}
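
/*
 * Aside: bnx2x_wait_for_link() is an instance of the common
 * poll-until-true-or-timeout idiom.  A user-space sketch of the shape
 * (the condition polled here is a dummy stand-in):
 */
#if 0
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool wait_for(bool (*cond)(void), int tries, int interval_us)
{
	while (tries--) {
		if (cond())
			return true;
		usleep(interval_us);	/* like the msleep(10) above */
	}
	return false;
}

static int polls;
static bool link_up(void) { return ++polls > 5; }	/* dummy condition */

int main(void)
{
	printf("link %s after %d polls\n",
	       wait_for(link_up, 1000, 10000) ? "up" : "down", polls);
	return 0;
}
#endif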

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
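
/*
 * Aside: the check above works because each NVRAM region ends with its
 * own CRC-32, and running the CRC over data-plus-CRC always leaves the
 * same fixed residual, CRC32_RESIDUAL.  A user-space sketch of that
 * property, assuming the stored CRC is the complemented little-endian
 * CRC-32 (the convention ether_crc_le() pairs with):
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* reflected CRC-32 (poly 0xedb88320), init ~0, no final XOR --
 * the same convention as the kernel's ether_crc_le() */
static uint32_t crc32_le_raw(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xffffffff;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint8_t buf[20] = "nvram test data!";	/* 16 data + 4 CRC bytes */
	uint32_t crc = ~crc32_le_raw(buf, 16);	/* complemented, as stored */

	buf[16] = crc;
	buf[17] = crc >> 8;
	buf[18] = crc >> 16;
	buf[19] = crc >> 24;

	/* the raw CRC over data+CRC is the constant residual */
	printf("residual = 0x%08x\n", crc32_le_raw(buf, 20));
	assert(crc32_le_raw(buf, 20) == 0xdebb20e3);
	return 0;
}
#endif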

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
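
/*
 * Aside: the firmware exports 64-bit counters as two consecutive u32s,
 * most-significant word first, which is what the *offset / *(offset + 1)
 * pairs above pick up.  A user-space sketch of the assembly step,
 * reproducing the driver's HILO_U64() macro for illustration:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define HILO_U64(hi, lo)	((((uint64_t)(hi)) << 32) + (lo))

int main(void)
{
	uint32_t counter[2] = { 0x00000001, 0x80000000 };	/* hi, lo */

	/* 0x180000000 = 6442450944 */
	printf("%llu\n", (unsigned long long)HILO_U64(counter[0], counter[1]));
	return 0;
}
#endif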

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}

static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
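
/*
 * Aside: a stand-alone sketch of the PMCSR bit manipulation done above.
 * The bit positions mirror the PCI_PM_CTRL_* constants; the initial
 * register value is made up:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define PM_CTRL_STATE_MASK	0x0003	/* D-state field */
#define PM_CTRL_PME_ENABLE	0x0100
#define PM_CTRL_PME_STATUS	0x8000

int main(void)
{
	uint16_t pmcsr = 0x4008;	/* dummy initial value */

	/* enter D3hot (state field = 3) with wake-up (PME) enabled */
	pmcsr = (pmcsr & ~PM_CTRL_STATE_MASK) | 3 | PM_CTRL_PME_ENABLE;
	printf("D3hot: 0x%04x\n", pmcsr);

	/* back to D0, writing 1 to clear the sticky PME status bit */
	pmcsr = (pmcsr & ~PM_CTRL_STATE_MASK) | PM_CTRL_PME_STATUS;
	printf("D0:    0x%04x\n", pmcsr);
	return 0;
}
#endif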

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
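
/*
 * Aside: the MAX_RCQ_DESC_CNT adjustment above exists because the last
 * slot of each descriptor page is a next-page pointer, not a real
 * descriptor, so indices must hop over it.  A sketch of the same index
 * arithmetic with made-up page geometry:
 */
#if 0
#include <stdio.h>

#define DESC_PER_PAGE	128			/* hypothetical */
#define LINK_SLOT	(DESC_PER_PAGE - 1)	/* next-page pointer */

static unsigned next_idx(unsigned idx)
{
	idx++;
	if ((idx % DESC_PER_PAGE) == LINK_SLOT)
		idx++;				/* hop over the link slot */
	return idx;
}

int main(void)
{
	unsigned idx = DESC_PER_PAGE - 3;

	for (int i = 0; i < 5; i++, idx = next_idx(idx))
		printf("%u ", idx);
	printf("\n");	/* prints: 125 126 128 129 130 (127 skipped) */
	return 0;
}
#endif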

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus
			 * we need to ensure that the status block indices
			 * have actually been read (bnx2x_update_fpsb_idx)
			 * before this check (bnx2x_has_rx_work), so that we
			 * won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and there is no rmb, the memory
			 * read (bnx2x_update_fpsb_idx) may be postponed to
			 * right before bnx2x_ack_sb).  In that case there
			 * would never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}

/* We split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM).
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
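
/*
 * Aside: a toy model of the split performed above -- one DMA mapping
 * described by two descriptors, the first covering hlen header bytes
 * and the second the remaining payload (descriptor layout simplified;
 * addresses and lengths are made up):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct toy_bd { uint64_t addr; uint16_t len; };

static void split_bd(struct toy_bd *h, struct toy_bd *d, uint16_t hlen)
{
	d->addr = h->addr + hlen;	/* same mapping, offset by hlen */
	d->len  = h->len - hlen;
	h->len  = hlen;
}

int main(void)
{
	struct toy_bd h = { 0x100000, 1514 }, d;

	split_bd(&h, &d, 54);		/* 14 eth + 20 ip + 20 tcp */
	printf("hdr  @0x%llx len %u\n", (unsigned long long)h.addr, h.len);
	printf("data @0x%llx len %u\n", (unsigned long long)d.addr, d.len);
	return 0;
}
#endif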

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
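
/*
 * Aside: bnx2x_csum_fix() relies on one's-complement arithmetic, where
 * removing bytes from a checksummed region is the same as adding the
 * bitwise complement of their partial sum.  A user-space sketch of the
 * identity (not the kernel's csum_* implementation; the split offset
 * must be even so byte lanes stay aligned):
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t csum(const uint8_t *p, int len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)(p[0] << 8);
	return sum;
}

static uint16_t fold(uint32_t sum)	/* fold carries, no complement */
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t pkt[32];
	int fix = 4;	/* bytes the stack summed in excess (even) */

	for (int i = 0; i < 32; i++)
		pkt[i] = (uint8_t)(i * 7 + 3);

	uint16_t full = fold(csum(pkt, 32, 0));
	uint16_t head = fold(csum(pkt, fix, 0));
	uint16_t tail = fold(csum(pkt + fix, 32 - fix, 0));

	/* subtract head from full: add its bitwise complement */
	uint16_t derived = fold((uint32_t)full + (uint16_t)~head);

	/* 0x0000 and 0xffff are the same value in one's complement */
	assert(derived % 0xffff == tail % 0xffff);
	printf("tail 0x%04x  derived 0x%04x\n", tail, derived);
	return 0;
}
#endif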

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
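
/*
 * Aside: the window scan above is a classic sliding-window check: every
 * wnd_size consecutive buffers must hold at least one MSS of data, or
 * the firmware cannot build a segment from one fetch.  A stand-alone
 * sketch with made-up fragment sizes, window width and MSS:
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool needs_linearize(const int *frag, int nfrags, int wnd, int mss)
{
	if (nfrags < wnd)
		return false;

	int sum = 0;
	for (int i = 0; i < wnd; i++)
		sum += frag[i];

	for (int i = 0; ; i++) {
		if (sum < mss)
			return true;		/* FW window underflows */
		if (i + wnd >= nfrags)
			return false;
		sum += frag[i + wnd] - frag[i];	/* slide the window */
	}
}

int main(void)
{
	int frags[] = { 400, 200, 100, 100, 300, 500,
			80, 80, 80, 80, 80, 80, 80 };

	/* window of 10 frags must always cover one 1500-byte MSS */
	printf("linearize: %d\n", needs_linearize(frags, 13, 10, 1500));
	return 0;
}
#endif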

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(mclist, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(mclist, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
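
/*
 * Aside: the E1H branch above implements a 256-bit multicast hash
 * filter -- the top byte of the CRC32c of the MAC picks one of 256 bits
 * spread over 32-bit registers.  A user-space sketch of the bucket
 * selection (the bitwise CRC32c here is a stand-in for crc32c_le(), and
 * the MAC address is an example):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* reflected CRC-32C (Castagnoli, poly 0x82f63b54), seeded with 0 to
 * mirror the driver's crc32c_le(0, ...) call */
static uint32_t crc32c(uint32_t crc, const uint8_t *p, int len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82f63b54 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t mc_filter[8] = { 0 };		/* 8 regs * 32 bits = 256 */

	uint32_t crc = crc32c(0, mac, 6);
	uint32_t bit = (crc >> 24) & 0xff;	/* 1 of 256 filter bits */
	uint32_t regidx = bit >> 5;		/* which register */

	bit &= 0x1f;				/* which bit within it */
	mc_filter[regidx] |= 1u << bit;

	printf("crc 0x%08x -> reg %u bit %u\n", crc, regidx, bit);
	return 0;
}
#endif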

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11760
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		pr_err("Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			pr_err("Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		pr_err("Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		pr_err("Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		pr_err("System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		pr_err("Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		pr_err("Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

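/* Decode the negotiated PCIe link width and speed from the chip's link
 * control register; used only for the probe banner in bnx2x_init_one().
 */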
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			pr_err("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			pr_err("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

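/* The firmware file is stored big-endian; the helpers below convert the
 * data blob, the ops array and the offsets array to host byte order into
 * buffers allocated by BNX2X_ALLOC_AND_SET().
 */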
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)

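/* Request the firmware image from userspace, validate it and unpack the
 * init arrays.  The STORM microcode sections are referenced directly
 * inside the firmware blob, so bp->firmware must stay around until
 * bnx2x_remove_one().
 */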
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	pr_info("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		pr_err("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		pr_err("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

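/* PCI probe entry point: allocate the multi-queue netdev, run PCI and
 * driver-state init, load the firmware tables and register the netdev.
 */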
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		pr_err("Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		pr_err("Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

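/* PCI error recovery (EEH) support.  This is a trimmed-down unload path:
 * the hardware may no longer be accessible, so only driver-side resources
 * are released and the E1 CAM shadow is invalidated.
 */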
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

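/* CNIC glue: the cnic module (iSCSI offload) funnels its slow-path work
 * queue entries through the routines below and discovers the L2 driver's
 * resources via bnx2x_cnic_probe().
 */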
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

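/* Accept up to @count kwqes from cnic; entries that exceed the SPQ budget
 * are parked on the kwq ring and posted later by bnx2x_cnic_sp_post().
 * Returns the number of entries actually queued.
 */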
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

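/* Describe the interrupt resources handed to cnic: slot 0 carries the
 * CNIC status block (driven by MSI-X vector 1 when MSI-X is in use),
 * slot 1 the driver's default status block.
 */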
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

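/* Registration allocates the kwq ring and publishes the ops pointer with
 * RCU, which lets bnx2x_cnic_ctl_send_bh() read it locklessly;
 * unregistration below reverses this under cnic_mutex + synchronize_rcu().
 */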
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

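/* Exported for the cnic module: everything cnic needs (register windows,
 * context-table geometry, callbacks) is published through this single
 * structure.
 */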
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */