bnx2x: Properly release allocated MSI-X/MSI vectors
[deliverable/linux.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
d05c26ce 3 * Copyright (c) 2007-2009 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea 51#include <linux/io.h>
45229b42 52#include <linux/stringify.h>
a2fbb9ea 53
359d8b15 54
a2fbb9ea
ET
55#include "bnx2x.h"
56#include "bnx2x_init.h"
94a78b79 57#include "bnx2x_init_ops.h"
0a64ea57 58#include "bnx2x_dump.h"
a2fbb9ea 59
54b9ddaa 60#define DRV_MODULE_VERSION "1.52.1-5"
0ab365f4 61#define DRV_MODULE_RELDATE "2009/11/09"
34f80b04 62#define BNX2X_BC_VER 0x040200
a2fbb9ea 63
94a78b79
VZ
64#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
45229b42
BH
67#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
94a78b79 74
34f80b04
EG
75/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
a2fbb9ea 77
53a10565 78static char version[] __devinitdata =
34f80b04 79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81
24e3fcef 82MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 83MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
84MODULE_LICENSE("GPL");
85MODULE_VERSION(DRV_MODULE_VERSION);
45229b42
BH
86MODULE_FIRMWARE(FW_FILE_NAME_E1);
87MODULE_FIRMWARE(FW_FILE_NAME_E1H);
a2fbb9ea 88
555f6c78
EG
89static int multi_mode = 1;
90module_param(multi_mode, int, 0);
ca00392c
EG
91MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
93
54b9ddaa
VZ
94static int num_queues;
95module_param(num_queues, int, 0);
96MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is as a number of CPUs)");
555f6c78 98
19680c48 99static int disable_tpa;
19680c48 100module_param(disable_tpa, int, 0);
9898f86d 101MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
102
103static int int_mode;
104module_param(int_mode, int, 0);
105MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
106
a18f5128
EG
107static int dropless_fc;
108module_param(dropless_fc, int, 0);
109MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110
9898f86d 111static int poll;
a2fbb9ea 112module_param(poll, int, 0);
9898f86d 113MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
114
115static int mrrs = -1;
116module_param(mrrs, int, 0);
117MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
9898f86d 119static int debug;
a2fbb9ea 120module_param(debug, int, 0);
9898f86d
EG
121MODULE_PARM_DESC(debug, " Default debug msglevel");
122
123static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 124
1cf167f2 125static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
126
127enum bnx2x_board_type {
128 BCM57710 = 0,
34f80b04
EG
129 BCM57711 = 1,
130 BCM57711E = 2,
a2fbb9ea
ET
131};
132
34f80b04 133/* indexed by board_type, above */
53a10565 134static struct {
a2fbb9ea
ET
135 char *name;
136} board_info[] __devinitdata = {
34f80b04
EG
137 { "Broadcom NetXtreme II BCM57710 XGb" },
138 { "Broadcom NetXtreme II BCM57711 XGb" },
139 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
140};
141
34f80b04 142
a3aa1884 143static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
e4ed7113
EG
144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
a2fbb9ea
ET
147 { 0 }
148};
149
150MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152/****************************************************************************
153* General service functions
154****************************************************************************/
155
156/* used only at init
157 * locking is done by mcp
158 */
573f2035 159void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
a2fbb9ea
ET
160{
161 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
164 PCICFG_VENDOR_ID_OFFSET);
165}
166
a2fbb9ea
ET
167static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168{
169 u32 val;
170
171 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174 PCICFG_VENDOR_ID_OFFSET);
175
176 return val;
177}
a2fbb9ea
ET
178
179static const u32 dmae_reg_go_c[] = {
180 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
181 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
182 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
183 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
184};
185
186/* copy command into DMAE command memory and set DMAE command go */
187static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
188 int idx)
189{
190 u32 cmd_offset;
191 int i;
192
193 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
196
ad8d3948
EG
197 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
199 }
200 REG_WR(bp, dmae_reg_go_c[idx], 1);
201}
202
ad8d3948
EG
203void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
204 u32 len32)
a2fbb9ea 205{
5ff7b6d4 206 struct dmae_command dmae;
a2fbb9ea 207 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
208 int cnt = 200;
209
210 if (!bp->dmae_ready) {
211 u32 *data = bnx2x_sp(bp, wb_data[0]);
212
213 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
214 " using indirect\n", dst_addr, len32);
215 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 return;
217 }
218
5ff7b6d4 219 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 220
5ff7b6d4
EG
221 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
222 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
223 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 224#ifdef __BIG_ENDIAN
5ff7b6d4 225 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 226#else
5ff7b6d4 227 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 228#endif
5ff7b6d4
EG
229 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
230 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
231 dmae.src_addr_lo = U64_LO(dma_addr);
232 dmae.src_addr_hi = U64_HI(dma_addr);
233 dmae.dst_addr_lo = dst_addr >> 2;
234 dmae.dst_addr_hi = 0;
235 dmae.len = len32;
236 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
237 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 239
c3eefaf6 240 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
241 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
242 "dst_addr [%x:%08x (%08x)]\n"
243 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
244 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
245 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
246 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
ad8d3948 247 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
248 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
249 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea 250
5ff7b6d4
EG
251 mutex_lock(&bp->dmae_mutex);
252
a2fbb9ea
ET
253 *wb_comp = 0;
254
5ff7b6d4 255 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
256
257 udelay(5);
ad8d3948
EG
258
259 while (*wb_comp != DMAE_COMP_VAL) {
260 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
261
ad8d3948 262 if (!cnt) {
c3eefaf6 263 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
264 break;
265 }
ad8d3948 266 cnt--;
12469401
YG
267 /* adjust delay for emulation/FPGA */
268 if (CHIP_REV_IS_SLOW(bp))
269 msleep(100);
270 else
271 udelay(5);
a2fbb9ea 272 }
ad8d3948
EG
273
274 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
275}
276
c18487ee 277void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 278{
5ff7b6d4 279 struct dmae_command dmae;
a2fbb9ea 280 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
281 int cnt = 200;
282
283 if (!bp->dmae_ready) {
284 u32 *data = bnx2x_sp(bp, wb_data[0]);
285 int i;
286
287 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
288 " using indirect\n", src_addr, len32);
289 for (i = 0; i < len32; i++)
290 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 return;
292 }
293
5ff7b6d4 294 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 295
5ff7b6d4
EG
296 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
297 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
298 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 299#ifdef __BIG_ENDIAN
5ff7b6d4 300 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 301#else
5ff7b6d4 302 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 303#endif
5ff7b6d4
EG
304 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
305 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
306 dmae.src_addr_lo = src_addr >> 2;
307 dmae.src_addr_hi = 0;
308 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
309 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
310 dmae.len = len32;
311 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
312 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 314
c3eefaf6 315 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
316 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
317 "dst_addr [%x:%08x (%08x)]\n"
318 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
319 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
320 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
321 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
a2fbb9ea 322
5ff7b6d4
EG
323 mutex_lock(&bp->dmae_mutex);
324
325 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
a2fbb9ea
ET
326 *wb_comp = 0;
327
5ff7b6d4 328 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
329
330 udelay(5);
ad8d3948
EG
331
332 while (*wb_comp != DMAE_COMP_VAL) {
333
ad8d3948 334 if (!cnt) {
c3eefaf6 335 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
336 break;
337 }
ad8d3948 338 cnt--;
12469401
YG
339 /* adjust delay for emulation/FPGA */
340 if (CHIP_REV_IS_SLOW(bp))
341 msleep(100);
342 else
343 udelay(5);
a2fbb9ea 344 }
ad8d3948 345 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
346 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
347 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
348
349 mutex_unlock(&bp->dmae_mutex);
350}
351
573f2035
EG
352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len)
354{
355 int offset = 0;
356
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
362 }
363
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365}
366
ad8d3948
EG
367/* used only for slowpath so not inlined */
368static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369{
370 u32 wb_write[2];
371
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 375}
a2fbb9ea 376
ad8d3948
EG
377#ifdef USE_WB_RD
378static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
379{
380 u32 wb_data[2];
381
382 REG_RD_DMAE(bp, reg, wb_data, 2);
383
384 return HILO_U64(wb_data[0], wb_data[1]);
385}
386#endif
387
a2fbb9ea
ET
388static int bnx2x_mc_assert(struct bnx2x *bp)
389{
a2fbb9ea 390 char last_idx;
34f80b04
EG
391 int i, rc = 0;
392 u32 row0, row1, row2, row3;
393
394 /* XSTORM */
395 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
396 XSTORM_ASSERT_LIST_INDEX_OFFSET);
397 if (last_idx)
398 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
399
400 /* print the asserts */
401 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
402
403 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i));
405 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
407 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
409 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
411
412 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
413 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
414 " 0x%08x 0x%08x 0x%08x\n",
415 i, row3, row2, row1, row0);
416 rc++;
417 } else {
418 break;
419 }
420 }
421
422 /* TSTORM */
423 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
424 TSTORM_ASSERT_LIST_INDEX_OFFSET);
425 if (last_idx)
426 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
427
428 /* print the asserts */
429 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
430
431 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i));
433 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
435 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
437 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
439
440 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
441 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
442 " 0x%08x 0x%08x 0x%08x\n",
443 i, row3, row2, row1, row0);
444 rc++;
445 } else {
446 break;
447 }
448 }
449
450 /* CSTORM */
451 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
452 CSTORM_ASSERT_LIST_INDEX_OFFSET);
453 if (last_idx)
454 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
455
456 /* print the asserts */
457 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
458
459 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i));
461 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
463 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
465 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
467
468 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
469 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
470 " 0x%08x 0x%08x 0x%08x\n",
471 i, row3, row2, row1, row0);
472 rc++;
473 } else {
474 break;
475 }
476 }
477
478 /* USTORM */
479 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
480 USTORM_ASSERT_LIST_INDEX_OFFSET);
481 if (last_idx)
482 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
483
484 /* print the asserts */
485 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
486
487 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i));
489 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 4);
491 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 8);
493 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 12);
495
496 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
497 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
498 " 0x%08x 0x%08x 0x%08x\n",
499 i, row3, row2, row1, row0);
500 rc++;
501 } else {
502 break;
a2fbb9ea
ET
503 }
504 }
34f80b04 505
a2fbb9ea
ET
506 return rc;
507}
c14423fe 508
a2fbb9ea
ET
509static void bnx2x_fw_dump(struct bnx2x *bp)
510{
511 u32 mark, offset;
4781bfad 512 __be32 data[9];
a2fbb9ea
ET
513 int word;
514
515 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772 516 mark = ((mark + 0x3) & ~0x3);
ad361c98 517 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 518
ad361c98 519 printk(KERN_ERR PFX);
a2fbb9ea
ET
520 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
521 for (word = 0; word < 8; word++)
522 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
523 offset + 4*word));
524 data[8] = 0x0;
49d66772 525 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
526 }
527 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
528 for (word = 0; word < 8; word++)
529 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
530 offset + 4*word));
531 data[8] = 0x0;
49d66772 532 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea 533 }
ad361c98 534 printk(KERN_ERR PFX "end of fw dump\n");
a2fbb9ea
ET
535}
536
537static void bnx2x_panic_dump(struct bnx2x *bp)
538{
539 int i;
540 u16 j, start, end;
541
66e855f3
YG
542 bp->stats_state = STATS_STATE_DISABLED;
543 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
544
a2fbb9ea
ET
545 BNX2X_ERR("begin crash dump -----------------\n");
546
8440d2b6
EG
547 /* Indices */
548 /* Common */
549 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
550 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
551 " spq_prod_idx(%u)\n",
552 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
553 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
554
555 /* Rx */
54b9ddaa 556 for_each_queue(bp, i) {
a2fbb9ea 557 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 558
c3eefaf6 559 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
66e855f3
YG
560 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
561 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
8440d2b6 562 i, fp->rx_bd_prod, fp->rx_bd_cons,
66e855f3
YG
563 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
564 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
c3eefaf6 565 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
8440d2b6
EG
566 " fp_u_idx(%x) *sb_u_idx(%x)\n",
567 fp->rx_sge_prod, fp->last_max_sge,
568 le16_to_cpu(fp->fp_u_idx),
569 fp->status_blk->u_status_block.status_block_index);
570 }
a2fbb9ea 571
8440d2b6 572 /* Tx */
54b9ddaa 573 for_each_queue(bp, i) {
8440d2b6 574 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 575
c3eefaf6 576 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
8440d2b6
EG
577 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
578 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
579 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
c3eefaf6 580 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
ca00392c 581 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
8440d2b6 582 fp->status_blk->c_status_block.status_block_index,
ca00392c 583 fp->tx_db.data.prod);
8440d2b6 584 }
a2fbb9ea 585
8440d2b6
EG
586 /* Rings */
587 /* Rx */
54b9ddaa 588 for_each_queue(bp, i) {
8440d2b6 589 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
590
591 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
592 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 593 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
594 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
595 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
596
c3eefaf6
EG
597 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
598 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
599 }
600
3196a88a
EG
601 start = RX_SGE(fp->rx_sge_prod);
602 end = RX_SGE(fp->last_max_sge);
8440d2b6 603 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
604 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
605 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
606
c3eefaf6
EG
607 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
608 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
609 }
610
a2fbb9ea
ET
611 start = RCQ_BD(fp->rx_comp_cons - 10);
612 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 613 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
614 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
615
c3eefaf6
EG
616 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
617 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
618 }
619 }
620
8440d2b6 621 /* Tx */
54b9ddaa 622 for_each_queue(bp, i) {
8440d2b6
EG
623 struct bnx2x_fastpath *fp = &bp->fp[i];
624
625 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
626 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
627 for (j = start; j != end; j = TX_BD(j + 1)) {
628 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
629
c3eefaf6
EG
630 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
631 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
632 }
633
634 start = TX_BD(fp->tx_bd_cons - 10);
635 end = TX_BD(fp->tx_bd_cons + 254);
636 for (j = start; j != end; j = TX_BD(j + 1)) {
637 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
638
c3eefaf6
EG
639 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
640 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
641 }
642 }
a2fbb9ea 643
34f80b04 644 bnx2x_fw_dump(bp);
a2fbb9ea
ET
645 bnx2x_mc_assert(bp);
646 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
647}
648
615f8fd9 649static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 650{
34f80b04 651 int port = BP_PORT(bp);
a2fbb9ea
ET
652 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
653 u32 val = REG_RD(bp, addr);
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 655 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
656
657 if (msix) {
8badd27a
EG
658 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
659 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
660 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
661 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
662 } else if (msi) {
663 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
664 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
665 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
666 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
667 } else {
668 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 669 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
670 HC_CONFIG_0_REG_INT_LINE_EN_0 |
671 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 672
8badd27a
EG
673 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
674 val, port, addr);
615f8fd9
ET
675
676 REG_WR(bp, addr, val);
677
a2fbb9ea
ET
678 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
679 }
680
8badd27a
EG
681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
682 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
683
684 REG_WR(bp, addr, val);
37dbbf32
EG
685 /*
686 * Ensure that HC_CONFIG is written before leading/trailing edge config
687 */
688 mmiowb();
689 barrier();
34f80b04
EG
690
691 if (CHIP_IS_E1H(bp)) {
692 /* init leading/trailing edge */
693 if (IS_E1HMF(bp)) {
8badd27a 694 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 695 if (bp->port.pmf)
4acac6a5
EG
696 /* enable nig and gpio3 attention */
697 val |= 0x1100;
34f80b04
EG
698 } else
699 val = 0xffff;
700
701 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
702 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
703 }
37dbbf32
EG
704
705 /* Make sure that interrupts are indeed enabled from here on */
706 mmiowb();
a2fbb9ea
ET
707}
708
615f8fd9 709static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 710{
34f80b04 711 int port = BP_PORT(bp);
a2fbb9ea
ET
712 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
713 u32 val = REG_RD(bp, addr);
714
715 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
716 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
717 HC_CONFIG_0_REG_INT_LINE_EN_0 |
718 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
719
720 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
721 val, port, addr);
722
8badd27a
EG
723 /* flush all outstanding writes */
724 mmiowb();
725
a2fbb9ea
ET
726 REG_WR(bp, addr, val);
727 if (REG_RD(bp, addr) != val)
728 BNX2X_ERR("BUG! proper val not read from IGU!\n");
729}
730
f8ef6e44 731static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 732{
a2fbb9ea 733 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 734 int i, offset;
a2fbb9ea 735
34f80b04 736 /* disable interrupt handling */
a2fbb9ea 737 atomic_inc(&bp->intr_sem);
e1510706
EG
738 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
739
f8ef6e44
YG
740 if (disable_hw)
741 /* prevent the HW from sending interrupts */
742 bnx2x_int_disable(bp);
a2fbb9ea
ET
743
744 /* make sure all ISRs are done */
745 if (msix) {
8badd27a
EG
746 synchronize_irq(bp->msix_table[0].vector);
747 offset = 1;
37b091ba
MC
748#ifdef BCM_CNIC
749 offset++;
750#endif
a2fbb9ea 751 for_each_queue(bp, i)
8badd27a 752 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
753 } else
754 synchronize_irq(bp->pdev->irq);
755
756 /* make sure sp_task is not running */
1cf167f2
EG
757 cancel_delayed_work(&bp->sp_task);
758 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
759}
760
34f80b04 761/* fast path */
a2fbb9ea
ET
762
763/*
34f80b04 764 * General service functions
a2fbb9ea
ET
765 */
766
34f80b04 767static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
768 u8 storm, u16 index, u8 op, u8 update)
769{
5c862848
EG
770 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
771 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
772 struct igu_ack_register igu_ack;
773
774 igu_ack.status_block_index = index;
775 igu_ack.sb_id_and_flags =
34f80b04 776 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
777 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
778 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
779 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
780
5c862848
EG
781 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
782 (*(u32 *)&igu_ack), hc_addr);
783 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
37dbbf32
EG
784
785 /* Make sure that ACK is written */
786 mmiowb();
787 barrier();
a2fbb9ea
ET
788}
789
54b9ddaa 790static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
791{
792 struct host_status_block *fpsb = fp->status_blk;
a2fbb9ea
ET
793
794 barrier(); /* status block is written to by the chip */
54b9ddaa
VZ
795 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
796 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
a2fbb9ea
ET
797}
798
a2fbb9ea
ET
799static u16 bnx2x_ack_int(struct bnx2x *bp)
800{
5c862848
EG
801 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
802 COMMAND_REG_SIMD_MASK);
803 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 804
5c862848
EG
805 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
806 result, hc_addr);
a2fbb9ea 807
a2fbb9ea
ET
808 return result;
809}
810
811
812/*
813 * fast path service functions
814 */
815
e8b5fc51
VZ
816static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
817{
818 /* Tell compiler that consumer and producer can change */
819 barrier();
820 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
237907c1
EG
821}
822
a2fbb9ea
ET
823/* free skb in the packet ring at pos idx
824 * return idx of last bd freed
825 */
826static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
827 u16 idx)
828{
829 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
ca00392c
EG
830 struct eth_tx_start_bd *tx_start_bd;
831 struct eth_tx_bd *tx_data_bd;
a2fbb9ea 832 struct sk_buff *skb = tx_buf->skb;
34f80b04 833 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
834 int nbd;
835
54b9ddaa
VZ
836 /* prefetch skb end pointer to speedup dev_kfree_skb() */
837 prefetch(&skb->end);
838
a2fbb9ea
ET
839 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
840 idx, tx_buf, skb);
841
842 /* unmap first bd */
843 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
ca00392c
EG
844 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
845 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
846 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
a2fbb9ea 847
ca00392c 848 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
a2fbb9ea 849#ifdef BNX2X_STOP_ON_ERROR
ca00392c 850 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
34f80b04 851 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
852 bnx2x_panic();
853 }
854#endif
ca00392c 855 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea 856
ca00392c
EG
857 /* Get the next bd */
858 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea 859
ca00392c
EG
860 /* Skip a parse bd... */
861 --nbd;
862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863
864 /* ...and the TSO split header bd since they have no mapping */
865 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
866 --nbd;
867 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea
ET
868 }
869
870 /* now free frags */
871 while (nbd > 0) {
872
873 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
ca00392c
EG
874 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
875 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
876 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
a2fbb9ea
ET
877 if (--nbd)
878 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
879 }
880
881 /* release skb */
53e5e96e 882 WARN_ON(!skb);
54b9ddaa 883 dev_kfree_skb(skb);
a2fbb9ea
ET
884 tx_buf->first_bd = 0;
885 tx_buf->skb = NULL;
886
34f80b04 887 return new_cons;
a2fbb9ea
ET
888}
889
34f80b04 890static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 891{
34f80b04
EG
892 s16 used;
893 u16 prod;
894 u16 cons;
a2fbb9ea 895
34f80b04 896 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
897 prod = fp->tx_bd_prod;
898 cons = fp->tx_bd_cons;
899
34f80b04
EG
900 /* NUM_TX_RINGS = number of "next-page" entries
901 It will be used as a threshold */
902 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 903
34f80b04 904#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
905 WARN_ON(used < 0);
906 WARN_ON(used > fp->bp->tx_ring_size);
907 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 908#endif
a2fbb9ea 909
34f80b04 910 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
911}
912
54b9ddaa
VZ
913static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914{
915 u16 hw_cons;
916
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
921}
922
923static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
924{
925 struct bnx2x *bp = fp->bp;
555f6c78 926 struct netdev_queue *txq;
a2fbb9ea 927 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
a2fbb9ea
ET
928
929#ifdef BNX2X_STOP_ON_ERROR
930 if (unlikely(bp->panic))
54b9ddaa 931 return -1;
a2fbb9ea
ET
932#endif
933
54b9ddaa 934 txq = netdev_get_tx_queue(bp->dev, fp->index);
a2fbb9ea
ET
935 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
936 sw_cons = fp->tx_pkt_cons;
937
938 while (sw_cons != hw_cons) {
939 u16 pkt_cons;
940
941 pkt_cons = TX_BD(sw_cons);
942
943 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
944
34f80b04 945 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
946 hw_cons, sw_cons, pkt_cons);
947
34f80b04 948/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
949 rmb();
950 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
951 }
952*/
953 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
954 sw_cons++;
a2fbb9ea
ET
955 }
956
957 fp->tx_pkt_cons = sw_cons;
958 fp->tx_bd_cons = bd_cons;
959
a2fbb9ea 960 /* TBD need a thresh? */
555f6c78 961 if (unlikely(netif_tx_queue_stopped(txq))) {
a2fbb9ea 962
6044735d
EG
963 /* Need to make the tx_bd_cons update visible to start_xmit()
964 * before checking for netif_tx_queue_stopped(). Without the
965 * memory barrier, there is a small possibility that
966 * start_xmit() will miss it and cause the queue to be stopped
967 * forever.
968 */
969 smp_mb();
970
555f6c78 971 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 972 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 973 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 974 netif_tx_wake_queue(txq);
a2fbb9ea 975 }
54b9ddaa 976 return 0;
a2fbb9ea
ET
977}
978
993ac7b5
MC
979#ifdef BCM_CNIC
980static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
981#endif
3196a88a 982
a2fbb9ea
ET
983static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
984 union eth_rx_cqe *rr_cqe)
985{
986 struct bnx2x *bp = fp->bp;
987 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
988 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
989
34f80b04 990 DP(BNX2X_MSG_SP,
a2fbb9ea 991 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 992 fp->index, cid, command, bp->state,
34f80b04 993 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
994
995 bp->spq_left++;
996
0626b899 997 if (fp->index) {
a2fbb9ea
ET
998 switch (command | fp->state) {
999 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1000 BNX2X_FP_STATE_OPENING):
1001 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1002 cid);
1003 fp->state = BNX2X_FP_STATE_OPEN;
1004 break;
1005
1006 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1007 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1008 cid);
1009 fp->state = BNX2X_FP_STATE_HALTED;
1010 break;
1011
1012 default:
34f80b04
EG
1013 BNX2X_ERR("unexpected MC reply (%d) "
1014 "fp->state is %x\n", command, fp->state);
1015 break;
a2fbb9ea 1016 }
34f80b04 1017 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1018 return;
1019 }
c14423fe 1020
a2fbb9ea
ET
1021 switch (command | bp->state) {
1022 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1023 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1024 bp->state = BNX2X_STATE_OPEN;
1025 break;
1026
1027 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1028 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1029 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1030 fp->state = BNX2X_FP_STATE_HALTED;
1031 break;
1032
a2fbb9ea 1033 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1034 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 1035 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
1036 break;
1037
993ac7b5
MC
1038#ifdef BCM_CNIC
1039 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1040 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1041 bnx2x_cnic_cfc_comp(bp, cid);
1042 break;
1043#endif
3196a88a 1044
a2fbb9ea 1045 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 1046 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 1047 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
e665bfda
MC
1048 bp->set_mac_pending--;
1049 smp_wmb();
a2fbb9ea
ET
1050 break;
1051
49d66772 1052 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1053 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
e665bfda
MC
1054 bp->set_mac_pending--;
1055 smp_wmb();
49d66772
ET
1056 break;
1057
a2fbb9ea 1058 default:
34f80b04 1059 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 1060 command, bp->state);
34f80b04 1061 break;
a2fbb9ea 1062 }
34f80b04 1063 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1064}
1065
7a9b2557
VZ
1066static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1067 struct bnx2x_fastpath *fp, u16 index)
1068{
1069 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1070 struct page *page = sw_buf->page;
1071 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1072
1073 /* Skip "next page" elements */
1074 if (!page)
1075 return;
1076
1077 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1078 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1079 __free_pages(page, PAGES_PER_SGE_SHIFT);
1080
1081 sw_buf->page = NULL;
1082 sge->addr_hi = 0;
1083 sge->addr_lo = 0;
1084}
1085
1086static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1087 struct bnx2x_fastpath *fp, int last)
1088{
1089 int i;
1090
1091 for (i = 0; i < last; i++)
1092 bnx2x_free_rx_sge(bp, fp, i);
1093}
1094
1095static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1096 struct bnx2x_fastpath *fp, u16 index)
1097{
1098 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1099 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1100 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1101 dma_addr_t mapping;
1102
1103 if (unlikely(page == NULL))
1104 return -ENOMEM;
1105
4f40f2cb 1106 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1107 PCI_DMA_FROMDEVICE);
8d8bb39b 1108 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1109 __free_pages(page, PAGES_PER_SGE_SHIFT);
1110 return -ENOMEM;
1111 }
1112
1113 sw_buf->page = page;
1114 pci_unmap_addr_set(sw_buf, mapping, mapping);
1115
1116 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1117 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1118
1119 return 0;
1120}
1121
a2fbb9ea
ET
1122static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1123 struct bnx2x_fastpath *fp, u16 index)
1124{
1125 struct sk_buff *skb;
1126 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1127 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1128 dma_addr_t mapping;
1129
1130 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1131 if (unlikely(skb == NULL))
1132 return -ENOMEM;
1133
437cf2f1 1134 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1135 PCI_DMA_FROMDEVICE);
8d8bb39b 1136 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1137 dev_kfree_skb(skb);
1138 return -ENOMEM;
1139 }
1140
1141 rx_buf->skb = skb;
1142 pci_unmap_addr_set(rx_buf, mapping, mapping);
1143
1144 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1145 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1146
1147 return 0;
1148}
1149
1150/* note that we are not allocating a new skb,
1151 * we are just moving one from cons to prod
1152 * we are not creating a new mapping,
1153 * so there is no need to check for dma_mapping_error().
1154 */
1155static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1156 struct sk_buff *skb, u16 cons, u16 prod)
1157{
1158 struct bnx2x *bp = fp->bp;
1159 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1160 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1161 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1162 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1163
1164 pci_dma_sync_single_for_device(bp->pdev,
1165 pci_unmap_addr(cons_rx_buf, mapping),
87942b46 1166 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
1167
1168 prod_rx_buf->skb = cons_rx_buf->skb;
1169 pci_unmap_addr_set(prod_rx_buf, mapping,
1170 pci_unmap_addr(cons_rx_buf, mapping));
1171 *prod_bd = *cons_bd;
1172}
1173
7a9b2557
VZ
1174static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1175 u16 idx)
1176{
1177 u16 last_max = fp->last_max_sge;
1178
1179 if (SUB_S16(idx, last_max) > 0)
1180 fp->last_max_sge = idx;
1181}
1182
1183static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1184{
1185 int i, j;
1186
1187 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188 int idx = RX_SGE_CNT * i - 1;
1189
1190 for (j = 0; j < 2; j++) {
1191 SGE_MASK_CLEAR_BIT(fp, idx);
1192 idx--;
1193 }
1194 }
1195}
1196
1197static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1198 struct eth_fast_path_rx_cqe *fp_cqe)
1199{
1200 struct bnx2x *bp = fp->bp;
4f40f2cb 1201 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1202 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1203 SGE_PAGE_SHIFT;
7a9b2557
VZ
1204 u16 last_max, last_elem, first_elem;
1205 u16 delta = 0;
1206 u16 i;
1207
1208 if (!sge_len)
1209 return;
1210
1211 /* First mark all used pages */
1212 for (i = 0; i < sge_len; i++)
1213 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1214
1215 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1216 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1217
1218 /* Here we assume that the last SGE index is the biggest */
1219 prefetch((void *)(fp->sge_mask));
1220 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1221
1222 last_max = RX_SGE(fp->last_max_sge);
1223 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1224 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1225
1226 /* If ring is not full */
1227 if (last_elem + 1 != first_elem)
1228 last_elem++;
1229
1230 /* Now update the prod */
1231 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1232 if (likely(fp->sge_mask[i]))
1233 break;
1234
1235 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1236 delta += RX_SGE_MASK_ELEM_SZ;
1237 }
1238
1239 if (delta > 0) {
1240 fp->rx_sge_prod += delta;
1241 /* clear page-end entries */
1242 bnx2x_clear_sge_mask_next_elems(fp);
1243 }
1244
1245 DP(NETIF_MSG_RX_STATUS,
1246 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1247 fp->last_max_sge, fp->rx_sge_prod);
1248}
1249
1250static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1251{
1252 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1253 memset(fp->sge_mask, 0xff,
1254 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1255
33471629
EG
1256 /* Clear the two last indices in the page to 1:
1257 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1258 hence will never be indicated and should be removed from
1259 the calculations. */
1260 bnx2x_clear_sge_mask_next_elems(fp);
1261}
1262
1263static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1264 struct sk_buff *skb, u16 cons, u16 prod)
1265{
1266 struct bnx2x *bp = fp->bp;
1267 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1268 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1269 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1270 dma_addr_t mapping;
1271
1272 /* move empty skb from pool to prod and map it */
1273 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1274 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1275 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1276 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1277
1278 /* move partial skb from cons to pool (don't unmap yet) */
1279 fp->tpa_pool[queue] = *cons_rx_buf;
1280
1281 /* mark bin state as start - print error if current state != stop */
1282 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1283 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1284
1285 fp->tpa_state[queue] = BNX2X_TPA_START;
1286
1287 /* point prod_bd to new skb */
1288 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1289 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1290
1291#ifdef BNX2X_STOP_ON_ERROR
1292 fp->tpa_queue_used |= (1 << queue);
1293#ifdef __powerpc64__
1294 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1295#else
1296 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1297#endif
1298 fp->tpa_queue_used);
1299#endif
1300}
1301
1302static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1303 struct sk_buff *skb,
1304 struct eth_fast_path_rx_cqe *fp_cqe,
1305 u16 cqe_idx)
1306{
1307 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1308 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1309 u32 i, frag_len, frag_size, pages;
1310 int err;
1311 int j;
1312
1313 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1314 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1315
1316 /* This is needed in order to enable forwarding support */
1317 if (frag_size)
4f40f2cb 1318 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1319 max(frag_size, (u32)len_on_bd));
1320
1321#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1322 if (pages >
1323 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1324 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1325 pages, cqe_idx);
1326 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1327 fp_cqe->pkt_len, len_on_bd);
1328 bnx2x_panic();
1329 return -EINVAL;
1330 }
1331#endif
1332
1333 /* Run through the SGL and compose the fragmented skb */
1334 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1335 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1336
1337 /* FW gives the indices of the SGE as if the ring is an array
1338 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1339 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1340 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1341 old_rx_pg = *rx_pg;
1342
1343 /* If we fail to allocate a substitute page, we simply stop
1344 where we are and drop the whole packet */
1345 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1346 if (unlikely(err)) {
de832a55 1347 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1348 return err;
1349 }
1350
1351 /* Unmap the page as we r going to pass it to the stack */
1352 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1353 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1354
1355 /* Add one frag and update the appropriate fields in the skb */
1356 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1357
1358 skb->data_len += frag_len;
1359 skb->truesize += frag_len;
1360 skb->len += frag_len;
1361
1362 frag_size -= frag_len;
1363 }
1364
1365 return 0;
1366}
1367
1368static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1369 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1370 u16 cqe_idx)
1371{
1372 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1373 struct sk_buff *skb = rx_buf->skb;
1374 /* alloc new skb */
1375 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1376
1377 /* Unmap skb in the pool anyway, as we are going to change
1378 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1379 fails. */
1380 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1381 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1382
7a9b2557 1383 if (likely(new_skb)) {
66e855f3
YG
1384 /* fix ip xsum and give it to the stack */
1385 /* (no need to map the new skb) */
0c6671b0
EG
1386#ifdef BCM_VLAN
1387 int is_vlan_cqe =
1388 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1389 PARSING_FLAGS_VLAN);
1390 int is_not_hwaccel_vlan_cqe =
1391 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1392#endif
7a9b2557
VZ
1393
1394 prefetch(skb);
1395 prefetch(((char *)(skb)) + 128);
1396
7a9b2557
VZ
1397#ifdef BNX2X_STOP_ON_ERROR
1398 if (pad + len > bp->rx_buf_size) {
1399 BNX2X_ERR("skb_put is about to fail... "
1400 "pad %d len %d rx_buf_size %d\n",
1401 pad, len, bp->rx_buf_size);
1402 bnx2x_panic();
1403 return;
1404 }
1405#endif
1406
1407 skb_reserve(skb, pad);
1408 skb_put(skb, len);
1409
1410 skb->protocol = eth_type_trans(skb, bp->dev);
1411 skb->ip_summed = CHECKSUM_UNNECESSARY;
1412
1413 {
1414 struct iphdr *iph;
1415
1416 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1417#ifdef BCM_VLAN
1418 /* If there is no Rx VLAN offloading -
1419 take VLAN tag into an account */
1420 if (unlikely(is_not_hwaccel_vlan_cqe))
1421 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1422#endif
7a9b2557
VZ
1423 iph->check = 0;
1424 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1425 }
1426
1427 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1428 &cqe->fast_path_cqe, cqe_idx)) {
1429#ifdef BCM_VLAN
0c6671b0
EG
1430 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1431 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1432 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1433 le16_to_cpu(cqe->fast_path_cqe.
1434 vlan_tag));
1435 else
1436#endif
1437 netif_receive_skb(skb);
1438 } else {
1439 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1440 " - dropping packet!\n");
1441 dev_kfree_skb(skb);
1442 }
1443
7a9b2557
VZ
1444
1445 /* put new skb in bin */
1446 fp->tpa_pool[queue].skb = new_skb;
1447
1448 } else {
66e855f3 1449 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1450 DP(NETIF_MSG_RX_STATUS,
1451 "Failed to allocate new skb - dropping packet!\n");
de832a55 1452 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1453 }
1454
1455 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1456}
1457
1458static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1459 struct bnx2x_fastpath *fp,
1460 u16 bd_prod, u16 rx_comp_prod,
1461 u16 rx_sge_prod)
1462{
8d9c5f34 1463 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1464 int i;
1465
1466 /* Update producers */
1467 rx_prods.bd_prod = bd_prod;
1468 rx_prods.cqe_prod = rx_comp_prod;
1469 rx_prods.sge_prod = rx_sge_prod;
1470
58f4c4cf
EG
1471 /*
1472 * Make sure that the BD and SGE data is updated before updating the
1473 * producers since FW might read the BD/SGE right after the producer
1474 * is updated.
1475 * This is only applicable for weak-ordered memory model archs such
1476 * as IA-64. The following barrier is also mandatory since FW will
1477 * assumes BDs must have buffers.
1478 */
1479 wmb();
1480
8d9c5f34
EG
1481 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1483 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1484 ((u32 *)&rx_prods)[i]);
1485
58f4c4cf
EG
1486 mmiowb(); /* keep prod updates ordered */
1487
7a9b2557 1488 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1489 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1490 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1491}
1492
a2fbb9ea
ET
1493static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1494{
1495 struct bnx2x *bp = fp->bp;
34f80b04 1496 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1497 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1498 int rx_pkt = 0;
1499
1500#ifdef BNX2X_STOP_ON_ERROR
1501 if (unlikely(bp->panic))
1502 return 0;
1503#endif
1504
34f80b04
EG
1505 /* CQ "next element" is of the size of the regular element,
1506 that's why it's ok here */
a2fbb9ea
ET
1507 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1509 hw_comp_cons++;
1510
1511 bd_cons = fp->rx_bd_cons;
1512 bd_prod = fp->rx_bd_prod;
34f80b04 1513 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1514 sw_comp_cons = fp->rx_comp_cons;
1515 sw_comp_prod = fp->rx_comp_prod;
1516
1517 /* Memory barrier necessary as speculative reads of the rx
1518 * buffer can be ahead of the index in the status block
1519 */
1520 rmb();
1521
1522 DP(NETIF_MSG_RX_STATUS,
1523 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1524 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1525
1526 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1527 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1528 struct sk_buff *skb;
1529 union eth_rx_cqe *cqe;
34f80b04
EG
1530 u8 cqe_fp_flags;
1531 u16 len, pad;
a2fbb9ea
ET
1532
1533 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534 bd_prod = RX_BD(bd_prod);
1535 bd_cons = RX_BD(bd_cons);
1536
619e7a66
EG
1537 /* Prefetch the page containing the BD descriptor
1538 at producer's index. It will be needed when new skb is
1539 allocated */
1540 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541 (&fp->rx_desc_ring[bd_prod])) -
1542 PAGE_SIZE + 1));
1543
a2fbb9ea 1544 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1545 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1546
a2fbb9ea 1547 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1548 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1549 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1550 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1551 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1553
1554 /* is this a slowpath msg? */
34f80b04 1555 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1556 bnx2x_sp_event(fp, cqe);
1557 goto next_cqe;
1558
1559 /* this is an rx packet */
1560 } else {
1561 rx_buf = &fp->rx_buf_ring[bd_cons];
1562 skb = rx_buf->skb;
54b9ddaa
VZ
1563 prefetch(skb);
1564 prefetch((u8 *)skb + 256);
a2fbb9ea
ET
1565 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566 pad = cqe->fast_path_cqe.placement_offset;
1567
7a9b2557
VZ
1568 /* If CQE is marked both TPA_START and TPA_END
1569 it is a non-TPA CQE */
1570 if ((!fp->disable_tpa) &&
1571 (TPA_TYPE(cqe_fp_flags) !=
1572 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1573 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1574
1575 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576 DP(NETIF_MSG_RX_STATUS,
1577 "calling tpa_start on queue %d\n",
1578 queue);
1579
1580 bnx2x_tpa_start(fp, queue, skb,
1581 bd_cons, bd_prod);
1582 goto next_rx;
1583 }
1584
1585 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586 DP(NETIF_MSG_RX_STATUS,
1587 "calling tpa_stop on queue %d\n",
1588 queue);
1589
1590 if (!BNX2X_RX_SUM_FIX(cqe))
1591 BNX2X_ERR("STOP on none TCP "
1592 "data\n");
1593
1594 /* This is a size of the linear data
1595 on this skb */
1596 len = le16_to_cpu(cqe->fast_path_cqe.
1597 len_on_bd);
1598 bnx2x_tpa_stop(bp, fp, queue, pad,
1599 len, cqe, comp_ring_cons);
1600#ifdef BNX2X_STOP_ON_ERROR
1601 if (bp->panic)
17cb4006 1602 return 0;
7a9b2557
VZ
1603#endif
1604
1605 bnx2x_update_sge_prod(fp,
1606 &cqe->fast_path_cqe);
1607 goto next_cqe;
1608 }
1609 }
1610
a2fbb9ea
ET
1611 pci_dma_sync_single_for_device(bp->pdev,
1612 pci_unmap_addr(rx_buf, mapping),
1613 pad + RX_COPY_THRESH,
1614 PCI_DMA_FROMDEVICE);
1615 prefetch(skb);
1616 prefetch(((char *)(skb)) + 128);
1617
1618 /* is this an error packet? */
34f80b04 1619 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1620 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1621 "ERROR flags %x rx packet %u\n",
1622 cqe_fp_flags, sw_comp_cons);
de832a55 1623 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1624 goto reuse_rx;
1625 }
1626
1627 /* Since we don't have a jumbo ring
1628 * copy small packets if mtu > 1500
1629 */
1630 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631 (len <= RX_COPY_THRESH)) {
1632 struct sk_buff *new_skb;
1633
1634 new_skb = netdev_alloc_skb(bp->dev,
1635 len + pad);
1636 if (new_skb == NULL) {
1637 DP(NETIF_MSG_RX_ERR,
34f80b04 1638 "ERROR packet dropped "
a2fbb9ea 1639 "because of alloc failure\n");
de832a55 1640 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1641 goto reuse_rx;
1642 }
1643
1644 /* aligned copy */
1645 skb_copy_from_linear_data_offset(skb, pad,
1646 new_skb->data + pad, len);
1647 skb_reserve(new_skb, pad);
1648 skb_put(new_skb, len);
1649
1650 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1651
1652 skb = new_skb;
1653
a119a069
EG
1654 } else
1655 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
a2fbb9ea
ET
1656 pci_unmap_single(bp->pdev,
1657 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1658 bp->rx_buf_size,
a2fbb9ea
ET
1659 PCI_DMA_FROMDEVICE);
1660 skb_reserve(skb, pad);
1661 skb_put(skb, len);
1662
1663 } else {
1664 DP(NETIF_MSG_RX_ERR,
34f80b04 1665 "ERROR packet dropped because "
a2fbb9ea 1666 "of alloc failure\n");
de832a55 1667 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1668reuse_rx:
1669 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1670 goto next_rx;
1671 }
1672
1673 skb->protocol = eth_type_trans(skb, bp->dev);
1674
1675 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1676 if (bp->rx_csum) {
1adcd8be
EG
1677 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1679 else
de832a55 1680 fp->eth_q_stats.hw_csum_err++;
66e855f3 1681 }
a2fbb9ea
ET
1682 }
1683
748e5439 1684 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1685
a2fbb9ea 1686#ifdef BCM_VLAN
0c6671b0 1687 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1688 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1690 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1692 else
1693#endif
34f80b04 1694 netif_receive_skb(skb);
a2fbb9ea 1695
a2fbb9ea
ET
1696
1697next_rx:
1698 rx_buf->skb = NULL;
1699
1700 bd_cons = NEXT_RX_IDX(bd_cons);
1701 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1702 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1703 rx_pkt++;
a2fbb9ea
ET
1704next_cqe:
1705 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1707
34f80b04 1708 if (rx_pkt == budget)
a2fbb9ea
ET
1709 break;
1710 } /* while */
1711
1712 fp->rx_bd_cons = bd_cons;
34f80b04 1713 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1714 fp->rx_comp_cons = sw_comp_cons;
1715 fp->rx_comp_prod = sw_comp_prod;
1716
7a9b2557
VZ
1717 /* Update producers */
1718 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1719 fp->rx_sge_prod);
a2fbb9ea
ET
1720
1721 fp->rx_pkt += rx_pkt;
1722 fp->rx_calls++;
1723
1724 return rx_pkt;
1725}
1726
1727static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1728{
1729 struct bnx2x_fastpath *fp = fp_cookie;
1730 struct bnx2x *bp = fp->bp;
a2fbb9ea 1731
da5a662a
VZ
1732 /* Return here if interrupt is disabled */
1733 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735 return IRQ_HANDLED;
1736 }
1737
34f80b04 1738 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1739 fp->index, fp->sb_id);
0626b899 1740 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1741
1742#ifdef BNX2X_STOP_ON_ERROR
1743 if (unlikely(bp->panic))
1744 return IRQ_HANDLED;
1745#endif
ca00392c 1746
54b9ddaa
VZ
1747 /* Handle Rx and Tx according to MSI-X vector */
1748 prefetch(fp->rx_cons_sb);
1749 prefetch(fp->tx_cons_sb);
1750 prefetch(&fp->status_blk->u_status_block.status_block_index);
1751 prefetch(&fp->status_blk->c_status_block.status_block_index);
1752 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1753
a2fbb9ea
ET
1754 return IRQ_HANDLED;
1755}
1756
1757static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1758{
555f6c78 1759 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1760 u16 status = bnx2x_ack_int(bp);
34f80b04 1761 u16 mask;
ca00392c 1762 int i;
a2fbb9ea 1763
34f80b04 1764 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1765 if (unlikely(status == 0)) {
1766 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1767 return IRQ_NONE;
1768 }
f5372251 1769 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1770
34f80b04 1771 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1772 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774 return IRQ_HANDLED;
1775 }
1776
3196a88a
EG
1777#ifdef BNX2X_STOP_ON_ERROR
1778 if (unlikely(bp->panic))
1779 return IRQ_HANDLED;
1780#endif
1781
ca00392c
EG
1782 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1784
ca00392c
EG
1785 mask = 0x2 << fp->sb_id;
1786 if (status & mask) {
54b9ddaa
VZ
1787 /* Handle Rx and Tx according to SB id */
1788 prefetch(fp->rx_cons_sb);
1789 prefetch(&fp->status_blk->u_status_block.
1790 status_block_index);
1791 prefetch(fp->tx_cons_sb);
1792 prefetch(&fp->status_blk->c_status_block.
1793 status_block_index);
1794 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1795 status &= ~mask;
1796 }
a2fbb9ea
ET
1797 }
1798
993ac7b5
MC
1799#ifdef BCM_CNIC
1800 mask = 0x2 << CNIC_SB_ID(bp);
1801 if (status & (mask | 0x1)) {
1802 struct cnic_ops *c_ops = NULL;
1803
1804 rcu_read_lock();
1805 c_ops = rcu_dereference(bp->cnic_ops);
1806 if (c_ops)
1807 c_ops->cnic_handler(bp->cnic_data, NULL);
1808 rcu_read_unlock();
1809
1810 status &= ~mask;
1811 }
1812#endif
a2fbb9ea 1813
34f80b04 1814 if (unlikely(status & 0x1)) {
1cf167f2 1815 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1816
1817 status &= ~0x1;
1818 if (!status)
1819 return IRQ_HANDLED;
1820 }
1821
34f80b04
EG
1822 if (status)
1823 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1824 status);
a2fbb9ea 1825
c18487ee 1826 return IRQ_HANDLED;
a2fbb9ea
ET
1827}
1828
c18487ee 1829/* end of fast path */
a2fbb9ea 1830
bb2a0f7a 1831static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1832
c18487ee
YR
1833/* Link */
1834
1835/*
1836 * General service functions
1837 */
a2fbb9ea 1838
4a37fb66 1839static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1840{
1841 u32 lock_status;
1842 u32 resource_bit = (1 << resource);
4a37fb66
YG
1843 int func = BP_FUNC(bp);
1844 u32 hw_lock_control_reg;
c18487ee 1845 int cnt;
a2fbb9ea 1846
c18487ee
YR
1847 /* Validating that the resource is within range */
1848 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849 DP(NETIF_MSG_HW,
1850 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852 return -EINVAL;
1853 }
a2fbb9ea 1854
4a37fb66
YG
1855 if (func <= 5) {
1856 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857 } else {
1858 hw_lock_control_reg =
1859 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860 }
1861
c18487ee 1862 /* Validating that the resource is not already taken */
4a37fb66 1863 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1864 if (lock_status & resource_bit) {
1865 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1866 lock_status, resource_bit);
1867 return -EEXIST;
1868 }
a2fbb9ea 1869
46230476
EG
1870 /* Try for 5 second every 5ms */
1871 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1872 /* Try to acquire the lock */
4a37fb66
YG
1873 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1875 if (lock_status & resource_bit)
1876 return 0;
a2fbb9ea 1877
c18487ee 1878 msleep(5);
a2fbb9ea 1879 }
c18487ee
YR
1880 DP(NETIF_MSG_HW, "Timeout\n");
1881 return -EAGAIN;
1882}
a2fbb9ea 1883
4a37fb66 1884static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1885{
1886 u32 lock_status;
1887 u32 resource_bit = (1 << resource);
4a37fb66
YG
1888 int func = BP_FUNC(bp);
1889 u32 hw_lock_control_reg;
a2fbb9ea 1890
c18487ee
YR
1891 /* Validating that the resource is within range */
1892 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893 DP(NETIF_MSG_HW,
1894 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896 return -EINVAL;
1897 }
1898
4a37fb66
YG
1899 if (func <= 5) {
1900 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901 } else {
1902 hw_lock_control_reg =
1903 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904 }
1905
c18487ee 1906 /* Validating that the resource is currently taken */
4a37fb66 1907 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1908 if (!(lock_status & resource_bit)) {
1909 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1910 lock_status, resource_bit);
1911 return -EFAULT;
a2fbb9ea
ET
1912 }
1913
4a37fb66 1914 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1915 return 0;
1916}
1917
1918/* HW Lock for shared dual port PHYs */
4a37fb66 1919static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1920{
34f80b04 1921 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1922
46c6a674
EG
1923 if (bp->port.need_hw_lock)
1924 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1925}
a2fbb9ea 1926
4a37fb66 1927static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1928{
46c6a674
EG
1929 if (bp->port.need_hw_lock)
1930 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1931
34f80b04 1932 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1933}
a2fbb9ea 1934
4acac6a5
EG
1935int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944 int value;
1945
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948 return -EINVAL;
1949 }
1950
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1956 value = 1;
1957 else
1958 value = 0;
1959
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1961
1962 return value;
1963}
1964
17de50b7 1965int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1966{
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1973 u32 gpio_reg;
a2fbb9ea 1974
c18487ee
YR
1975 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977 return -EINVAL;
1978 }
a2fbb9ea 1979
4a37fb66 1980 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1981 /* read GPIO and mask except the float bits */
1982 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1983
c18487ee
YR
1984 switch (mode) {
1985 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987 gpio_num, gpio_shift);
1988 /* clear FLOAT and set CLR */
1989 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991 break;
a2fbb9ea 1992
c18487ee
YR
1993 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995 gpio_num, gpio_shift);
1996 /* clear FLOAT and set SET */
1997 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999 break;
a2fbb9ea 2000
17de50b7 2001 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2002 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003 gpio_num, gpio_shift);
2004 /* set FLOAT */
2005 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006 break;
a2fbb9ea 2007
c18487ee
YR
2008 default:
2009 break;
a2fbb9ea
ET
2010 }
2011
c18487ee 2012 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2013 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2014
c18487ee 2015 return 0;
a2fbb9ea
ET
2016}
2017
4acac6a5
EG
2018int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019{
2020 /* The GPIO should be swapped if swap register is set and active */
2021 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023 int gpio_shift = gpio_num +
2024 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025 u32 gpio_mask = (1 << gpio_shift);
2026 u32 gpio_reg;
2027
2028 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030 return -EINVAL;
2031 }
2032
2033 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034 /* read GPIO int */
2035 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037 switch (mode) {
2038 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040 "output low\n", gpio_num, gpio_shift);
2041 /* clear SET and set CLR */
2042 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044 break;
2045
2046 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048 "output high\n", gpio_num, gpio_shift);
2049 /* clear CLR and set SET */
2050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052 break;
2053
2054 default:
2055 break;
2056 }
2057
2058 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061 return 0;
2062}
2063
c18487ee 2064static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2065{
c18487ee
YR
2066 u32 spio_mask = (1 << spio_num);
2067 u32 spio_reg;
a2fbb9ea 2068
c18487ee
YR
2069 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070 (spio_num > MISC_REGISTERS_SPIO_7)) {
2071 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072 return -EINVAL;
a2fbb9ea
ET
2073 }
2074
4a37fb66 2075 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2076 /* read SPIO and mask except the float bits */
2077 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2078
c18487ee 2079 switch (mode) {
6378c025 2080 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2081 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082 /* clear FLOAT and set CLR */
2083 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085 break;
a2fbb9ea 2086
6378c025 2087 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2088 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089 /* clear FLOAT and set SET */
2090 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092 break;
a2fbb9ea 2093
c18487ee
YR
2094 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096 /* set FLOAT */
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098 break;
a2fbb9ea 2099
c18487ee
YR
2100 default:
2101 break;
a2fbb9ea
ET
2102 }
2103
c18487ee 2104 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2105 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2106
a2fbb9ea
ET
2107 return 0;
2108}
2109
c18487ee 2110static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2111{
ad33ea3a
EG
2112 switch (bp->link_vars.ieee_fc &
2113 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2114 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2115 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2116 ADVERTISED_Pause);
2117 break;
356e2385 2118
c18487ee 2119 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2120 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2121 ADVERTISED_Pause);
2122 break;
356e2385 2123
c18487ee 2124 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2125 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2126 break;
356e2385 2127
c18487ee 2128 default:
34f80b04 2129 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2130 ADVERTISED_Pause);
2131 break;
2132 }
2133}
f1410647 2134
c18487ee
YR
2135static void bnx2x_link_report(struct bnx2x *bp)
2136{
f34d28ea 2137 if (bp->flags & MF_FUNC_DIS) {
2691d51d
EG
2138 netif_carrier_off(bp->dev);
2139 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2140 return;
2141 }
2142
c18487ee 2143 if (bp->link_vars.link_up) {
35c5f8fe
EG
2144 u16 line_speed;
2145
c18487ee
YR
2146 if (bp->state == BNX2X_STATE_OPEN)
2147 netif_carrier_on(bp->dev);
2148 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2149
35c5f8fe
EG
2150 line_speed = bp->link_vars.line_speed;
2151 if (IS_E1HMF(bp)) {
2152 u16 vn_max_rate;
2153
2154 vn_max_rate =
2155 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157 if (vn_max_rate < line_speed)
2158 line_speed = vn_max_rate;
2159 }
2160 printk("%d Mbps ", line_speed);
f1410647 2161
c18487ee
YR
2162 if (bp->link_vars.duplex == DUPLEX_FULL)
2163 printk("full duplex");
2164 else
2165 printk("half duplex");
f1410647 2166
c0700f90
DM
2167 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2169 printk(", receive ");
356e2385
EG
2170 if (bp->link_vars.flow_ctrl &
2171 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2172 printk("& transmit ");
2173 } else {
2174 printk(", transmit ");
2175 }
2176 printk("flow control ON");
2177 }
2178 printk("\n");
f1410647 2179
c18487ee
YR
2180 } else { /* link_down */
2181 netif_carrier_off(bp->dev);
2182 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2183 }
c18487ee
YR
2184}
2185
b5bf9068 2186static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2187{
19680c48
EG
2188 if (!BP_NOMCP(bp)) {
2189 u8 rc;
a2fbb9ea 2190
19680c48 2191 /* Initialize link parameters structure variables */
8c99e7b0
YR
2192 /* It is recommended to turn off RX FC for jumbo frames
2193 for better performance */
0c593270 2194 if (bp->dev->mtu > 5000)
c0700f90 2195 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2196 else
c0700f90 2197 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2198
4a37fb66 2199 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2200
2201 if (load_mode == LOAD_DIAG)
2202 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
19680c48 2204 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2205
4a37fb66 2206 bnx2x_release_phy_lock(bp);
a2fbb9ea 2207
3c96c68b
EG
2208 bnx2x_calc_fc_adv(bp);
2209
b5bf9068
EG
2210 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2212 bnx2x_link_report(bp);
b5bf9068 2213 }
34f80b04 2214
19680c48
EG
2215 return rc;
2216 }
f5372251 2217 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2218 return -EINVAL;
a2fbb9ea
ET
2219}
2220
c18487ee 2221static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2222{
19680c48 2223 if (!BP_NOMCP(bp)) {
4a37fb66 2224 bnx2x_acquire_phy_lock(bp);
19680c48 2225 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2226 bnx2x_release_phy_lock(bp);
a2fbb9ea 2227
19680c48
EG
2228 bnx2x_calc_fc_adv(bp);
2229 } else
f5372251 2230 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2231}
a2fbb9ea 2232
c18487ee
YR
2233static void bnx2x__link_reset(struct bnx2x *bp)
2234{
19680c48 2235 if (!BP_NOMCP(bp)) {
4a37fb66 2236 bnx2x_acquire_phy_lock(bp);
589abe3a 2237 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2238 bnx2x_release_phy_lock(bp);
19680c48 2239 } else
f5372251 2240 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2241}
a2fbb9ea 2242
c18487ee
YR
2243static u8 bnx2x_link_test(struct bnx2x *bp)
2244{
2245 u8 rc;
a2fbb9ea 2246
4a37fb66 2247 bnx2x_acquire_phy_lock(bp);
c18487ee 2248 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2249 bnx2x_release_phy_lock(bp);
a2fbb9ea 2250
c18487ee
YR
2251 return rc;
2252}
a2fbb9ea 2253
8a1c38d1 2254static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2255{
8a1c38d1
EG
2256 u32 r_param = bp->link_vars.line_speed / 8;
2257 u32 fair_periodic_timeout_usec;
2258 u32 t_fair;
34f80b04 2259
8a1c38d1
EG
2260 memset(&(bp->cmng.rs_vars), 0,
2261 sizeof(struct rate_shaping_vars_per_port));
2262 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2263
8a1c38d1
EG
2264 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2266
8a1c38d1
EG
2267 /* this is the threshold below which no timer arming will occur
2268 1.25 coefficient is for the threshold to be a little bigger
2269 than the real time, to compensate for timer in-accuracy */
2270 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2271 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
8a1c38d1
EG
2273 /* resolution of fairness timer */
2274 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2277
8a1c38d1
EG
2278 /* this is the threshold below which we won't arm the timer anymore */
2279 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2280
8a1c38d1
EG
2281 /* we multiply by 1e3/8 to get bytes/msec.
2282 We don't want the credits to pass a credit
2283 of the t_fair*FAIR_MEM (algorithm resolution) */
2284 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285 /* since each tick is 4 usec */
2286 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2287}
2288
2691d51d
EG
2289/* Calculates the sum of vn_min_rates.
2290 It's needed for further normalizing of the min_rates.
2291 Returns:
2292 sum of vn_min_rates.
2293 or
2294 0 - if all the min_rates are 0.
2295 In the later case fainess algorithm should be deactivated.
2296 If not all min_rates are zero then those that are zeroes will be set to 1.
2297 */
2298static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2299{
2300 int all_zero = 1;
2301 int port = BP_PORT(bp);
2302 int vn;
2303
2304 bp->vn_weight_sum = 0;
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306 int func = 2*vn + port;
2307 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2310
2311 /* Skip hidden vns */
2312 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313 continue;
2314
2315 /* If min rate is zero - set it to 1 */
2316 if (!vn_min_rate)
2317 vn_min_rate = DEF_MIN_RATE;
2318 else
2319 all_zero = 0;
2320
2321 bp->vn_weight_sum += vn_min_rate;
2322 }
2323
2324 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2325 if (all_zero) {
2326 bp->cmng.flags.cmng_enables &=
2327 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2329 " fairness will be disabled\n");
2330 } else
2331 bp->cmng.flags.cmng_enables |=
2332 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2333}
2334
8a1c38d1 2335static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2336{
2337 struct rate_shaping_vars_per_vn m_rs_vn;
2338 struct fairness_vars_per_vn m_fair_vn;
2339 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340 u16 vn_min_rate, vn_max_rate;
2341 int i;
2342
2343 /* If function is hidden - set min and max to zeroes */
2344 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345 vn_min_rate = 0;
2346 vn_max_rate = 0;
2347
2348 } else {
2349 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2351 /* If min rate is zero - set it to 1 */
2352 if (!vn_min_rate)
34f80b04
EG
2353 vn_min_rate = DEF_MIN_RATE;
2354 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2356 }
8a1c38d1 2357 DP(NETIF_MSG_IFUP,
b015e3d1 2358 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2359 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2360
2361 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2363
2364 /* global vn counter - maximal Mbps for this vn */
2365 m_rs_vn.vn_counter.rate = vn_max_rate;
2366
2367 /* quota - number of bytes transmitted in this period */
2368 m_rs_vn.vn_counter.quota =
2369 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2370
8a1c38d1 2371 if (bp->vn_weight_sum) {
34f80b04
EG
2372 /* credit for each period of the fairness algorithm:
2373 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2374 vn_weight_sum should not be larger than 10000, thus
2375 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376 than zero */
34f80b04 2377 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2378 max((u32)(vn_min_rate * (T_FAIR_COEF /
2379 (8 * bp->vn_weight_sum))),
2380 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2381 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382 m_fair_vn.vn_credit_delta);
2383 }
2384
34f80b04
EG
2385 /* Store it to internal memory */
2386 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389 ((u32 *)(&m_rs_vn))[i]);
2390
2391 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394 ((u32 *)(&m_fair_vn))[i]);
2395}
2396
8a1c38d1 2397
c18487ee
YR
2398/* This function is called upon link interrupt */
2399static void bnx2x_link_attn(struct bnx2x *bp)
2400{
bb2a0f7a
YG
2401 /* Make sure that we are synced with the current statistics */
2402 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
c18487ee 2404 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2405
bb2a0f7a
YG
2406 if (bp->link_vars.link_up) {
2407
1c06328c 2408 /* dropless flow control */
a18f5128 2409 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2410 int port = BP_PORT(bp);
2411 u32 pause_enabled = 0;
2412
2413 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414 pause_enabled = 1;
2415
2416 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2417 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2418 pause_enabled);
2419 }
2420
bb2a0f7a
YG
2421 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422 struct host_port_stats *pstats;
2423
2424 pstats = bnx2x_sp(bp, port_stats);
2425 /* reset old bmac stats */
2426 memset(&(pstats->mac_stx[0]), 0,
2427 sizeof(struct mac_stx));
2428 }
f34d28ea 2429 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2430 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431 }
2432
c18487ee
YR
2433 /* indicate link status */
2434 bnx2x_link_report(bp);
34f80b04
EG
2435
2436 if (IS_E1HMF(bp)) {
8a1c38d1 2437 int port = BP_PORT(bp);
34f80b04 2438 int func;
8a1c38d1 2439 int vn;
34f80b04 2440
ab6ad5a4 2441 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2442 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443 if (vn == BP_E1HVN(bp))
2444 continue;
2445
8a1c38d1 2446 func = ((vn << 1) | port);
34f80b04
EG
2447 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449 }
34f80b04 2450
8a1c38d1
EG
2451 if (bp->link_vars.link_up) {
2452 int i;
2453
2454 /* Init rate shaping and fairness contexts */
2455 bnx2x_init_port_minmax(bp);
34f80b04 2456
34f80b04 2457 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2458 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460 /* Store it to internal memory */
2461 for (i = 0;
2462 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465 ((u32 *)(&bp->cmng))[i]);
2466 }
34f80b04 2467 }
c18487ee 2468}
a2fbb9ea 2469
c18487ee
YR
2470static void bnx2x__link_status_update(struct bnx2x *bp)
2471{
f34d28ea 2472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2473 return;
a2fbb9ea 2474
c18487ee 2475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2476
bb2a0f7a
YG
2477 if (bp->link_vars.link_up)
2478 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479 else
2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
2691d51d
EG
2482 bnx2x_calc_vn_weight_sum(bp);
2483
c18487ee
YR
2484 /* indicate link status */
2485 bnx2x_link_report(bp);
a2fbb9ea 2486}
a2fbb9ea 2487
34f80b04
EG
2488static void bnx2x_pmf_update(struct bnx2x *bp)
2489{
2490 int port = BP_PORT(bp);
2491 u32 val;
2492
2493 bp->port.pmf = 1;
2494 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496 /* enable nig attention */
2497 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2500
2501 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2502}
2503
c18487ee 2504/* end of Link */
a2fbb9ea
ET
2505
2506/* slow path */
2507
2508/*
2509 * General service functions
2510 */
2511
2691d51d
EG
2512/* send the MCP a request, block until there is a reply */
2513u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2514{
2515 int func = BP_FUNC(bp);
2516 u32 seq = ++bp->fw_seq;
2517 u32 rc = 0;
2518 u32 cnt = 1;
2519 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2520
c4ff7cbf 2521 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2522 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2524
2525 do {
2526 /* let the FW do it's magic ... */
2527 msleep(delay);
2528
2529 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2530
c4ff7cbf
EG
2531 /* Give the FW up to 5 second (500*10ms) */
2532 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2533
2534 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535 cnt*delay, rc, seq);
2536
2537 /* is this a reply to our command? */
2538 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539 rc &= FW_MSG_CODE_MASK;
2540 else {
2541 /* FW BUG! */
2542 BNX2X_ERR("FW failed to respond!\n");
2543 bnx2x_fw_dump(bp);
2544 rc = 0;
2545 }
c4ff7cbf 2546 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2547
2548 return rc;
2549}
2550
2551static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2552static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2553static void bnx2x_set_rx_mode(struct net_device *dev);
2554
2555static void bnx2x_e1h_disable(struct bnx2x *bp)
2556{
2557 int port = BP_PORT(bp);
2691d51d
EG
2558
2559 netif_tx_disable(bp->dev);
2691d51d
EG
2560
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2562
2691d51d
EG
2563 netif_carrier_off(bp->dev);
2564}
2565
2566static void bnx2x_e1h_enable(struct bnx2x *bp)
2567{
2568 int port = BP_PORT(bp);
2569
2570 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2571
2691d51d
EG
2572 /* Tx queue should be only reenabled */
2573 netif_tx_wake_all_queues(bp->dev);
2574
061bc702
EG
2575 /*
2576 * Should not call netif_carrier_on since it will be called if the link
2577 * is up when checking for link state
2578 */
2691d51d
EG
2579}
2580
2581static void bnx2x_update_min_max(struct bnx2x *bp)
2582{
2583 int port = BP_PORT(bp);
2584 int vn, i;
2585
2586 /* Init rate shaping and fairness contexts */
2587 bnx2x_init_port_minmax(bp);
2588
2589 bnx2x_calc_vn_weight_sum(bp);
2590
2591 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592 bnx2x_init_vn_minmax(bp, 2*vn + port);
2593
2594 if (bp->port.pmf) {
2595 int func;
2596
2597 /* Set the attention towards other drivers on the same port */
2598 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599 if (vn == BP_E1HVN(bp))
2600 continue;
2601
2602 func = ((vn << 1) | port);
2603 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2605 }
2606
2607 /* Store it to internal memory */
2608 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609 REG_WR(bp, BAR_XSTRORM_INTMEM +
2610 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611 ((u32 *)(&bp->cmng))[i]);
2612 }
2613}
2614
2615static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2616{
2691d51d 2617 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2618
2619 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2620
f34d28ea
EG
2621 /*
2622 * This is the only place besides the function initialization
2623 * where the bp->flags can change so it is done without any
2624 * locks
2625 */
2691d51d
EG
2626 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2628 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2629
2630 bnx2x_e1h_disable(bp);
2631 } else {
2632 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2633 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2634
2635 bnx2x_e1h_enable(bp);
2636 }
2637 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2638 }
2639 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2640
2641 bnx2x_update_min_max(bp);
2642 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2643 }
2644
2645 /* Report results to MCP */
2646 if (dcc_event)
2647 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2648 else
2649 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2650}
2651
28912902
MC
2652/* must be called under the spq lock */
2653static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654{
2655 struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657 if (bp->spq_prod_bd == bp->spq_last_bd) {
2658 bp->spq_prod_bd = bp->spq;
2659 bp->spq_prod_idx = 0;
2660 DP(NETIF_MSG_TIMER, "end of spq\n");
2661 } else {
2662 bp->spq_prod_bd++;
2663 bp->spq_prod_idx++;
2664 }
2665 return next_spe;
2666}
2667
2668/* must be called under the spq lock */
2669static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2670{
2671 int func = BP_FUNC(bp);
2672
2673 /* Make sure that BD data is updated before writing the producer */
2674 wmb();
2675
2676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677 bp->spq_prod_idx);
2678 mmiowb();
2679}
2680
a2fbb9ea
ET
2681/* the slow path queue is odd since completions arrive on the fastpath ring */
2682static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683 u32 data_hi, u32 data_lo, int common)
2684{
28912902 2685 struct eth_spe *spe;
a2fbb9ea 2686
34f80b04
EG
2687 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2689 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2692
2693#ifdef BNX2X_STOP_ON_ERROR
2694 if (unlikely(bp->panic))
2695 return -EIO;
2696#endif
2697
34f80b04 2698 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2699
2700 if (!bp->spq_left) {
2701 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2702 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2703 bnx2x_panic();
2704 return -EBUSY;
2705 }
f1410647 2706
28912902
MC
2707 spe = bnx2x_sp_get_next(bp);
2708
a2fbb9ea 2709 /* CID needs port number to be encoded int it */
28912902 2710 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2711 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2712 HW_CID(bp, cid)));
28912902 2713 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2714 if (common)
28912902 2715 spe->hdr.type |=
a2fbb9ea
ET
2716 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2717
28912902
MC
2718 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2720
2721 bp->spq_left--;
2722
28912902 2723 bnx2x_sp_prod_update(bp);
34f80b04 2724 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2725 return 0;
2726}
2727
2728/* acquire split MCP access lock register */
4a37fb66 2729static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2730{
a2fbb9ea 2731 u32 i, j, val;
34f80b04 2732 int rc = 0;
a2fbb9ea
ET
2733
2734 might_sleep();
2735 i = 100;
2736 for (j = 0; j < i*10; j++) {
2737 val = (1UL << 31);
2738 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740 if (val & (1L << 31))
2741 break;
2742
2743 msleep(5);
2744 }
a2fbb9ea 2745 if (!(val & (1L << 31))) {
19680c48 2746 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2747 rc = -EBUSY;
2748 }
2749
2750 return rc;
2751}
2752
4a37fb66
YG
2753/* release split MCP access lock register */
2754static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2755{
2756 u32 val = 0;
2757
2758 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759}
2760
2761static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2762{
2763 struct host_def_status_block *def_sb = bp->def_status_blk;
2764 u16 rc = 0;
2765
2766 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2767 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769 rc |= 1;
2770 }
2771 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773 rc |= 2;
2774 }
2775 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777 rc |= 4;
2778 }
2779 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781 rc |= 8;
2782 }
2783 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785 rc |= 16;
2786 }
2787 return rc;
2788}
2789
2790/*
2791 * slow path service functions
2792 */
2793
2794static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2795{
34f80b04 2796 int port = BP_PORT(bp);
5c862848
EG
2797 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2799 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2801 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2803 u32 aeu_mask;
87942b46 2804 u32 nig_mask = 0;
a2fbb9ea 2805
a2fbb9ea
ET
2806 if (bp->attn_state & asserted)
2807 BNX2X_ERR("IGU ERROR\n");
2808
3fcaf2e5
EG
2809 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810 aeu_mask = REG_RD(bp, aeu_addr);
2811
a2fbb9ea 2812 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2813 aeu_mask, asserted);
2814 aeu_mask &= ~(asserted & 0xff);
2815 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2816
3fcaf2e5
EG
2817 REG_WR(bp, aeu_addr, aeu_mask);
2818 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2819
3fcaf2e5 2820 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2821 bp->attn_state |= asserted;
3fcaf2e5 2822 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2823
2824 if (asserted & ATTN_HARD_WIRED_MASK) {
2825 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2826
a5e9a7cf
EG
2827 bnx2x_acquire_phy_lock(bp);
2828
877e9aa4 2829 /* save nig interrupt mask */
87942b46 2830 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2831 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2832
c18487ee 2833 bnx2x_link_attn(bp);
a2fbb9ea
ET
2834
2835 /* handle unicore attn? */
2836 }
2837 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2839
2840 if (asserted & GPIO_2_FUNC)
2841 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2842
2843 if (asserted & GPIO_3_FUNC)
2844 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2845
2846 if (asserted & GPIO_4_FUNC)
2847 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2848
2849 if (port == 0) {
2850 if (asserted & ATTN_GENERAL_ATTN_1) {
2851 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2853 }
2854 if (asserted & ATTN_GENERAL_ATTN_2) {
2855 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2857 }
2858 if (asserted & ATTN_GENERAL_ATTN_3) {
2859 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2861 }
2862 } else {
2863 if (asserted & ATTN_GENERAL_ATTN_4) {
2864 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2866 }
2867 if (asserted & ATTN_GENERAL_ATTN_5) {
2868 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2870 }
2871 if (asserted & ATTN_GENERAL_ATTN_6) {
2872 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2874 }
2875 }
2876
2877 } /* if hardwired */
2878
5c862848
EG
2879 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880 asserted, hc_addr);
2881 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2882
2883 /* now set back the mask */
a5e9a7cf 2884 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2885 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2886 bnx2x_release_phy_lock(bp);
2887 }
a2fbb9ea
ET
2888}
2889
fd4ef40d
EG
2890static inline void bnx2x_fan_failure(struct bnx2x *bp)
2891{
2892 int port = BP_PORT(bp);
2893
2894 /* mark the failure */
2895 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898 bp->link_params.ext_phy_config);
2899
2900 /* log the failure */
2901 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2902 " the driver to shutdown the card to prevent permanent"
2903 " damage. Please contact Dell Support for assistance\n",
2904 bp->dev->name);
2905}
ab6ad5a4 2906
877e9aa4 2907static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2908{
34f80b04 2909 int port = BP_PORT(bp);
877e9aa4 2910 int reg_offset;
4d295db0 2911 u32 val, swap_val, swap_override;
877e9aa4 2912
34f80b04
EG
2913 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2914 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2915
34f80b04 2916 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2917
2918 val = REG_RD(bp, reg_offset);
2919 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2920 REG_WR(bp, reg_offset, val);
2921
2922 BNX2X_ERR("SPIO5 hw attention\n");
2923
fd4ef40d 2924 /* Fan failure attention */
35b19ba5
EG
2925 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2926 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2927 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2928 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2929 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2930 /* The PHY reset is controlled by GPIO 1 */
2931 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2933 break;
2934
4d295db0
EG
2935 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2936 /* The PHY reset is controlled by GPIO 1 */
2937 /* fake the port number to cancel the swap done in
2938 set_gpio() */
2939 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2940 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2941 port = (swap_val && swap_override) ^ 1;
2942 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944 break;
2945
877e9aa4
ET
2946 default:
2947 break;
2948 }
fd4ef40d 2949 bnx2x_fan_failure(bp);
877e9aa4 2950 }
34f80b04 2951
589abe3a
EG
2952 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2953 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2954 bnx2x_acquire_phy_lock(bp);
2955 bnx2x_handle_module_detect_int(&bp->link_params);
2956 bnx2x_release_phy_lock(bp);
2957 }
2958
34f80b04
EG
2959 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2960
2961 val = REG_RD(bp, reg_offset);
2962 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2963 REG_WR(bp, reg_offset, val);
2964
2965 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2966 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2967 bnx2x_panic();
2968 }
877e9aa4
ET
2969}
2970
2971static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2972{
2973 u32 val;
2974
0626b899 2975 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2976
2977 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2978 BNX2X_ERR("DB hw attention 0x%x\n", val);
2979 /* DORQ discard attention */
2980 if (val & 0x2)
2981 BNX2X_ERR("FATAL error from DORQ\n");
2982 }
34f80b04
EG
2983
2984 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2985
2986 int port = BP_PORT(bp);
2987 int reg_offset;
2988
2989 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2990 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2991
2992 val = REG_RD(bp, reg_offset);
2993 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2994 REG_WR(bp, reg_offset, val);
2995
2996 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2997 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2998 bnx2x_panic();
2999 }
877e9aa4
ET
3000}
3001
3002static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3003{
3004 u32 val;
3005
3006 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3007
3008 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3009 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3010 /* CFC error attention */
3011 if (val & 0x2)
3012 BNX2X_ERR("FATAL error from CFC\n");
3013 }
3014
3015 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3016
3017 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3018 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3019 /* RQ_USDMDP_FIFO_OVERFLOW */
3020 if (val & 0x18000)
3021 BNX2X_ERR("FATAL error from PXP\n");
3022 }
34f80b04
EG
3023
3024 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3025
3026 int port = BP_PORT(bp);
3027 int reg_offset;
3028
3029 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3030 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3031
3032 val = REG_RD(bp, reg_offset);
3033 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3034 REG_WR(bp, reg_offset, val);
3035
3036 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3037 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3038 bnx2x_panic();
3039 }
877e9aa4
ET
3040}
3041
3042static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3043{
34f80b04
EG
3044 u32 val;
3045
877e9aa4
ET
3046 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3047
34f80b04
EG
3048 if (attn & BNX2X_PMF_LINK_ASSERT) {
3049 int func = BP_FUNC(bp);
3050
3051 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3052 bp->mf_config = SHMEM_RD(bp,
3053 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3054 val = SHMEM_RD(bp, func_mb[func].drv_status);
3055 if (val & DRV_STATUS_DCC_EVENT_MASK)
3056 bnx2x_dcc_event(bp,
3057 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3058 bnx2x__link_status_update(bp);
2691d51d 3059 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3060 bnx2x_pmf_update(bp);
3061
3062 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3063
3064 BNX2X_ERR("MC assert!\n");
3065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3067 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3068 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3069 bnx2x_panic();
3070
3071 } else if (attn & BNX2X_MCP_ASSERT) {
3072
3073 BNX2X_ERR("MCP assert!\n");
3074 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3075 bnx2x_fw_dump(bp);
877e9aa4
ET
3076
3077 } else
3078 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3079 }
3080
3081 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3082 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3083 if (attn & BNX2X_GRC_TIMEOUT) {
3084 val = CHIP_IS_E1H(bp) ?
3085 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3086 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3087 }
3088 if (attn & BNX2X_GRC_RSV) {
3089 val = CHIP_IS_E1H(bp) ?
3090 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3091 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3092 }
877e9aa4 3093 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3094 }
3095}
3096
3097static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3098{
a2fbb9ea
ET
3099 struct attn_route attn;
3100 struct attn_route group_mask;
34f80b04 3101 int port = BP_PORT(bp);
877e9aa4 3102 int index;
a2fbb9ea
ET
3103 u32 reg_addr;
3104 u32 val;
3fcaf2e5 3105 u32 aeu_mask;
a2fbb9ea
ET
3106
3107 /* need to take HW lock because MCP or other port might also
3108 try to handle this event */
4a37fb66 3109 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3110
3111 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3112 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3113 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3114 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3115 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3116 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3117
3118 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3119 if (deasserted & (1 << index)) {
3120 group_mask = bp->attn_group[index];
3121
34f80b04
EG
3122 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3123 index, group_mask.sig[0], group_mask.sig[1],
3124 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3125
877e9aa4
ET
3126 bnx2x_attn_int_deasserted3(bp,
3127 attn.sig[3] & group_mask.sig[3]);
3128 bnx2x_attn_int_deasserted1(bp,
3129 attn.sig[1] & group_mask.sig[1]);
3130 bnx2x_attn_int_deasserted2(bp,
3131 attn.sig[2] & group_mask.sig[2]);
3132 bnx2x_attn_int_deasserted0(bp,
3133 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3134
a2fbb9ea
ET
3135 if ((attn.sig[0] & group_mask.sig[0] &
3136 HW_PRTY_ASSERT_SET_0) ||
3137 (attn.sig[1] & group_mask.sig[1] &
3138 HW_PRTY_ASSERT_SET_1) ||
3139 (attn.sig[2] & group_mask.sig[2] &
3140 HW_PRTY_ASSERT_SET_2))
6378c025 3141 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3142 }
3143 }
3144
4a37fb66 3145 bnx2x_release_alr(bp);
a2fbb9ea 3146
5c862848 3147 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3148
3149 val = ~deasserted;
3fcaf2e5
EG
3150 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3151 val, reg_addr);
5c862848 3152 REG_WR(bp, reg_addr, val);
a2fbb9ea 3153
a2fbb9ea 3154 if (~bp->attn_state & deasserted)
3fcaf2e5 3155 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3156
3157 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3158 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3159
3fcaf2e5
EG
3160 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3161 aeu_mask = REG_RD(bp, reg_addr);
3162
3163 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3164 aeu_mask, deasserted);
3165 aeu_mask |= (deasserted & 0xff);
3166 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3167
3fcaf2e5
EG
3168 REG_WR(bp, reg_addr, aeu_mask);
3169 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3170
3171 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3172 bp->attn_state &= ~deasserted;
3173 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3174}
3175
3176static void bnx2x_attn_int(struct bnx2x *bp)
3177{
3178 /* read local copy of bits */
68d59484
EG
3179 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180 attn_bits);
3181 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3182 attn_bits_ack);
a2fbb9ea
ET
3183 u32 attn_state = bp->attn_state;
3184
3185 /* look for changed bits */
3186 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3187 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3188
3189 DP(NETIF_MSG_HW,
3190 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3191 attn_bits, attn_ack, asserted, deasserted);
3192
3193 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3194 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3195
3196 /* handle bits that were raised */
3197 if (asserted)
3198 bnx2x_attn_int_asserted(bp, asserted);
3199
3200 if (deasserted)
3201 bnx2x_attn_int_deasserted(bp, deasserted);
3202}
3203
3204static void bnx2x_sp_task(struct work_struct *work)
3205{
1cf167f2 3206 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3207 u16 status;
3208
34f80b04 3209
a2fbb9ea
ET
3210 /* Return here if interrupt is disabled */
3211 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3212 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3213 return;
3214 }
3215
3216 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3217/* if (status == 0) */
3218/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3219
3196a88a 3220 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3221
877e9aa4
ET
3222 /* HW attentions */
3223 if (status & 0x1)
a2fbb9ea 3224 bnx2x_attn_int(bp);
a2fbb9ea 3225
68d59484 3226 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3227 IGU_INT_NOP, 1);
3228 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3229 IGU_INT_NOP, 1);
3230 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3231 IGU_INT_NOP, 1);
3232 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3233 IGU_INT_NOP, 1);
3234 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3235 IGU_INT_ENABLE, 1);
877e9aa4 3236
a2fbb9ea
ET
3237}
3238
3239static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3240{
3241 struct net_device *dev = dev_instance;
3242 struct bnx2x *bp = netdev_priv(dev);
3243
3244 /* Return here if interrupt is disabled */
3245 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3246 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3247 return IRQ_HANDLED;
3248 }
3249
8d9c5f34 3250 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3251
3252#ifdef BNX2X_STOP_ON_ERROR
3253 if (unlikely(bp->panic))
3254 return IRQ_HANDLED;
3255#endif
3256
993ac7b5
MC
3257#ifdef BCM_CNIC
3258 {
3259 struct cnic_ops *c_ops;
3260
3261 rcu_read_lock();
3262 c_ops = rcu_dereference(bp->cnic_ops);
3263 if (c_ops)
3264 c_ops->cnic_handler(bp->cnic_data, NULL);
3265 rcu_read_unlock();
3266 }
3267#endif
1cf167f2 3268 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3269
3270 return IRQ_HANDLED;
3271}
3272
3273/* end of slow path */
3274
3275/* Statistics */
3276
3277/****************************************************************************
3278* Macros
3279****************************************************************************/
3280
a2fbb9ea
ET
3281/* sum[hi:lo] += add[hi:lo] */
3282#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3283 do { \
3284 s_lo += a_lo; \
f5ba6772 3285 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3286 } while (0)
3287
3288/* difference = minuend - subtrahend */
3289#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3290 do { \
bb2a0f7a
YG
3291 if (m_lo < s_lo) { \
3292 /* underflow */ \
a2fbb9ea 3293 d_hi = m_hi - s_hi; \
bb2a0f7a 3294 if (d_hi > 0) { \
6378c025 3295 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3296 d_hi--; \
3297 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3298 } else { \
6378c025 3299 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3300 d_hi = 0; \
3301 d_lo = 0; \
3302 } \
bb2a0f7a
YG
3303 } else { \
3304 /* m_lo >= s_lo */ \
a2fbb9ea 3305 if (m_hi < s_hi) { \
bb2a0f7a
YG
3306 d_hi = 0; \
3307 d_lo = 0; \
3308 } else { \
6378c025 3309 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3310 d_hi = m_hi - s_hi; \
3311 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3312 } \
3313 } \
3314 } while (0)
3315
bb2a0f7a 3316#define UPDATE_STAT64(s, t) \
a2fbb9ea 3317 do { \
bb2a0f7a
YG
3318 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3319 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3320 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3321 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3322 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3323 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3324 } while (0)
3325
bb2a0f7a 3326#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3327 do { \
bb2a0f7a
YG
3328 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3329 diff.lo, new->s##_lo, old->s##_lo); \
3330 ADD_64(estats->t##_hi, diff.hi, \
3331 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3332 } while (0)
3333
3334/* sum[hi:lo] += add */
3335#define ADD_EXTEND_64(s_hi, s_lo, a) \
3336 do { \
3337 s_lo += a; \
3338 s_hi += (s_lo < a) ? 1 : 0; \
3339 } while (0)
3340
bb2a0f7a 3341#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3342 do { \
bb2a0f7a
YG
3343 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3344 pstats->mac_stx[1].s##_lo, \
3345 new->s); \
a2fbb9ea
ET
3346 } while (0)
3347
bb2a0f7a 3348#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3349 do { \
4781bfad
EG
3350 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3351 old_tclient->s = tclient->s; \
de832a55
EG
3352 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3353 } while (0)
3354
3355#define UPDATE_EXTEND_USTAT(s, t) \
3356 do { \
3357 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3358 old_uclient->s = uclient->s; \
3359 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3360 } while (0)
3361
3362#define UPDATE_EXTEND_XSTAT(s, t) \
3363 do { \
4781bfad
EG
3364 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3365 old_xclient->s = xclient->s; \
de832a55
EG
3366 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3367 } while (0)
3368
3369/* minuend -= subtrahend */
3370#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3371 do { \
3372 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3373 } while (0)
3374
3375/* minuend[hi:lo] -= subtrahend */
3376#define SUB_EXTEND_64(m_hi, m_lo, s) \
3377 do { \
3378 SUB_64(m_hi, 0, m_lo, s); \
3379 } while (0)
3380
3381#define SUB_EXTEND_USTAT(s, t) \
3382 do { \
3383 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3384 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3385 } while (0)
3386
3387/*
3388 * General service functions
3389 */
3390
3391static inline long bnx2x_hilo(u32 *hiref)
3392{
3393 u32 lo = *(hiref + 1);
3394#if (BITS_PER_LONG == 64)
3395 u32 hi = *hiref;
3396
3397 return HILO_U64(hi, lo);
3398#else
3399 return lo;
3400#endif
3401}
3402
3403/*
3404 * Init service functions
3405 */
3406
bb2a0f7a
YG
3407static void bnx2x_storm_stats_post(struct bnx2x *bp)
3408{
3409 if (!bp->stats_pending) {
3410 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3411 int i, rc;
bb2a0f7a
YG
3412
3413 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3414 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3415 for_each_queue(bp, i)
3416 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3417
3418 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3419 ((u32 *)&ramrod_data)[1],
3420 ((u32 *)&ramrod_data)[0], 0);
3421 if (rc == 0) {
3422 /* stats ramrod has it's own slot on the spq */
3423 bp->spq_left++;
3424 bp->stats_pending = 1;
3425 }
3426 }
3427}
3428
bb2a0f7a
YG
3429static void bnx2x_hw_stats_post(struct bnx2x *bp)
3430{
3431 struct dmae_command *dmae = &bp->stats_dmae;
3432 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3433
3434 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3435 if (CHIP_REV_IS_SLOW(bp))
3436 return;
bb2a0f7a
YG
3437
3438 /* loader */
3439 if (bp->executer_idx) {
3440 int loader_idx = PMF_DMAE_C(bp);
3441
3442 memset(dmae, 0, sizeof(struct dmae_command));
3443
3444 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3445 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3446 DMAE_CMD_DST_RESET |
3447#ifdef __BIG_ENDIAN
3448 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3449#else
3450 DMAE_CMD_ENDIANITY_DW_SWAP |
3451#endif
3452 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3453 DMAE_CMD_PORT_0) |
3454 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3455 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3456 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3457 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3458 sizeof(struct dmae_command) *
3459 (loader_idx + 1)) >> 2;
3460 dmae->dst_addr_hi = 0;
3461 dmae->len = sizeof(struct dmae_command) >> 2;
3462 if (CHIP_IS_E1(bp))
3463 dmae->len--;
3464 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3465 dmae->comp_addr_hi = 0;
3466 dmae->comp_val = 1;
3467
3468 *stats_comp = 0;
3469 bnx2x_post_dmae(bp, dmae, loader_idx);
3470
3471 } else if (bp->func_stx) {
3472 *stats_comp = 0;
3473 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3474 }
3475}
3476
3477static int bnx2x_stats_comp(struct bnx2x *bp)
3478{
3479 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480 int cnt = 10;
3481
3482 might_sleep();
3483 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3484 if (!cnt) {
3485 BNX2X_ERR("timeout waiting for stats finished\n");
3486 break;
3487 }
3488 cnt--;
12469401 3489 msleep(1);
bb2a0f7a
YG
3490 }
3491 return 1;
3492}
3493
3494/*
3495 * Statistics service functions
3496 */
3497
3498static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3499{
3500 struct dmae_command *dmae;
3501 u32 opcode;
3502 int loader_idx = PMF_DMAE_C(bp);
3503 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3504
3505 /* sanity */
3506 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3507 BNX2X_ERR("BUG!\n");
3508 return;
3509 }
3510
3511 bp->executer_idx = 0;
3512
3513 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3514 DMAE_CMD_C_ENABLE |
3515 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3516#ifdef __BIG_ENDIAN
3517 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3518#else
3519 DMAE_CMD_ENDIANITY_DW_SWAP |
3520#endif
3521 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3522 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3523
3524 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3526 dmae->src_addr_lo = bp->port.port_stx >> 2;
3527 dmae->src_addr_hi = 0;
3528 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3529 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3530 dmae->len = DMAE_LEN32_RD_MAX;
3531 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532 dmae->comp_addr_hi = 0;
3533 dmae->comp_val = 1;
3534
3535 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3536 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3537 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3538 dmae->src_addr_hi = 0;
7a9b2557
VZ
3539 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3540 DMAE_LEN32_RD_MAX * 4);
3541 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3542 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3543 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3544 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3545 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3546 dmae->comp_val = DMAE_COMP_VAL;
3547
3548 *stats_comp = 0;
3549 bnx2x_hw_stats_post(bp);
3550 bnx2x_stats_comp(bp);
3551}
3552
3553static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3554{
3555 struct dmae_command *dmae;
34f80b04 3556 int port = BP_PORT(bp);
bb2a0f7a 3557 int vn = BP_E1HVN(bp);
a2fbb9ea 3558 u32 opcode;
bb2a0f7a 3559 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3560 u32 mac_addr;
bb2a0f7a
YG
3561 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3562
3563 /* sanity */
3564 if (!bp->link_vars.link_up || !bp->port.pmf) {
3565 BNX2X_ERR("BUG!\n");
3566 return;
3567 }
a2fbb9ea
ET
3568
3569 bp->executer_idx = 0;
bb2a0f7a
YG
3570
3571 /* MCP */
3572 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3575#ifdef __BIG_ENDIAN
bb2a0f7a 3576 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3577#else
bb2a0f7a 3578 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3579#endif
bb2a0f7a
YG
3580 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3582
bb2a0f7a 3583 if (bp->port.port_stx) {
a2fbb9ea
ET
3584
3585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586 dmae->opcode = opcode;
bb2a0f7a
YG
3587 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3590 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3591 dmae->len = sizeof(struct host_port_stats) >> 2;
3592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593 dmae->comp_addr_hi = 0;
3594 dmae->comp_val = 1;
a2fbb9ea
ET
3595 }
3596
bb2a0f7a
YG
3597 if (bp->func_stx) {
3598
3599 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600 dmae->opcode = opcode;
3601 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603 dmae->dst_addr_lo = bp->func_stx >> 2;
3604 dmae->dst_addr_hi = 0;
3605 dmae->len = sizeof(struct host_func_stats) >> 2;
3606 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607 dmae->comp_addr_hi = 0;
3608 dmae->comp_val = 1;
a2fbb9ea
ET
3609 }
3610
bb2a0f7a 3611 /* MAC */
a2fbb9ea
ET
3612 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3615#ifdef __BIG_ENDIAN
3616 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3617#else
3618 DMAE_CMD_ENDIANITY_DW_SWAP |
3619#endif
bb2a0f7a
YG
3620 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3622
c18487ee 3623 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3624
3625 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626 NIG_REG_INGRESS_BMAC0_MEM);
3627
3628 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629 BIGMAC_REGISTER_TX_STAT_GTBYT */
3630 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631 dmae->opcode = opcode;
3632 dmae->src_addr_lo = (mac_addr +
3633 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634 dmae->src_addr_hi = 0;
3635 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640 dmae->comp_addr_hi = 0;
3641 dmae->comp_val = 1;
3642
3643 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646 dmae->opcode = opcode;
3647 dmae->src_addr_lo = (mac_addr +
3648 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649 dmae->src_addr_hi = 0;
3650 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3651 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3652 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3653 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3654 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3658 dmae->comp_val = 1;
3659
c18487ee 3660 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3661
3662 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3663
3664 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3665 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666 dmae->opcode = opcode;
3667 dmae->src_addr_lo = (mac_addr +
3668 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669 dmae->src_addr_hi = 0;
3670 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674 dmae->comp_addr_hi = 0;
3675 dmae->comp_val = 1;
3676
3677 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679 dmae->opcode = opcode;
3680 dmae->src_addr_lo = (mac_addr +
3681 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682 dmae->src_addr_hi = 0;
3683 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3684 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3685 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3686 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3687 dmae->len = 1;
3688 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689 dmae->comp_addr_hi = 0;
3690 dmae->comp_val = 1;
3691
3692 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3693 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694 dmae->opcode = opcode;
3695 dmae->src_addr_lo = (mac_addr +
3696 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697 dmae->src_addr_hi = 0;
3698 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3699 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3700 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3701 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3702 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704 dmae->comp_addr_hi = 0;
3705 dmae->comp_val = 1;
3706 }
3707
3708 /* NIG */
bb2a0f7a
YG
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718 dmae->comp_addr_hi = 0;
3719 dmae->comp_val = 1;
3720
3721 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722 dmae->opcode = opcode;
3723 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725 dmae->src_addr_hi = 0;
3726 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730 dmae->len = (2*sizeof(u32)) >> 2;
3731 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732 dmae->comp_addr_hi = 0;
3733 dmae->comp_val = 1;
3734
a2fbb9ea
ET
3735 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3736 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739#ifdef __BIG_ENDIAN
3740 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741#else
3742 DMAE_CMD_ENDIANITY_DW_SWAP |
3743#endif
bb2a0f7a
YG
3744 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745 (vn << DMAE_CMD_E1HVN_SHIFT));
3746 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3748 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3749 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753 dmae->len = (2*sizeof(u32)) >> 2;
3754 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756 dmae->comp_val = DMAE_COMP_VAL;
3757
3758 *stats_comp = 0;
a2fbb9ea
ET
3759}
3760
bb2a0f7a 3761static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3762{
bb2a0f7a
YG
3763 struct dmae_command *dmae = &bp->stats_dmae;
3764 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3765
bb2a0f7a
YG
3766 /* sanity */
3767 if (!bp->func_stx) {
3768 BNX2X_ERR("BUG!\n");
3769 return;
3770 }
a2fbb9ea 3771
bb2a0f7a
YG
3772 bp->executer_idx = 0;
3773 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3774
bb2a0f7a
YG
3775 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3776 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3777 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3778#ifdef __BIG_ENDIAN
3779 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3780#else
3781 DMAE_CMD_ENDIANITY_DW_SWAP |
3782#endif
3783 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3784 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3785 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3786 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3787 dmae->dst_addr_lo = bp->func_stx >> 2;
3788 dmae->dst_addr_hi = 0;
3789 dmae->len = sizeof(struct host_func_stats) >> 2;
3790 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3791 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3792 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3793
bb2a0f7a
YG
3794 *stats_comp = 0;
3795}
a2fbb9ea 3796
bb2a0f7a
YG
3797static void bnx2x_stats_start(struct bnx2x *bp)
3798{
3799 if (bp->port.pmf)
3800 bnx2x_port_stats_init(bp);
3801
3802 else if (bp->func_stx)
3803 bnx2x_func_stats_init(bp);
3804
3805 bnx2x_hw_stats_post(bp);
3806 bnx2x_storm_stats_post(bp);
3807}
3808
3809static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3810{
3811 bnx2x_stats_comp(bp);
3812 bnx2x_stats_pmf_update(bp);
3813 bnx2x_stats_start(bp);
3814}
3815
3816static void bnx2x_stats_restart(struct bnx2x *bp)
3817{
3818 bnx2x_stats_comp(bp);
3819 bnx2x_stats_start(bp);
3820}
3821
3822static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3823{
3824 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3825 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3826 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3827 struct {
3828 u32 lo;
3829 u32 hi;
3830 } diff;
bb2a0f7a
YG
3831
3832 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3833 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3834 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3835 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3836 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3837 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3838 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3839 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3840 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3841 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3842 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3843 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3844 UPDATE_STAT64(tx_stat_gt127,
3845 tx_stat_etherstatspkts65octetsto127octets);
3846 UPDATE_STAT64(tx_stat_gt255,
3847 tx_stat_etherstatspkts128octetsto255octets);
3848 UPDATE_STAT64(tx_stat_gt511,
3849 tx_stat_etherstatspkts256octetsto511octets);
3850 UPDATE_STAT64(tx_stat_gt1023,
3851 tx_stat_etherstatspkts512octetsto1023octets);
3852 UPDATE_STAT64(tx_stat_gt1518,
3853 tx_stat_etherstatspkts1024octetsto1522octets);
3854 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3855 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3856 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3857 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3858 UPDATE_STAT64(tx_stat_gterr,
3859 tx_stat_dot3statsinternalmactransmiterrors);
3860 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3861
3862 estats->pause_frames_received_hi =
3863 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3864 estats->pause_frames_received_lo =
3865 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3866
3867 estats->pause_frames_sent_hi =
3868 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3869 estats->pause_frames_sent_lo =
3870 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3871}
3872
3873static void bnx2x_emac_stats_update(struct bnx2x *bp)
3874{
3875 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3876 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3877 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3878
3879 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3880 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3881 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3882 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3883 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3884 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3885 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3886 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3887 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3888 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3889 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3890 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3891 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3892 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3893 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3894 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3895 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3896 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3897 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3898 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3899 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3900 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3901 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3902 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3903 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3904 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3905 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3906 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3907 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3908 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3909 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3910
3911 estats->pause_frames_received_hi =
3912 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3913 estats->pause_frames_received_lo =
3914 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3915 ADD_64(estats->pause_frames_received_hi,
3916 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3917 estats->pause_frames_received_lo,
3918 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3919
3920 estats->pause_frames_sent_hi =
3921 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3922 estats->pause_frames_sent_lo =
3923 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3924 ADD_64(estats->pause_frames_sent_hi,
3925 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3926 estats->pause_frames_sent_lo,
3927 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3928}
3929
3930static int bnx2x_hw_stats_update(struct bnx2x *bp)
3931{
3932 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3933 struct nig_stats *old = &(bp->port.old_nig_stats);
3934 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3935 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3936 struct {
3937 u32 lo;
3938 u32 hi;
3939 } diff;
de832a55 3940 u32 nig_timer_max;
bb2a0f7a
YG
3941
3942 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3943 bnx2x_bmac_stats_update(bp);
3944
3945 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3946 bnx2x_emac_stats_update(bp);
3947
3948 else { /* unreached */
c3eefaf6 3949 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
3950 return -1;
3951 }
a2fbb9ea 3952
bb2a0f7a
YG
3953 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3954 new->brb_discard - old->brb_discard);
66e855f3
YG
3955 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3956 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3957
bb2a0f7a
YG
3958 UPDATE_STAT64_NIG(egress_mac_pkt0,
3959 etherstatspkts1024octetsto1522octets);
3960 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3961
bb2a0f7a 3962 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3963
bb2a0f7a
YG
3964 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3965 sizeof(struct mac_stx));
3966 estats->brb_drop_hi = pstats->brb_drop_hi;
3967 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3968
bb2a0f7a 3969 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3970
de832a55
EG
3971 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3972 if (nig_timer_max != estats->nig_timer_max) {
3973 estats->nig_timer_max = nig_timer_max;
3974 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3975 }
3976
bb2a0f7a 3977 return 0;
a2fbb9ea
ET
3978}
3979
bb2a0f7a 3980static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3981{
3982 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3983 struct tstorm_per_port_stats *tport =
de832a55 3984 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
3985 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3986 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
3987 int i;
3988
6fe49bb9
EG
3989 memcpy(&(fstats->total_bytes_received_hi),
3990 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
de832a55
EG
3991 sizeof(struct host_func_stats) - 2*sizeof(u32));
3992 estats->error_bytes_received_hi = 0;
3993 estats->error_bytes_received_lo = 0;
3994 estats->etherstatsoverrsizepkts_hi = 0;
3995 estats->etherstatsoverrsizepkts_lo = 0;
3996 estats->no_buff_discard_hi = 0;
3997 estats->no_buff_discard_lo = 0;
a2fbb9ea 3998
54b9ddaa 3999 for_each_queue(bp, i) {
de832a55
EG
4000 struct bnx2x_fastpath *fp = &bp->fp[i];
4001 int cl_id = fp->cl_id;
4002 struct tstorm_per_client_stats *tclient =
4003 &stats->tstorm_common.client_statistics[cl_id];
4004 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4005 struct ustorm_per_client_stats *uclient =
4006 &stats->ustorm_common.client_statistics[cl_id];
4007 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4008 struct xstorm_per_client_stats *xclient =
4009 &stats->xstorm_common.client_statistics[cl_id];
4010 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4011 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4012 u32 diff;
4013
4014 /* are storm stats valid? */
4015 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4016 bp->stats_counter) {
de832a55
EG
4017 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4018 " xstorm counter (%d) != stats_counter (%d)\n",
4019 i, xclient->stats_counter, bp->stats_counter);
4020 return -1;
4021 }
4022 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4023 bp->stats_counter) {
de832a55
EG
4024 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4025 " tstorm counter (%d) != stats_counter (%d)\n",
4026 i, tclient->stats_counter, bp->stats_counter);
4027 return -2;
4028 }
4029 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4030 bp->stats_counter) {
4031 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4032 " ustorm counter (%d) != stats_counter (%d)\n",
4033 i, uclient->stats_counter, bp->stats_counter);
4034 return -4;
4035 }
a2fbb9ea 4036
de832a55 4037 qstats->total_bytes_received_hi =
ca00392c 4038 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4039 qstats->total_bytes_received_lo =
ca00392c
EG
4040 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4041
4042 ADD_64(qstats->total_bytes_received_hi,
4043 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4044 qstats->total_bytes_received_lo,
4045 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4046
4047 ADD_64(qstats->total_bytes_received_hi,
4048 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4049 qstats->total_bytes_received_lo,
4050 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4051
4052 qstats->valid_bytes_received_hi =
4053 qstats->total_bytes_received_hi;
de832a55 4054 qstats->valid_bytes_received_lo =
ca00392c 4055 qstats->total_bytes_received_lo;
bb2a0f7a 4056
de832a55 4057 qstats->error_bytes_received_hi =
bb2a0f7a 4058 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4059 qstats->error_bytes_received_lo =
bb2a0f7a 4060 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4061
de832a55
EG
4062 ADD_64(qstats->total_bytes_received_hi,
4063 qstats->error_bytes_received_hi,
4064 qstats->total_bytes_received_lo,
4065 qstats->error_bytes_received_lo);
4066
4067 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4068 total_unicast_packets_received);
4069 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4070 total_multicast_packets_received);
4071 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4072 total_broadcast_packets_received);
4073 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4074 etherstatsoverrsizepkts);
4075 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4076
4077 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4078 total_unicast_packets_received);
4079 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4080 total_multicast_packets_received);
4081 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4082 total_broadcast_packets_received);
4083 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4084 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4085 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4086
4087 qstats->total_bytes_transmitted_hi =
ca00392c 4088 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4089 qstats->total_bytes_transmitted_lo =
ca00392c
EG
4090 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4091
4092 ADD_64(qstats->total_bytes_transmitted_hi,
4093 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4094 qstats->total_bytes_transmitted_lo,
4095 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4096
4097 ADD_64(qstats->total_bytes_transmitted_hi,
4098 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4099 qstats->total_bytes_transmitted_lo,
4100 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4101
de832a55
EG
4102 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4103 total_unicast_packets_transmitted);
4104 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4105 total_multicast_packets_transmitted);
4106 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4107 total_broadcast_packets_transmitted);
4108
4109 old_tclient->checksum_discard = tclient->checksum_discard;
4110 old_tclient->ttl0_discard = tclient->ttl0_discard;
4111
4112 ADD_64(fstats->total_bytes_received_hi,
4113 qstats->total_bytes_received_hi,
4114 fstats->total_bytes_received_lo,
4115 qstats->total_bytes_received_lo);
4116 ADD_64(fstats->total_bytes_transmitted_hi,
4117 qstats->total_bytes_transmitted_hi,
4118 fstats->total_bytes_transmitted_lo,
4119 qstats->total_bytes_transmitted_lo);
4120 ADD_64(fstats->total_unicast_packets_received_hi,
4121 qstats->total_unicast_packets_received_hi,
4122 fstats->total_unicast_packets_received_lo,
4123 qstats->total_unicast_packets_received_lo);
4124 ADD_64(fstats->total_multicast_packets_received_hi,
4125 qstats->total_multicast_packets_received_hi,
4126 fstats->total_multicast_packets_received_lo,
4127 qstats->total_multicast_packets_received_lo);
4128 ADD_64(fstats->total_broadcast_packets_received_hi,
4129 qstats->total_broadcast_packets_received_hi,
4130 fstats->total_broadcast_packets_received_lo,
4131 qstats->total_broadcast_packets_received_lo);
4132 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4133 qstats->total_unicast_packets_transmitted_hi,
4134 fstats->total_unicast_packets_transmitted_lo,
4135 qstats->total_unicast_packets_transmitted_lo);
4136 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4137 qstats->total_multicast_packets_transmitted_hi,
4138 fstats->total_multicast_packets_transmitted_lo,
4139 qstats->total_multicast_packets_transmitted_lo);
4140 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4141 qstats->total_broadcast_packets_transmitted_hi,
4142 fstats->total_broadcast_packets_transmitted_lo,
4143 qstats->total_broadcast_packets_transmitted_lo);
4144 ADD_64(fstats->valid_bytes_received_hi,
4145 qstats->valid_bytes_received_hi,
4146 fstats->valid_bytes_received_lo,
4147 qstats->valid_bytes_received_lo);
4148
4149 ADD_64(estats->error_bytes_received_hi,
4150 qstats->error_bytes_received_hi,
4151 estats->error_bytes_received_lo,
4152 qstats->error_bytes_received_lo);
4153 ADD_64(estats->etherstatsoverrsizepkts_hi,
4154 qstats->etherstatsoverrsizepkts_hi,
4155 estats->etherstatsoverrsizepkts_lo,
4156 qstats->etherstatsoverrsizepkts_lo);
4157 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4158 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4159 }
4160
4161 ADD_64(fstats->total_bytes_received_hi,
4162 estats->rx_stat_ifhcinbadoctets_hi,
4163 fstats->total_bytes_received_lo,
4164 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
4165
4166 memcpy(estats, &(fstats->total_bytes_received_hi),
4167 sizeof(struct host_func_stats) - 2*sizeof(u32));
4168
de832a55
EG
4169 ADD_64(estats->etherstatsoverrsizepkts_hi,
4170 estats->rx_stat_dot3statsframestoolong_hi,
4171 estats->etherstatsoverrsizepkts_lo,
4172 estats->rx_stat_dot3statsframestoolong_lo);
4173 ADD_64(estats->error_bytes_received_hi,
4174 estats->rx_stat_ifhcinbadoctets_hi,
4175 estats->error_bytes_received_lo,
4176 estats->rx_stat_ifhcinbadoctets_lo);
4177
4178 if (bp->port.pmf) {
4179 estats->mac_filter_discard =
4180 le32_to_cpu(tport->mac_filter_discard);
4181 estats->xxoverflow_discard =
4182 le32_to_cpu(tport->xxoverflow_discard);
4183 estats->brb_truncate_discard =
bb2a0f7a 4184 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
4185 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4186 }
bb2a0f7a
YG
4187
4188 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4189
de832a55
EG
4190 bp->stats_pending = 0;
4191
a2fbb9ea
ET
4192 return 0;
4193}
4194
bb2a0f7a 4195static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4196{
bb2a0f7a 4197 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4198 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4199 int i;
a2fbb9ea
ET
4200
4201 nstats->rx_packets =
4202 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4203 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4204 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4205
4206 nstats->tx_packets =
4207 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4208 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4209 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4210
de832a55 4211 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4212
0e39e645 4213 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4214
de832a55 4215 nstats->rx_dropped = estats->mac_discard;
54b9ddaa 4216 for_each_queue(bp, i)
de832a55
EG
4217 nstats->rx_dropped +=
4218 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4219
a2fbb9ea
ET
4220 nstats->tx_dropped = 0;
4221
4222 nstats->multicast =
de832a55 4223 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4224
bb2a0f7a 4225 nstats->collisions =
de832a55 4226 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
4227
4228 nstats->rx_length_errors =
de832a55
EG
4229 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4230 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4231 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4232 bnx2x_hilo(&estats->brb_truncate_hi);
4233 nstats->rx_crc_errors =
4234 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4235 nstats->rx_frame_errors =
4236 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4237 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
4238 nstats->rx_missed_errors = estats->xxoverflow_discard;
4239
4240 nstats->rx_errors = nstats->rx_length_errors +
4241 nstats->rx_over_errors +
4242 nstats->rx_crc_errors +
4243 nstats->rx_frame_errors +
0e39e645
ET
4244 nstats->rx_fifo_errors +
4245 nstats->rx_missed_errors;
a2fbb9ea 4246
bb2a0f7a 4247 nstats->tx_aborted_errors =
de832a55
EG
4248 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4249 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4250 nstats->tx_carrier_errors =
4251 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
4252 nstats->tx_fifo_errors = 0;
4253 nstats->tx_heartbeat_errors = 0;
4254 nstats->tx_window_errors = 0;
4255
4256 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
4257 nstats->tx_carrier_errors +
4258 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4259}
4260
4261static void bnx2x_drv_stats_update(struct bnx2x *bp)
4262{
4263 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4264 int i;
4265
4266 estats->driver_xoff = 0;
4267 estats->rx_err_discard_pkt = 0;
4268 estats->rx_skb_alloc_failed = 0;
4269 estats->hw_csum_err = 0;
54b9ddaa 4270 for_each_queue(bp, i) {
de832a55
EG
4271 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4272
4273 estats->driver_xoff += qstats->driver_xoff;
4274 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276 estats->hw_csum_err += qstats->hw_csum_err;
4277 }
a2fbb9ea
ET
4278}
4279
bb2a0f7a 4280static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4281{
bb2a0f7a 4282 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4283
bb2a0f7a
YG
4284 if (*stats_comp != DMAE_COMP_VAL)
4285 return;
4286
4287 if (bp->port.pmf)
de832a55 4288 bnx2x_hw_stats_update(bp);
a2fbb9ea 4289
de832a55
EG
4290 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4291 BNX2X_ERR("storm stats were not updated for 3 times\n");
4292 bnx2x_panic();
4293 return;
a2fbb9ea
ET
4294 }
4295
de832a55
EG
4296 bnx2x_net_stats_update(bp);
4297 bnx2x_drv_stats_update(bp);
4298
a2fbb9ea 4299 if (bp->msglevel & NETIF_MSG_TIMER) {
ca00392c 4300 struct bnx2x_fastpath *fp0_rx = bp->fp;
54b9ddaa 4301 struct bnx2x_fastpath *fp0_tx = bp->fp;
de832a55
EG
4302 struct tstorm_per_client_stats *old_tclient =
4303 &bp->fp->old_tclient;
4304 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4305 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4306 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4307 int i;
a2fbb9ea
ET
4308
4309 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4310 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4311 " tx pkt (%lx)\n",
ca00392c
EG
4312 bnx2x_tx_avail(fp0_tx),
4313 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4314 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4315 " rx pkt (%lx)\n",
ca00392c
EG
4316 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4317 fp0_rx->rx_comp_cons),
4318 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4319 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4320 "brb truncate %u\n",
4321 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4322 qstats->driver_xoff,
4323 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4324 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4325 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4326 "mac_discard %u mac_filter_discard %u "
4327 "xxovrflow_discard %u brb_truncate_discard %u "
4328 "ttl0_discard %u\n",
4781bfad 4329 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4330 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4331 bnx2x_hilo(&qstats->no_buff_discard_hi),
4332 estats->mac_discard, estats->mac_filter_discard,
4333 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4334 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4335
4336 for_each_queue(bp, i) {
4337 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4338 bnx2x_fp(bp, i, tx_pkt),
4339 bnx2x_fp(bp, i, rx_pkt),
4340 bnx2x_fp(bp, i, rx_calls));
4341 }
4342 }
4343
bb2a0f7a
YG
4344 bnx2x_hw_stats_post(bp);
4345 bnx2x_storm_stats_post(bp);
4346}
a2fbb9ea 4347
bb2a0f7a
YG
4348static void bnx2x_port_stats_stop(struct bnx2x *bp)
4349{
4350 struct dmae_command *dmae;
4351 u32 opcode;
4352 int loader_idx = PMF_DMAE_C(bp);
4353 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4354
bb2a0f7a 4355 bp->executer_idx = 0;
a2fbb9ea 4356
bb2a0f7a
YG
4357 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4358 DMAE_CMD_C_ENABLE |
4359 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4360#ifdef __BIG_ENDIAN
bb2a0f7a 4361 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4362#else
bb2a0f7a 4363 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4364#endif
bb2a0f7a
YG
4365 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4366 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4367
4368 if (bp->port.port_stx) {
4369
4370 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4371 if (bp->func_stx)
4372 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4373 else
4374 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4376 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4377 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4378 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4379 dmae->len = sizeof(struct host_port_stats) >> 2;
4380 if (bp->func_stx) {
4381 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4382 dmae->comp_addr_hi = 0;
4383 dmae->comp_val = 1;
4384 } else {
4385 dmae->comp_addr_lo =
4386 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4387 dmae->comp_addr_hi =
4388 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4389 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4390
bb2a0f7a
YG
4391 *stats_comp = 0;
4392 }
a2fbb9ea
ET
4393 }
4394
bb2a0f7a
YG
4395 if (bp->func_stx) {
4396
4397 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4398 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4399 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4400 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4401 dmae->dst_addr_lo = bp->func_stx >> 2;
4402 dmae->dst_addr_hi = 0;
4403 dmae->len = sizeof(struct host_func_stats) >> 2;
4404 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4405 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406 dmae->comp_val = DMAE_COMP_VAL;
4407
4408 *stats_comp = 0;
a2fbb9ea 4409 }
bb2a0f7a
YG
4410}
4411
4412static void bnx2x_stats_stop(struct bnx2x *bp)
4413{
4414 int update = 0;
4415
4416 bnx2x_stats_comp(bp);
4417
4418 if (bp->port.pmf)
4419 update = (bnx2x_hw_stats_update(bp) == 0);
4420
4421 update |= (bnx2x_storm_stats_update(bp) == 0);
4422
4423 if (update) {
4424 bnx2x_net_stats_update(bp);
a2fbb9ea 4425
bb2a0f7a
YG
4426 if (bp->port.pmf)
4427 bnx2x_port_stats_stop(bp);
4428
4429 bnx2x_hw_stats_post(bp);
4430 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4431 }
4432}
4433
bb2a0f7a
YG
4434static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4435{
4436}
4437
4438static const struct {
4439 void (*action)(struct bnx2x *bp);
4440 enum bnx2x_stats_state next_state;
4441} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4442/* state event */
4443{
4444/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4445/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4446/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4447/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4448},
4449{
4450/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4451/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4452/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4453/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4454}
4455};
4456
4457static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4458{
4459 enum bnx2x_stats_state state = bp->stats_state;
4460
4461 bnx2x_stats_stm[state][event].action(bp);
4462 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4463
8924665a
EG
4464 /* Make sure the state has been "changed" */
4465 smp_wmb();
4466
bb2a0f7a
YG
4467 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4468 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469 state, event, bp->stats_state);
4470}
4471
6fe49bb9
EG
4472static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4473{
4474 struct dmae_command *dmae;
4475 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4476
4477 /* sanity */
4478 if (!bp->port.pmf || !bp->port.port_stx) {
4479 BNX2X_ERR("BUG!\n");
4480 return;
4481 }
4482
4483 bp->executer_idx = 0;
4484
4485 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4486 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4487 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4488 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4489#ifdef __BIG_ENDIAN
4490 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4491#else
4492 DMAE_CMD_ENDIANITY_DW_SWAP |
4493#endif
4494 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4495 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4496 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4497 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4498 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4499 dmae->dst_addr_hi = 0;
4500 dmae->len = sizeof(struct host_port_stats) >> 2;
4501 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4502 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4503 dmae->comp_val = DMAE_COMP_VAL;
4504
4505 *stats_comp = 0;
4506 bnx2x_hw_stats_post(bp);
4507 bnx2x_stats_comp(bp);
4508}
4509
4510static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4511{
4512 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4513 int port = BP_PORT(bp);
4514 int func;
4515 u32 func_stx;
4516
4517 /* sanity */
4518 if (!bp->port.pmf || !bp->func_stx) {
4519 BNX2X_ERR("BUG!\n");
4520 return;
4521 }
4522
4523 /* save our func_stx */
4524 func_stx = bp->func_stx;
4525
4526 for (vn = VN_0; vn < vn_max; vn++) {
4527 func = 2*vn + port;
4528
4529 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4530 bnx2x_func_stats_init(bp);
4531 bnx2x_hw_stats_post(bp);
4532 bnx2x_stats_comp(bp);
4533 }
4534
4535 /* restore our func_stx */
4536 bp->func_stx = func_stx;
4537}
4538
4539static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4540{
4541 struct dmae_command *dmae = &bp->stats_dmae;
4542 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4543
4544 /* sanity */
4545 if (!bp->func_stx) {
4546 BNX2X_ERR("BUG!\n");
4547 return;
4548 }
4549
4550 bp->executer_idx = 0;
4551 memset(dmae, 0, sizeof(struct dmae_command));
4552
4553 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4554 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4555 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4556#ifdef __BIG_ENDIAN
4557 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4558#else
4559 DMAE_CMD_ENDIANITY_DW_SWAP |
4560#endif
4561 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4562 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4563 dmae->src_addr_lo = bp->func_stx >> 2;
4564 dmae->src_addr_hi = 0;
4565 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4566 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4567 dmae->len = sizeof(struct host_func_stats) >> 2;
4568 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4569 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4570 dmae->comp_val = DMAE_COMP_VAL;
4571
4572 *stats_comp = 0;
4573 bnx2x_hw_stats_post(bp);
4574 bnx2x_stats_comp(bp);
4575}
4576
4577static void bnx2x_stats_init(struct bnx2x *bp)
4578{
4579 int port = BP_PORT(bp);
4580 int func = BP_FUNC(bp);
4581 int i;
4582
4583 bp->stats_pending = 0;
4584 bp->executer_idx = 0;
4585 bp->stats_counter = 0;
4586
4587 /* port and func stats for management */
4588 if (!BP_NOMCP(bp)) {
4589 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4590 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4591
4592 } else {
4593 bp->port.port_stx = 0;
4594 bp->func_stx = 0;
4595 }
4596 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4597 bp->port.port_stx, bp->func_stx);
4598
4599 /* port stats */
4600 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4601 bp->port.old_nig_stats.brb_discard =
4602 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4603 bp->port.old_nig_stats.brb_truncate =
4604 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4605 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4606 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4607 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4608 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4609
4610 /* function stats */
4611 for_each_queue(bp, i) {
4612 struct bnx2x_fastpath *fp = &bp->fp[i];
4613
4614 memset(&fp->old_tclient, 0,
4615 sizeof(struct tstorm_per_client_stats));
4616 memset(&fp->old_uclient, 0,
4617 sizeof(struct ustorm_per_client_stats));
4618 memset(&fp->old_xclient, 0,
4619 sizeof(struct xstorm_per_client_stats));
4620 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4621 }
4622
4623 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4624 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4625
4626 bp->stats_state = STATS_STATE_DISABLED;
4627
4628 if (bp->port.pmf) {
4629 if (bp->port.port_stx)
4630 bnx2x_port_stats_base_init(bp);
4631
4632 if (bp->func_stx)
4633 bnx2x_func_stats_base_init(bp);
4634
4635 } else if (bp->func_stx)
4636 bnx2x_func_stats_base_update(bp);
4637}
4638
a2fbb9ea
ET
4639static void bnx2x_timer(unsigned long data)
4640{
4641 struct bnx2x *bp = (struct bnx2x *) data;
4642
4643 if (!netif_running(bp->dev))
4644 return;
4645
4646 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4647 goto timer_restart;
a2fbb9ea
ET
4648
4649 if (poll) {
4650 struct bnx2x_fastpath *fp = &bp->fp[0];
4651 int rc;
4652
7961f791 4653 bnx2x_tx_int(fp);
a2fbb9ea
ET
4654 rc = bnx2x_rx_int(fp, 1000);
4655 }
4656
34f80b04
EG
4657 if (!BP_NOMCP(bp)) {
4658 int func = BP_FUNC(bp);
a2fbb9ea
ET
4659 u32 drv_pulse;
4660 u32 mcp_pulse;
4661
4662 ++bp->fw_drv_pulse_wr_seq;
4663 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4664 /* TBD - add SYSTEM_TIME */
4665 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4666 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4667
34f80b04 4668 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4669 MCP_PULSE_SEQ_MASK);
4670 /* The delta between driver pulse and mcp response
4671 * should be 1 (before mcp response) or 0 (after mcp response)
4672 */
4673 if ((drv_pulse != mcp_pulse) &&
4674 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4675 /* someone lost a heartbeat... */
4676 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4677 drv_pulse, mcp_pulse);
4678 }
4679 }
4680
f34d28ea 4681 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 4682 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4683
f1410647 4684timer_restart:
a2fbb9ea
ET
4685 mod_timer(&bp->timer, jiffies + bp->current_interval);
4686}
4687
4688/* end of Statistics */
4689
4690/* nic init */
4691
4692/*
4693 * nic init service functions
4694 */
4695
34f80b04 4696static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4697{
34f80b04
EG
4698 int port = BP_PORT(bp);
4699
ca00392c
EG
4700 /* "CSTORM" */
4701 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4703 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4704 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4705 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4706 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
4707}
4708
5c862848
EG
4709static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4710 dma_addr_t mapping, int sb_id)
34f80b04
EG
4711{
4712 int port = BP_PORT(bp);
bb2a0f7a 4713 int func = BP_FUNC(bp);
a2fbb9ea 4714 int index;
34f80b04 4715 u64 section;
a2fbb9ea
ET
4716
4717 /* USTORM */
4718 section = ((u64)mapping) + offsetof(struct host_status_block,
4719 u_status_block);
34f80b04 4720 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4721
ca00392c
EG
4722 REG_WR(bp, BAR_CSTRORM_INTMEM +
4723 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4724 REG_WR(bp, BAR_CSTRORM_INTMEM +
4725 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4726 U64_HI(section));
ca00392c
EG
4727 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4728 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4729
4730 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
4731 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4732 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4733
4734 /* CSTORM */
4735 section = ((u64)mapping) + offsetof(struct host_status_block,
4736 c_status_block);
34f80b04 4737 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4738
4739 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4740 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4741 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4742 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4743 U64_HI(section));
7a9b2557 4744 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4745 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4746
4747 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4748 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4749 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
4750
4751 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4752}
4753
4754static void bnx2x_zero_def_sb(struct bnx2x *bp)
4755{
4756 int func = BP_FUNC(bp);
a2fbb9ea 4757
ca00392c 4758 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
4759 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4760 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
4761 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4762 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4763 sizeof(struct cstorm_def_status_block_u)/4);
4764 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4765 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4766 sizeof(struct cstorm_def_status_block_c)/4);
4767 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
4768 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4769 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
4770}
4771
4772static void bnx2x_init_def_sb(struct bnx2x *bp,
4773 struct host_def_status_block *def_sb,
34f80b04 4774 dma_addr_t mapping, int sb_id)
a2fbb9ea 4775{
34f80b04
EG
4776 int port = BP_PORT(bp);
4777 int func = BP_FUNC(bp);
a2fbb9ea
ET
4778 int index, val, reg_offset;
4779 u64 section;
4780
4781 /* ATTN */
4782 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4783 atten_status_block);
34f80b04 4784 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4785
49d66772
ET
4786 bp->attn_state = 0;
4787
a2fbb9ea
ET
4788 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4789 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4790
34f80b04 4791 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4792 bp->attn_group[index].sig[0] = REG_RD(bp,
4793 reg_offset + 0x10*index);
4794 bp->attn_group[index].sig[1] = REG_RD(bp,
4795 reg_offset + 0x4 + 0x10*index);
4796 bp->attn_group[index].sig[2] = REG_RD(bp,
4797 reg_offset + 0x8 + 0x10*index);
4798 bp->attn_group[index].sig[3] = REG_RD(bp,
4799 reg_offset + 0xc + 0x10*index);
4800 }
4801
a2fbb9ea
ET
4802 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4803 HC_REG_ATTN_MSG0_ADDR_L);
4804
4805 REG_WR(bp, reg_offset, U64_LO(section));
4806 REG_WR(bp, reg_offset + 4, U64_HI(section));
4807
4808 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4809
4810 val = REG_RD(bp, reg_offset);
34f80b04 4811 val |= sb_id;
a2fbb9ea
ET
4812 REG_WR(bp, reg_offset, val);
4813
4814 /* USTORM */
4815 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816 u_def_status_block);
34f80b04 4817 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4818
ca00392c
EG
4819 REG_WR(bp, BAR_CSTRORM_INTMEM +
4820 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4821 REG_WR(bp, BAR_CSTRORM_INTMEM +
4822 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4823 U64_HI(section));
ca00392c
EG
4824 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4825 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
4826
4827 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
4828 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4829 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
4830
4831 /* CSTORM */
4832 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833 c_def_status_block);
34f80b04 4834 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4835
4836 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4837 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4838 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4839 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4840 U64_HI(section));
5c862848 4841 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4842 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
4843
4844 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4845 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4846 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
4847
4848 /* TSTORM */
4849 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850 t_def_status_block);
34f80b04 4851 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4852
4853 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4854 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4855 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4856 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4857 U64_HI(section));
5c862848 4858 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4859 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4860
4861 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4862 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4863 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4864
4865 /* XSTORM */
4866 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867 x_def_status_block);
34f80b04 4868 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4869
4870 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4871 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4872 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4873 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4874 U64_HI(section));
5c862848 4875 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4876 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4877
4878 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4879 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4880 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4881
bb2a0f7a 4882 bp->stats_pending = 0;
66e855f3 4883 bp->set_mac_pending = 0;
bb2a0f7a 4884
34f80b04 4885 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4886}
4887
4888static void bnx2x_update_coalesce(struct bnx2x *bp)
4889{
34f80b04 4890 int port = BP_PORT(bp);
a2fbb9ea
ET
4891 int i;
4892
4893 for_each_queue(bp, i) {
34f80b04 4894 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4895
4896 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
4897 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4898 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4899 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 4900 bp->rx_ticks/(4 * BNX2X_BTR));
ca00392c
EG
4901 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4902 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4903 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 4904 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
4905
4906 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4907 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4908 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4909 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 4910 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 4911 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4912 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4913 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 4914 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
4915 }
4916}
4917
7a9b2557
VZ
4918static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919 struct bnx2x_fastpath *fp, int last)
4920{
4921 int i;
4922
4923 for (i = 0; i < last; i++) {
4924 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925 struct sk_buff *skb = rx_buf->skb;
4926
4927 if (skb == NULL) {
4928 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4929 continue;
4930 }
4931
4932 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933 pci_unmap_single(bp->pdev,
4934 pci_unmap_addr(rx_buf, mapping),
356e2385 4935 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
4936
4937 dev_kfree_skb(skb);
4938 rx_buf->skb = NULL;
4939 }
4940}
4941
a2fbb9ea
ET
4942static void bnx2x_init_rx_rings(struct bnx2x *bp)
4943{
7a9b2557 4944 int func = BP_FUNC(bp);
32626230
EG
4945 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946 ETH_MAX_AGGREGATION_QUEUES_E1H;
4947 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4948 int i, j;
a2fbb9ea 4949
87942b46 4950 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4951 DP(NETIF_MSG_IFUP,
4952 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4953
7a9b2557 4954 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4955
54b9ddaa 4956 for_each_queue(bp, j) {
32626230 4957 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4958
32626230 4959 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4960 fp->tpa_pool[i].skb =
4961 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962 if (!fp->tpa_pool[i].skb) {
4963 BNX2X_ERR("Failed to allocate TPA "
4964 "skb pool for queue[%d] - "
4965 "disabling TPA on this "
4966 "queue!\n", j);
4967 bnx2x_free_tpa_pool(bp, fp, i);
4968 fp->disable_tpa = 1;
4969 break;
4970 }
4971 pci_unmap_addr_set((struct sw_rx_bd *)
4972 &bp->fp->tpa_pool[i],
4973 mapping, 0);
4974 fp->tpa_state[i] = BNX2X_TPA_STOP;
4975 }
4976 }
4977 }
4978
54b9ddaa 4979 for_each_queue(bp, j) {
a2fbb9ea
ET
4980 struct bnx2x_fastpath *fp = &bp->fp[j];
4981
4982 fp->rx_bd_cons = 0;
4983 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4984 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4985
4986 /* "next page" elements initialization */
4987 /* SGE ring */
4988 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989 struct eth_rx_sge *sge;
4990
4991 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4992 sge->addr_hi =
4993 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4995 sge->addr_lo =
4996 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4998 }
4999
5000 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5001
7a9b2557 5002 /* RX BD ring */
a2fbb9ea
ET
5003 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004 struct eth_rx_bd *rx_bd;
5005
5006 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5007 rx_bd->addr_hi =
5008 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5009 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5010 rx_bd->addr_lo =
5011 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5012 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5013 }
5014
34f80b04 5015 /* CQ ring */
a2fbb9ea
ET
5016 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017 struct eth_rx_cqe_next_page *nextpg;
5018
5019 nextpg = (struct eth_rx_cqe_next_page *)
5020 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5021 nextpg->addr_hi =
5022 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5023 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5024 nextpg->addr_lo =
5025 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5026 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5027 }
5028
7a9b2557
VZ
5029 /* Allocate SGEs and initialize the ring elements */
5030 for (i = 0, ring_prod = 0;
5031 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5032
7a9b2557
VZ
5033 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034 BNX2X_ERR("was only able to allocate "
5035 "%d rx sges\n", i);
5036 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037 /* Cleanup already allocated elements */
5038 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5039 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5040 fp->disable_tpa = 1;
5041 ring_prod = 0;
5042 break;
5043 }
5044 ring_prod = NEXT_SGE_IDX(ring_prod);
5045 }
5046 fp->rx_sge_prod = ring_prod;
5047
5048 /* Allocate BDs and initialize BD ring */
66e855f3 5049 fp->rx_comp_cons = 0;
7a9b2557 5050 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5051 for (i = 0; i < bp->rx_ring_size; i++) {
5052 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053 BNX2X_ERR("was only able to allocate "
de832a55
EG
5054 "%d rx skbs on queue[%d]\n", i, j);
5055 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5056 break;
5057 }
5058 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5059 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5060 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5061 }
5062
7a9b2557
VZ
5063 fp->rx_bd_prod = ring_prod;
5064 /* must not have more available CQEs than BDs */
5065 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5066 cqe_ring_prod);
a2fbb9ea
ET
5067 fp->rx_pkt = fp->rx_calls = 0;
5068
7a9b2557
VZ
5069 /* Warning!
5070 * this will generate an interrupt (to the TSTORM)
5071 * must only be done after chip is initialized
5072 */
5073 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5074 fp->rx_sge_prod);
a2fbb9ea
ET
5075 if (j != 0)
5076 continue;
5077
5078 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5079 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5080 U64_LO(fp->rx_comp_mapping));
5081 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5082 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5083 U64_HI(fp->rx_comp_mapping));
5084 }
5085}
5086
5087static void bnx2x_init_tx_ring(struct bnx2x *bp)
5088{
5089 int i, j;
5090
54b9ddaa 5091 for_each_queue(bp, j) {
a2fbb9ea
ET
5092 struct bnx2x_fastpath *fp = &bp->fp[j];
5093
5094 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5095 struct eth_tx_next_bd *tx_next_bd =
5096 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5097
ca00392c 5098 tx_next_bd->addr_hi =
a2fbb9ea 5099 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5100 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5101 tx_next_bd->addr_lo =
a2fbb9ea 5102 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5103 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5104 }
5105
ca00392c
EG
5106 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107 fp->tx_db.data.zero_fill1 = 0;
5108 fp->tx_db.data.prod = 0;
5109
a2fbb9ea
ET
5110 fp->tx_pkt_prod = 0;
5111 fp->tx_pkt_cons = 0;
5112 fp->tx_bd_prod = 0;
5113 fp->tx_bd_cons = 0;
5114 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5115 fp->tx_pkt = 0;
5116 }
5117}
5118
5119static void bnx2x_init_sp_ring(struct bnx2x *bp)
5120{
34f80b04 5121 int func = BP_FUNC(bp);
a2fbb9ea
ET
5122
5123 spin_lock_init(&bp->spq_lock);
5124
5125 bp->spq_left = MAX_SPQ_PENDING;
5126 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5127 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5128 bp->spq_prod_bd = bp->spq;
5129 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5130
34f80b04 5131 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5132 U64_LO(bp->spq_mapping));
34f80b04
EG
5133 REG_WR(bp,
5134 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5135 U64_HI(bp->spq_mapping));
5136
34f80b04 5137 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5138 bp->spq_prod_idx);
5139}
5140
5141static void bnx2x_init_context(struct bnx2x *bp)
5142{
5143 int i;
5144
54b9ddaa
VZ
5145 /* Rx */
5146 for_each_queue(bp, i) {
a2fbb9ea
ET
5147 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5148 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5149 u8 cl_id = fp->cl_id;
a2fbb9ea 5150
34f80b04
EG
5151 context->ustorm_st_context.common.sb_index_numbers =
5152 BNX2X_RX_SB_INDEX_NUM;
0626b899 5153 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5154 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5155 context->ustorm_st_context.common.flags =
de832a55
EG
5156 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5157 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5158 context->ustorm_st_context.common.statistics_counter_id =
5159 cl_id;
8d9c5f34 5160 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5161 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5162 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5163 bp->rx_buf_size;
34f80b04 5164 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5165 U64_HI(fp->rx_desc_mapping);
34f80b04 5166 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5167 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5168 if (!fp->disable_tpa) {
5169 context->ustorm_st_context.common.flags |=
ca00392c 5170 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5171 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
5172 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5173 (u32)0xffff);
7a9b2557
VZ
5174 context->ustorm_st_context.common.sge_page_base_hi =
5175 U64_HI(fp->rx_sge_mapping);
5176 context->ustorm_st_context.common.sge_page_base_lo =
5177 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5178
5179 context->ustorm_st_context.common.max_sges_for_packet =
5180 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5181 context->ustorm_st_context.common.max_sges_for_packet =
5182 ((context->ustorm_st_context.common.
5183 max_sges_for_packet + PAGES_PER_SGE - 1) &
5184 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5185 }
5186
8d9c5f34
EG
5187 context->ustorm_ag_context.cdu_usage =
5188 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5189 CDU_REGION_NUMBER_UCM_AG,
5190 ETH_CONNECTION_TYPE);
5191
ca00392c
EG
5192 context->xstorm_ag_context.cdu_reserved =
5193 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5194 CDU_REGION_NUMBER_XCM_AG,
5195 ETH_CONNECTION_TYPE);
5196 }
5197
54b9ddaa
VZ
5198 /* Tx */
5199 for_each_queue(bp, i) {
ca00392c
EG
5200 struct bnx2x_fastpath *fp = &bp->fp[i];
5201 struct eth_context *context =
54b9ddaa 5202 bnx2x_sp(bp, context[i].eth);
ca00392c
EG
5203
5204 context->cstorm_st_context.sb_index_number =
5205 C_SB_ETH_TX_CQ_INDEX;
5206 context->cstorm_st_context.status_block_id = fp->sb_id;
5207
8d9c5f34
EG
5208 context->xstorm_st_context.tx_bd_page_base_hi =
5209 U64_HI(fp->tx_desc_mapping);
5210 context->xstorm_st_context.tx_bd_page_base_lo =
5211 U64_LO(fp->tx_desc_mapping);
ca00392c 5212 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5213 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5214 }
5215}
5216
5217static void bnx2x_init_ind_table(struct bnx2x *bp)
5218{
26c8fa4d 5219 int func = BP_FUNC(bp);
a2fbb9ea
ET
5220 int i;
5221
555f6c78 5222 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5223 return;
5224
555f6c78
EG
5225 DP(NETIF_MSG_IFUP,
5226 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5227 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5228 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5229 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 5230 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
5231}
5232
49d66772
ET
5233static void bnx2x_set_client_config(struct bnx2x *bp)
5234{
49d66772 5235 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5236 int port = BP_PORT(bp);
5237 int i;
49d66772 5238
e7799c5f 5239 tstorm_client.mtu = bp->dev->mtu;
49d66772 5240 tstorm_client.config_flags =
de832a55
EG
5241 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5242 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5243#ifdef BCM_VLAN
0c6671b0 5244 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5245 tstorm_client.config_flags |=
8d9c5f34 5246 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5247 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5248 }
5249#endif
49d66772
ET
5250
5251 for_each_queue(bp, i) {
de832a55
EG
5252 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5253
49d66772 5254 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5255 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5256 ((u32 *)&tstorm_client)[0]);
5257 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5258 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5259 ((u32 *)&tstorm_client)[1]);
5260 }
5261
34f80b04
EG
5262 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5263 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5264}
5265
a2fbb9ea
ET
5266static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5267{
a2fbb9ea 5268 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 5269 int mode = bp->rx_mode;
37b091ba 5270 int mask = bp->rx_mode_cl_mask;
34f80b04 5271 int func = BP_FUNC(bp);
581ce43d 5272 int port = BP_PORT(bp);
a2fbb9ea 5273 int i;
581ce43d
EG
5274 /* All but management unicast packets should pass to the host as well */
5275 u32 llh_mask =
5276 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5277 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5278 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5279 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5280
3196a88a 5281 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5282
5283 switch (mode) {
5284 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5285 tstorm_mac_filter.ucast_drop_all = mask;
5286 tstorm_mac_filter.mcast_drop_all = mask;
5287 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5288 break;
356e2385 5289
a2fbb9ea 5290 case BNX2X_RX_MODE_NORMAL:
34f80b04 5291 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5292 break;
356e2385 5293
a2fbb9ea 5294 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5295 tstorm_mac_filter.mcast_accept_all = mask;
5296 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5297 break;
356e2385 5298
a2fbb9ea 5299 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5300 tstorm_mac_filter.ucast_accept_all = mask;
5301 tstorm_mac_filter.mcast_accept_all = mask;
5302 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5303 /* pass management unicast packets as well */
5304 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5305 break;
356e2385 5306
a2fbb9ea 5307 default:
34f80b04
EG
5308 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5309 break;
a2fbb9ea
ET
5310 }
5311
581ce43d
EG
5312 REG_WR(bp,
5313 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5314 llh_mask);
5315
a2fbb9ea
ET
5316 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5317 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5318 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5319 ((u32 *)&tstorm_mac_filter)[i]);
5320
34f80b04 5321/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5322 ((u32 *)&tstorm_mac_filter)[i]); */
5323 }
a2fbb9ea 5324
49d66772
ET
5325 if (mode != BNX2X_RX_MODE_NONE)
5326 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5327}
5328
471de716
EG
5329static void bnx2x_init_internal_common(struct bnx2x *bp)
5330{
5331 int i;
5332
5333 /* Zero this manually as its initialization is
5334 currently missing in the initTool */
5335 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5336 REG_WR(bp, BAR_USTRORM_INTMEM +
5337 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5338}
5339
5340static void bnx2x_init_internal_port(struct bnx2x *bp)
5341{
5342 int port = BP_PORT(bp);
5343
ca00392c
EG
5344 REG_WR(bp,
5345 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5346 REG_WR(bp,
5347 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5348 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5349 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5350}
5351
5352static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5353{
a2fbb9ea
ET
5354 struct tstorm_eth_function_common_config tstorm_config = {0};
5355 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5356 int port = BP_PORT(bp);
5357 int func = BP_FUNC(bp);
de832a55
EG
5358 int i, j;
5359 u32 offset;
471de716 5360 u16 max_agg_size;
a2fbb9ea
ET
5361
5362 if (is_multi(bp)) {
555f6c78 5363 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5364 tstorm_config.rss_result_mask = MULTI_MASK;
5365 }
ca00392c
EG
5366
5367 /* Enable TPA if needed */
5368 if (bp->flags & TPA_ENABLE_FLAG)
5369 tstorm_config.config_flags |=
5370 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5371
8d9c5f34
EG
5372 if (IS_E1HMF(bp))
5373 tstorm_config.config_flags |=
5374 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5375
34f80b04
EG
5376 tstorm_config.leading_client_id = BP_L_ID(bp);
5377
a2fbb9ea 5378 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5379 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5380 (*(u32 *)&tstorm_config));
5381
c14423fe 5382 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 5383 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
a2fbb9ea
ET
5384 bnx2x_set_storm_rx_mode(bp);
5385
de832a55
EG
5386 for_each_queue(bp, i) {
5387 u8 cl_id = bp->fp[i].cl_id;
5388
5389 /* reset xstorm per client statistics */
5390 offset = BAR_XSTRORM_INTMEM +
5391 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5392 for (j = 0;
5393 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5394 REG_WR(bp, offset + j*4, 0);
5395
5396 /* reset tstorm per client statistics */
5397 offset = BAR_TSTRORM_INTMEM +
5398 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5399 for (j = 0;
5400 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5401 REG_WR(bp, offset + j*4, 0);
5402
5403 /* reset ustorm per client statistics */
5404 offset = BAR_USTRORM_INTMEM +
5405 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5406 for (j = 0;
5407 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5408 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5409 }
5410
5411 /* Init statistics related context */
34f80b04 5412 stats_flags.collect_eth = 1;
a2fbb9ea 5413
66e855f3 5414 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5415 ((u32 *)&stats_flags)[0]);
66e855f3 5416 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5417 ((u32 *)&stats_flags)[1]);
5418
66e855f3 5419 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5420 ((u32 *)&stats_flags)[0]);
66e855f3 5421 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5422 ((u32 *)&stats_flags)[1]);
5423
de832a55
EG
5424 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5425 ((u32 *)&stats_flags)[0]);
5426 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5427 ((u32 *)&stats_flags)[1]);
5428
66e855f3 5429 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5430 ((u32 *)&stats_flags)[0]);
66e855f3 5431 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5432 ((u32 *)&stats_flags)[1]);
5433
66e855f3
YG
5434 REG_WR(bp, BAR_XSTRORM_INTMEM +
5435 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5436 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5437 REG_WR(bp, BAR_XSTRORM_INTMEM +
5438 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5439 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5440
5441 REG_WR(bp, BAR_TSTRORM_INTMEM +
5442 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5443 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5444 REG_WR(bp, BAR_TSTRORM_INTMEM +
5445 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5446 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5447
de832a55
EG
5448 REG_WR(bp, BAR_USTRORM_INTMEM +
5449 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5450 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5451 REG_WR(bp, BAR_USTRORM_INTMEM +
5452 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5453 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5454
34f80b04
EG
5455 if (CHIP_IS_E1H(bp)) {
5456 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5457 IS_E1HMF(bp));
5458 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5459 IS_E1HMF(bp));
5460 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5461 IS_E1HMF(bp));
5462 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5463 IS_E1HMF(bp));
5464
7a9b2557
VZ
5465 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5466 bp->e1hov);
34f80b04
EG
5467 }
5468
4f40f2cb
EG
5469 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5470 max_agg_size =
5471 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5472 SGE_PAGE_SIZE * PAGES_PER_SGE),
5473 (u32)0xffff);
54b9ddaa 5474 for_each_queue(bp, i) {
7a9b2557 5475 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5476
5477 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5478 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5479 U64_LO(fp->rx_comp_mapping));
5480 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5481 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5482 U64_HI(fp->rx_comp_mapping));
5483
ca00392c
EG
5484 /* Next page */
5485 REG_WR(bp, BAR_USTRORM_INTMEM +
5486 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5487 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5488 REG_WR(bp, BAR_USTRORM_INTMEM +
5489 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5490 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5491
7a9b2557 5492 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5493 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5494 max_agg_size);
5495 }
8a1c38d1 5496
1c06328c
EG
5497 /* dropless flow control */
5498 if (CHIP_IS_E1H(bp)) {
5499 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5500
5501 rx_pause.bd_thr_low = 250;
5502 rx_pause.cqe_thr_low = 250;
5503 rx_pause.cos = 1;
5504 rx_pause.sge_thr_low = 0;
5505 rx_pause.bd_thr_high = 350;
5506 rx_pause.cqe_thr_high = 350;
5507 rx_pause.sge_thr_high = 0;
5508
54b9ddaa 5509 for_each_queue(bp, i) {
1c06328c
EG
5510 struct bnx2x_fastpath *fp = &bp->fp[i];
5511
5512 if (!fp->disable_tpa) {
5513 rx_pause.sge_thr_low = 150;
5514 rx_pause.sge_thr_high = 250;
5515 }
5516
5517
5518 offset = BAR_USTRORM_INTMEM +
5519 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5520 fp->cl_id);
5521 for (j = 0;
5522 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5523 j++)
5524 REG_WR(bp, offset + j*4,
5525 ((u32 *)&rx_pause)[j]);
5526 }
5527 }
5528
8a1c38d1
EG
5529 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5530
5531 /* Init rate shaping and fairness contexts */
5532 if (IS_E1HMF(bp)) {
5533 int vn;
5534
5535 /* During init there is no active link
5536 Until link is up, set link rate to 10Gbps */
5537 bp->link_vars.line_speed = SPEED_10000;
5538 bnx2x_init_port_minmax(bp);
5539
b015e3d1
EG
5540 if (!BP_NOMCP(bp))
5541 bp->mf_config =
5542 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
5543 bnx2x_calc_vn_weight_sum(bp);
5544
5545 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5546 bnx2x_init_vn_minmax(bp, 2*vn + port);
5547
5548 /* Enable rate shaping and fairness */
b015e3d1 5549 bp->cmng.flags.cmng_enables |=
8a1c38d1 5550 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 5551
8a1c38d1
EG
5552 } else {
5553 /* rate shaping and fairness are disabled */
5554 DP(NETIF_MSG_IFUP,
5555 "single function mode minmax will be disabled\n");
5556 }
5557
5558
5559 /* Store it to internal memory */
5560 if (bp->port.pmf)
5561 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5562 REG_WR(bp, BAR_XSTRORM_INTMEM +
5563 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5564 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5565}
5566
471de716
EG
5567static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5568{
5569 switch (load_code) {
5570 case FW_MSG_CODE_DRV_LOAD_COMMON:
5571 bnx2x_init_internal_common(bp);
5572 /* no break */
5573
5574 case FW_MSG_CODE_DRV_LOAD_PORT:
5575 bnx2x_init_internal_port(bp);
5576 /* no break */
5577
5578 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579 bnx2x_init_internal_func(bp);
5580 break;
5581
5582 default:
5583 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5584 break;
5585 }
5586}
5587
5588static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5589{
5590 int i;
5591
5592 for_each_queue(bp, i) {
5593 struct bnx2x_fastpath *fp = &bp->fp[i];
5594
34f80b04 5595 fp->bp = bp;
a2fbb9ea 5596 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5597 fp->index = i;
34f80b04 5598 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
5599#ifdef BCM_CNIC
5600 fp->sb_id = fp->cl_id + 1;
5601#else
34f80b04 5602 fp->sb_id = fp->cl_id;
37b091ba 5603#endif
34f80b04 5604 DP(NETIF_MSG_IFUP,
f5372251
EG
5605 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5606 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5607 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5608 fp->sb_id);
5c862848 5609 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5610 }
5611
16119785
EG
5612 /* ensure status block indices were read */
5613 rmb();
5614
5615
5c862848
EG
5616 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5617 DEF_SB_ID);
5618 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5619 bnx2x_update_coalesce(bp);
5620 bnx2x_init_rx_rings(bp);
5621 bnx2x_init_tx_ring(bp);
5622 bnx2x_init_sp_ring(bp);
5623 bnx2x_init_context(bp);
471de716 5624 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5625 bnx2x_init_ind_table(bp);
0ef00459
EG
5626 bnx2x_stats_init(bp);
5627
5628 /* At this point, we are ready for interrupts */
5629 atomic_set(&bp->intr_sem, 0);
5630
5631 /* flush all before enabling interrupts */
5632 mb();
5633 mmiowb();
5634
615f8fd9 5635 bnx2x_int_enable(bp);
eb8da205
EG
5636
5637 /* Check for SPIO5 */
5638 bnx2x_attn_int_deasserted0(bp,
5639 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5640 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5641}
5642
5643/* end of nic init */
5644
5645/*
5646 * gzip service functions
5647 */
5648
5649static int bnx2x_gunzip_init(struct bnx2x *bp)
5650{
5651 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5652 &bp->gunzip_mapping);
5653 if (bp->gunzip_buf == NULL)
5654 goto gunzip_nomem1;
5655
5656 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5657 if (bp->strm == NULL)
5658 goto gunzip_nomem2;
5659
5660 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5661 GFP_KERNEL);
5662 if (bp->strm->workspace == NULL)
5663 goto gunzip_nomem3;
5664
5665 return 0;
5666
5667gunzip_nomem3:
5668 kfree(bp->strm);
5669 bp->strm = NULL;
5670
5671gunzip_nomem2:
5672 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673 bp->gunzip_mapping);
5674 bp->gunzip_buf = NULL;
5675
5676gunzip_nomem1:
5677 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5678 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5679 return -ENOMEM;
5680}
5681
5682static void bnx2x_gunzip_end(struct bnx2x *bp)
5683{
5684 kfree(bp->strm->workspace);
5685
5686 kfree(bp->strm);
5687 bp->strm = NULL;
5688
5689 if (bp->gunzip_buf) {
5690 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5691 bp->gunzip_mapping);
5692 bp->gunzip_buf = NULL;
5693 }
5694}
5695
94a78b79 5696static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5697{
5698 int n, rc;
5699
5700 /* check gzip header */
94a78b79
VZ
5701 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5702 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5703 return -EINVAL;
94a78b79 5704 }
a2fbb9ea
ET
5705
5706 n = 10;
5707
34f80b04 5708#define FNAME 0x8
a2fbb9ea
ET
5709
5710 if (zbuf[3] & FNAME)
5711 while ((zbuf[n++] != 0) && (n < len));
5712
94a78b79 5713 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5714 bp->strm->avail_in = len - n;
5715 bp->strm->next_out = bp->gunzip_buf;
5716 bp->strm->avail_out = FW_BUF_SIZE;
5717
5718 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5719 if (rc != Z_OK)
5720 return rc;
5721
5722 rc = zlib_inflate(bp->strm, Z_FINISH);
5723 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5725 bp->dev->name, bp->strm->msg);
5726
5727 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728 if (bp->gunzip_outlen & 0x3)
5729 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5730 " gunzip_outlen (%d) not aligned\n",
5731 bp->dev->name, bp->gunzip_outlen);
5732 bp->gunzip_outlen >>= 2;
5733
5734 zlib_inflateEnd(bp->strm);
5735
5736 if (rc == Z_STREAM_END)
5737 return 0;
5738
5739 return rc;
5740}
5741
5742/* nic load/unload */
5743
5744/*
34f80b04 5745 * General service functions
a2fbb9ea
ET
5746 */
5747
5748/* send a NIG loopback debug packet */
5749static void bnx2x_lb_pckt(struct bnx2x *bp)
5750{
a2fbb9ea 5751 u32 wb_write[3];
a2fbb9ea
ET
5752
5753 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5754 wb_write[0] = 0x55555555;
5755 wb_write[1] = 0x55555555;
34f80b04 5756 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5757 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5758
5759 /* NON-IP protocol */
a2fbb9ea
ET
5760 wb_write[0] = 0x09000000;
5761 wb_write[1] = 0x55555555;
34f80b04 5762 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5763 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5764}
5765
5766/* some of the internal memories
5767 * are not directly readable from the driver
5768 * to test them we send debug packets
5769 */
5770static int bnx2x_int_mem_test(struct bnx2x *bp)
5771{
5772 int factor;
5773 int count, i;
5774 u32 val = 0;
5775
ad8d3948 5776 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5777 factor = 120;
ad8d3948
EG
5778 else if (CHIP_REV_IS_EMUL(bp))
5779 factor = 200;
5780 else
a2fbb9ea 5781 factor = 1;
a2fbb9ea
ET
5782
5783 DP(NETIF_MSG_HW, "start part1\n");
5784
5785 /* Disable inputs of parser neighbor blocks */
5786 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5787 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5788 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5789 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5790
5791 /* Write 0 to parser credits for CFC search request */
5792 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5793
5794 /* send Ethernet packet */
5795 bnx2x_lb_pckt(bp);
5796
5797 /* TODO do i reset NIG statistic? */
5798 /* Wait until NIG register shows 1 packet of size 0x10 */
5799 count = 1000 * factor;
5800 while (count) {
34f80b04 5801
a2fbb9ea
ET
5802 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5804 if (val == 0x10)
5805 break;
5806
5807 msleep(10);
5808 count--;
5809 }
5810 if (val != 0x10) {
5811 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5812 return -1;
5813 }
5814
5815 /* Wait until PRS register shows 1 packet */
5816 count = 1000 * factor;
5817 while (count) {
5818 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5819 if (val == 1)
5820 break;
5821
5822 msleep(10);
5823 count--;
5824 }
5825 if (val != 0x1) {
5826 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5827 return -2;
5828 }
5829
5830 /* Reset and init BRB, PRS */
34f80b04 5831 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5832 msleep(50);
34f80b04 5833 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5834 msleep(50);
94a78b79
VZ
5835 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5836 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5837
5838 DP(NETIF_MSG_HW, "part2\n");
5839
5840 /* Disable inputs of parser neighbor blocks */
5841 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5842 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5843 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5844 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5845
5846 /* Write 0 to parser credits for CFC search request */
5847 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5848
5849 /* send 10 Ethernet packets */
5850 for (i = 0; i < 10; i++)
5851 bnx2x_lb_pckt(bp);
5852
5853 /* Wait until NIG register shows 10 + 1
5854 packets of size 11*0x10 = 0xb0 */
5855 count = 1000 * factor;
5856 while (count) {
34f80b04 5857
a2fbb9ea
ET
5858 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5859 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5860 if (val == 0xb0)
5861 break;
5862
5863 msleep(10);
5864 count--;
5865 }
5866 if (val != 0xb0) {
5867 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5868 return -3;
5869 }
5870
5871 /* Wait until PRS register shows 2 packets */
5872 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5873 if (val != 2)
5874 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5875
5876 /* Write 1 to parser credits for CFC search request */
5877 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5878
5879 /* Wait until PRS register shows 3 packets */
5880 msleep(10 * factor);
5881 /* Wait until NIG register shows 1 packet of size 0x10 */
5882 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5883 if (val != 3)
5884 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5885
5886 /* clear NIG EOP FIFO */
5887 for (i = 0; i < 11; i++)
5888 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5889 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5890 if (val != 1) {
5891 BNX2X_ERR("clear of NIG failed\n");
5892 return -4;
5893 }
5894
5895 /* Reset and init BRB, PRS, NIG */
5896 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5897 msleep(50);
5898 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5899 msleep(50);
94a78b79
VZ
5900 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5901 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 5902#ifndef BCM_CNIC
a2fbb9ea
ET
5903 /* set NIC mode */
5904 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5905#endif
5906
5907 /* Enable inputs of parser neighbor blocks */
5908 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5909 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5910 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5911 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5912
5913 DP(NETIF_MSG_HW, "done\n");
5914
5915 return 0; /* OK */
5916}
5917
5918static void enable_blocks_attention(struct bnx2x *bp)
5919{
5920 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5921 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5922 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5923 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5924 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5925 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5926 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5927 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5928 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5929/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5930/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5931 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5932 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5933 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5934/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5935/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5936 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5937 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5938 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5939 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5940/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5941/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5942 if (CHIP_REV_IS_FPGA(bp))
5943 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5944 else
5945 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5946 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5947 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5948 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5949/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5950/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5951 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5952 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5953/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5954 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5955}
5956
34f80b04 5957
81f75bbf
EG
5958static void bnx2x_reset_common(struct bnx2x *bp)
5959{
5960 /* reset_common */
5961 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5962 0xd3ffff7f);
5963 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5964}
5965
573f2035
EG
5966static void bnx2x_init_pxp(struct bnx2x *bp)
5967{
5968 u16 devctl;
5969 int r_order, w_order;
5970
5971 pci_read_config_word(bp->pdev,
5972 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5973 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5974 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5975 if (bp->mrrs == -1)
5976 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5977 else {
5978 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5979 r_order = bp->mrrs;
5980 }
5981
5982 bnx2x_init_pxp_arb(bp, r_order, w_order);
5983}
fd4ef40d
EG
5984
5985static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5986{
5987 u32 val;
5988 u8 port;
5989 u8 is_required = 0;
5990
5991 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5992 SHARED_HW_CFG_FAN_FAILURE_MASK;
5993
5994 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5995 is_required = 1;
5996
5997 /*
5998 * The fan failure mechanism is usually related to the PHY type since
5999 * the power consumption of the board is affected by the PHY. Currently,
6000 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6001 */
6002 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6003 for (port = PORT_0; port < PORT_MAX; port++) {
6004 u32 phy_type =
6005 SHMEM_RD(bp, dev_info.port_hw_config[port].
6006 external_phy_config) &
6007 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6008 is_required |=
6009 ((phy_type ==
6010 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
6011 (phy_type ==
6012 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
6013 (phy_type ==
6014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6015 }
6016
6017 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6018
6019 if (is_required == 0)
6020 return;
6021
6022 /* Fan failure is indicated by SPIO 5 */
6023 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6024 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6025
6026 /* set to active low mode */
6027 val = REG_RD(bp, MISC_REG_SPIO_INT);
6028 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6029 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6030 REG_WR(bp, MISC_REG_SPIO_INT, val);
6031
6032 /* enable interrupt to signal the IGU */
6033 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6034 val |= (1 << MISC_REGISTERS_SPIO_5);
6035 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6036}
6037
34f80b04 6038static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6039{
a2fbb9ea 6040 u32 val, i;
37b091ba
MC
6041#ifdef BCM_CNIC
6042 u32 wb_write[2];
6043#endif
a2fbb9ea 6044
34f80b04 6045 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6046
81f75bbf 6047 bnx2x_reset_common(bp);
34f80b04
EG
6048 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6049 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6050
94a78b79 6051 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6052 if (CHIP_IS_E1H(bp))
6053 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6054
34f80b04
EG
6055 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6056 msleep(30);
6057 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6058
94a78b79 6059 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6060 if (CHIP_IS_E1(bp)) {
6061 /* enable HW interrupt from PXP on USDM overflow
6062 bit 16 on INT_MASK_0 */
6063 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6064 }
a2fbb9ea 6065
94a78b79 6066 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6067 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6068
6069#ifdef __BIG_ENDIAN
34f80b04
EG
6070 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6071 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6072 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6073 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6074 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6075 /* make sure this value is 0 */
6076 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6077
6078/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6079 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6080 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6081 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6082 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6083#endif
6084
34f80b04 6085 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6086#ifdef BCM_CNIC
34f80b04
EG
6087 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6088 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6089 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6090#endif
6091
34f80b04
EG
6092 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6093 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6094
34f80b04
EG
6095 /* let the HW do it's magic ... */
6096 msleep(100);
6097 /* finish PXP init */
6098 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6099 if (val != 1) {
6100 BNX2X_ERR("PXP2 CFG failed\n");
6101 return -EBUSY;
6102 }
6103 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6104 if (val != 1) {
6105 BNX2X_ERR("PXP2 RD_INIT failed\n");
6106 return -EBUSY;
6107 }
a2fbb9ea 6108
34f80b04
EG
6109 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6110 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6111
94a78b79 6112 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6113
34f80b04
EG
6114 /* clean the DMAE memory */
6115 bp->dmae_ready = 1;
6116 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6117
94a78b79
VZ
6118 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6119 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6120 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6121 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6122
34f80b04
EG
6123 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6124 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6125 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6126 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6127
94a78b79 6128 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6129
6130#ifdef BCM_CNIC
6131 wb_write[0] = 0;
6132 wb_write[1] = 0;
6133 for (i = 0; i < 64; i++) {
6134 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6135 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6136
6137 if (CHIP_IS_E1H(bp)) {
6138 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6139 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6140 wb_write, 2);
6141 }
6142 }
6143#endif
34f80b04
EG
6144 /* soft reset pulse */
6145 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6146 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6147
37b091ba 6148#ifdef BCM_CNIC
94a78b79 6149 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6150#endif
a2fbb9ea 6151
94a78b79 6152 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6153 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6154 if (!CHIP_REV_IS_SLOW(bp)) {
6155 /* enable hw interrupt from doorbell Q */
6156 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6157 }
a2fbb9ea 6158
94a78b79
VZ
6159 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6160 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6161 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6162#ifndef BCM_CNIC
3196a88a
EG
6163 /* set NIC mode */
6164 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6165#endif
34f80b04
EG
6166 if (CHIP_IS_E1H(bp))
6167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6168
94a78b79
VZ
6169 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6170 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6171 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6172 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6173
ca00392c
EG
6174 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6175 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6178
94a78b79
VZ
6179 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6180 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6181 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6182 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6183
34f80b04
EG
6184 /* sync semi rtc */
6185 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6186 0x80000000);
6187 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6188 0x80000000);
a2fbb9ea 6189
94a78b79
VZ
6190 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6191 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6192 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6193
34f80b04
EG
6194 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6195 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6196 REG_WR(bp, i, 0xc0cac01a);
6197 /* TODO: replace with something meaningful */
6198 }
94a78b79 6199 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
6200#ifdef BCM_CNIC
6201 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6202 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6203 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6204 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6205 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6206 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6207 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6208 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6209 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6210 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6211#endif
34f80b04 6212 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6213
34f80b04
EG
6214 if (sizeof(union cdu_context) != 1024)
6215 /* we currently assume that a context is 1024 bytes */
6216 printk(KERN_ALERT PFX "please adjust the size of"
6217 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 6218
94a78b79 6219 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6220 val = (4 << 24) + (0 << 12) + 1024;
6221 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6222
94a78b79 6223 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6224 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6225 /* enable context validation interrupt from CFC */
6226 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6227
6228 /* set the thresholds to prevent CFC/CDU race */
6229 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6230
94a78b79
VZ
6231 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6232 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6233
94a78b79 6234 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6235 /* Reset PCIE errors for debug */
6236 REG_WR(bp, 0x2814, 0xffffffff);
6237 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6238
94a78b79 6239 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6240 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6241 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6242 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6243
94a78b79 6244 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6245 if (CHIP_IS_E1H(bp)) {
6246 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6247 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6248 }
6249
6250 if (CHIP_REV_IS_SLOW(bp))
6251 msleep(200);
6252
6253 /* finish CFC init */
6254 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6255 if (val != 1) {
6256 BNX2X_ERR("CFC LL_INIT failed\n");
6257 return -EBUSY;
6258 }
6259 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6260 if (val != 1) {
6261 BNX2X_ERR("CFC AC_INIT failed\n");
6262 return -EBUSY;
6263 }
6264 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6265 if (val != 1) {
6266 BNX2X_ERR("CFC CAM_INIT failed\n");
6267 return -EBUSY;
6268 }
6269 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6270
34f80b04
EG
6271 /* read NIG statistic
6272 to see if this is our first up since powerup */
6273 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6274 val = *bnx2x_sp(bp, wb_data[0]);
6275
6276 /* do internal memory self test */
6277 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6278 BNX2X_ERR("internal mem self test failed\n");
6279 return -EBUSY;
6280 }
6281
35b19ba5 6282 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6283 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6284 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6285 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6286 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6287 bp->port.need_hw_lock = 1;
6288 break;
6289
34f80b04
EG
6290 default:
6291 break;
6292 }
f1410647 6293
fd4ef40d
EG
6294 bnx2x_setup_fan_failure_detection(bp);
6295
34f80b04
EG
6296 /* clear PXP2 attentions */
6297 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6298
34f80b04 6299 enable_blocks_attention(bp);
a2fbb9ea 6300
6bbca910
YR
6301 if (!BP_NOMCP(bp)) {
6302 bnx2x_acquire_phy_lock(bp);
6303 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6304 bnx2x_release_phy_lock(bp);
6305 } else
6306 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6307
34f80b04
EG
6308 return 0;
6309}
a2fbb9ea 6310
34f80b04
EG
6311static int bnx2x_init_port(struct bnx2x *bp)
6312{
6313 int port = BP_PORT(bp);
94a78b79 6314 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6315 u32 low, high;
34f80b04 6316 u32 val;
a2fbb9ea 6317
34f80b04
EG
6318 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6319
6320 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6321
94a78b79 6322 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6323 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6324
6325 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6326 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6327 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6328 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6329
37b091ba
MC
6330#ifdef BCM_CNIC
6331 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6332
94a78b79 6333 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
6334 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6336#endif
94a78b79 6337 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6338
94a78b79 6339 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6340 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6341 /* no pause for emulation and FPGA */
6342 low = 0;
6343 high = 513;
6344 } else {
6345 if (IS_E1HMF(bp))
6346 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6347 else if (bp->dev->mtu > 4096) {
6348 if (bp->flags & ONE_PORT_FLAG)
6349 low = 160;
6350 else {
6351 val = bp->dev->mtu;
6352 /* (24*1024 + val*4)/256 */
6353 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6354 }
6355 } else
6356 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6357 high = low + 56; /* 14*1024/256 */
6358 }
6359 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6360 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6361
6362
94a78b79 6363 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6364
94a78b79 6365 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6366 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6367 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6368 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6369
94a78b79
VZ
6370 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6371 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6372 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6373 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6374
94a78b79 6375 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6376 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6377
94a78b79 6378 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6379
6380 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6381 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6382
6383 /* update threshold */
34f80b04 6384 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6385 /* update init credit */
34f80b04 6386 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6387
6388 /* probe changes */
34f80b04 6389 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6390 msleep(5);
34f80b04 6391 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6392
37b091ba
MC
6393#ifdef BCM_CNIC
6394 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6395#endif
94a78b79 6396 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6397 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6398
6399 if (CHIP_IS_E1(bp)) {
6400 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6401 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6402 }
94a78b79 6403 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6404
94a78b79 6405 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6406 /* init aeu_mask_attn_func_0/1:
6407 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6408 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6409 * bits 4-7 are used for "per vn group attention" */
6410 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6411 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6412
94a78b79 6413 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6414 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6415 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6416 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6417 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6418
94a78b79 6419 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6420
6421 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6422
6423 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6424 /* 0x2 disable e1hov, 0x1 enable */
6425 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6426 (IS_E1HMF(bp) ? 0x1 : 0x2));
6427
1c06328c
EG
6428 {
6429 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6430 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6431 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6432 }
34f80b04
EG
6433 }
6434
94a78b79 6435 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6436 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6437
35b19ba5 6438 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6440 {
6441 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6442
6443 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6444 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6445
6446 /* The GPIO should be swapped if the swap register is
6447 set and active */
6448 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6449 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6450
6451 /* Select function upon port-swap configuration */
6452 if (port == 0) {
6453 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6454 aeu_gpio_mask = (swap_val && swap_override) ?
6455 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6456 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6457 } else {
6458 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6459 aeu_gpio_mask = (swap_val && swap_override) ?
6460 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6461 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6462 }
6463 val = REG_RD(bp, offset);
6464 /* add GPIO3 to group */
6465 val |= aeu_gpio_mask;
6466 REG_WR(bp, offset, val);
6467 }
6468 break;
6469
35b19ba5 6470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6472 /* add SPIO 5 to group 0 */
4d295db0
EG
6473 {
6474 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6475 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6476 val = REG_RD(bp, reg_addr);
f1410647 6477 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6478 REG_WR(bp, reg_addr, val);
6479 }
f1410647
ET
6480 break;
6481
6482 default:
6483 break;
6484 }
6485
c18487ee 6486 bnx2x__link_reset(bp);
a2fbb9ea 6487
34f80b04
EG
6488 return 0;
6489}
6490
6491#define ILT_PER_FUNC (768/2)
6492#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6493/* the phys address is shifted right 12 bits and has an added
6494 1=valid bit added to the 53rd bit
6495 then since this is a wide register(TM)
6496 we split it into two 32 bit writes
6497 */
6498#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6499#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6500#define PXP_ONE_ILT(x) (((x) << 10) | x)
6501#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6502
37b091ba
MC
6503#ifdef BCM_CNIC
6504#define CNIC_ILT_LINES 127
6505#define CNIC_CTX_PER_ILT 16
6506#else
34f80b04 6507#define CNIC_ILT_LINES 0
37b091ba 6508#endif
34f80b04
EG
6509
6510static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6511{
6512 int reg;
6513
6514 if (CHIP_IS_E1H(bp))
6515 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6516 else /* E1 */
6517 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6518
6519 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6520}
6521
6522static int bnx2x_init_func(struct bnx2x *bp)
6523{
6524 int port = BP_PORT(bp);
6525 int func = BP_FUNC(bp);
8badd27a 6526 u32 addr, val;
34f80b04
EG
6527 int i;
6528
6529 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6530
8badd27a
EG
6531 /* set MSI reconfigure capability */
6532 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6533 val = REG_RD(bp, addr);
6534 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6535 REG_WR(bp, addr, val);
6536
34f80b04
EG
6537 i = FUNC_ILT_BASE(func);
6538
6539 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6540 if (CHIP_IS_E1H(bp)) {
6541 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6542 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6543 } else /* E1 */
6544 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6545 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6546
37b091ba
MC
6547#ifdef BCM_CNIC
6548 i += 1 + CNIC_ILT_LINES;
6549 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6550 if (CHIP_IS_E1(bp))
6551 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6552 else {
6553 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6554 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6555 }
6556
6557 i++;
6558 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6559 if (CHIP_IS_E1(bp))
6560 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6561 else {
6562 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6563 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6564 }
6565
6566 i++;
6567 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6568 if (CHIP_IS_E1(bp))
6569 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6570 else {
6571 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6572 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6573 }
6574
6575 /* tell the searcher where the T2 table is */
6576 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6577
6578 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6579 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6580
6581 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6582 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6583 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6584
6585 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6586#endif
34f80b04
EG
6587
6588 if (CHIP_IS_E1H(bp)) {
573f2035
EG
6589 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6590 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6591 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6592 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6593 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6594 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6595 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6596 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6597 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
6598
6599 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6600 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6601 }
6602
6603 /* HC init per function */
6604 if (CHIP_IS_E1H(bp)) {
6605 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6606
6607 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6608 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6609 }
94a78b79 6610 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6611
c14423fe 6612 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6613 REG_WR(bp, 0x2114, 0xffffffff);
6614 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6615
34f80b04
EG
6616 return 0;
6617}
6618
6619static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6620{
6621 int i, rc = 0;
a2fbb9ea 6622
34f80b04
EG
6623 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6624 BP_FUNC(bp), load_code);
a2fbb9ea 6625
34f80b04
EG
6626 bp->dmae_ready = 0;
6627 mutex_init(&bp->dmae_mutex);
54016b26
EG
6628 rc = bnx2x_gunzip_init(bp);
6629 if (rc)
6630 return rc;
a2fbb9ea 6631
34f80b04
EG
6632 switch (load_code) {
6633 case FW_MSG_CODE_DRV_LOAD_COMMON:
6634 rc = bnx2x_init_common(bp);
6635 if (rc)
6636 goto init_hw_err;
6637 /* no break */
6638
6639 case FW_MSG_CODE_DRV_LOAD_PORT:
6640 bp->dmae_ready = 1;
6641 rc = bnx2x_init_port(bp);
6642 if (rc)
6643 goto init_hw_err;
6644 /* no break */
6645
6646 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6647 bp->dmae_ready = 1;
6648 rc = bnx2x_init_func(bp);
6649 if (rc)
6650 goto init_hw_err;
6651 break;
6652
6653 default:
6654 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6655 break;
6656 }
6657
6658 if (!BP_NOMCP(bp)) {
6659 int func = BP_FUNC(bp);
a2fbb9ea
ET
6660
6661 bp->fw_drv_pulse_wr_seq =
34f80b04 6662 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6663 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6664 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6665 }
a2fbb9ea 6666
34f80b04
EG
6667 /* this needs to be done before gunzip end */
6668 bnx2x_zero_def_sb(bp);
6669 for_each_queue(bp, i)
6670 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
6671#ifdef BCM_CNIC
6672 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6673#endif
34f80b04
EG
6674
6675init_hw_err:
6676 bnx2x_gunzip_end(bp);
6677
6678 return rc;
a2fbb9ea
ET
6679}
6680
a2fbb9ea
ET
6681static void bnx2x_free_mem(struct bnx2x *bp)
6682{
6683
6684#define BNX2X_PCI_FREE(x, y, size) \
6685 do { \
6686 if (x) { \
6687 pci_free_consistent(bp->pdev, size, x, y); \
6688 x = NULL; \
6689 y = 0; \
6690 } \
6691 } while (0)
6692
6693#define BNX2X_FREE(x) \
6694 do { \
6695 if (x) { \
6696 vfree(x); \
6697 x = NULL; \
6698 } \
6699 } while (0)
6700
6701 int i;
6702
6703 /* fastpath */
555f6c78 6704 /* Common */
a2fbb9ea
ET
6705 for_each_queue(bp, i) {
6706
555f6c78 6707 /* status blocks */
a2fbb9ea
ET
6708 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6709 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6710 sizeof(struct host_status_block));
555f6c78
EG
6711 }
6712 /* Rx */
54b9ddaa 6713 for_each_queue(bp, i) {
a2fbb9ea 6714
555f6c78 6715 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6716 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6717 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6718 bnx2x_fp(bp, i, rx_desc_mapping),
6719 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6720
6721 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6722 bnx2x_fp(bp, i, rx_comp_mapping),
6723 sizeof(struct eth_fast_path_rx_cqe) *
6724 NUM_RCQ_BD);
a2fbb9ea 6725
7a9b2557 6726 /* SGE ring */
32626230 6727 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6728 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6729 bnx2x_fp(bp, i, rx_sge_mapping),
6730 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6731 }
555f6c78 6732 /* Tx */
54b9ddaa 6733 for_each_queue(bp, i) {
555f6c78
EG
6734
6735 /* fastpath tx rings: tx_buf tx_desc */
6736 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6737 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6738 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6739 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6740 }
a2fbb9ea
ET
6741 /* end of fastpath */
6742
6743 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6744 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6745
6746 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6747 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6748
37b091ba 6749#ifdef BCM_CNIC
a2fbb9ea
ET
6750 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6751 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6752 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6753 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
6754 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755 sizeof(struct host_status_block));
a2fbb9ea 6756#endif
7a9b2557 6757 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6758
6759#undef BNX2X_PCI_FREE
6760#undef BNX2X_KFREE
6761}
6762
6763static int bnx2x_alloc_mem(struct bnx2x *bp)
6764{
6765
6766#define BNX2X_PCI_ALLOC(x, y, size) \
6767 do { \
6768 x = pci_alloc_consistent(bp->pdev, size, y); \
6769 if (x == NULL) \
6770 goto alloc_mem_err; \
6771 memset(x, 0, size); \
6772 } while (0)
6773
6774#define BNX2X_ALLOC(x, size) \
6775 do { \
6776 x = vmalloc(size); \
6777 if (x == NULL) \
6778 goto alloc_mem_err; \
6779 memset(x, 0, size); \
6780 } while (0)
6781
6782 int i;
6783
6784 /* fastpath */
555f6c78 6785 /* Common */
a2fbb9ea
ET
6786 for_each_queue(bp, i) {
6787 bnx2x_fp(bp, i, bp) = bp;
6788
555f6c78 6789 /* status blocks */
a2fbb9ea
ET
6790 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6791 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6792 sizeof(struct host_status_block));
555f6c78
EG
6793 }
6794 /* Rx */
54b9ddaa 6795 for_each_queue(bp, i) {
a2fbb9ea 6796
555f6c78 6797 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6798 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6799 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6800 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6801 &bnx2x_fp(bp, i, rx_desc_mapping),
6802 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6803
6804 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6805 &bnx2x_fp(bp, i, rx_comp_mapping),
6806 sizeof(struct eth_fast_path_rx_cqe) *
6807 NUM_RCQ_BD);
6808
7a9b2557
VZ
6809 /* SGE ring */
6810 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6811 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6812 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6813 &bnx2x_fp(bp, i, rx_sge_mapping),
6814 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6815 }
555f6c78 6816 /* Tx */
54b9ddaa 6817 for_each_queue(bp, i) {
555f6c78 6818
555f6c78
EG
6819 /* fastpath tx rings: tx_buf tx_desc */
6820 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6821 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6822 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6823 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6824 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6825 }
a2fbb9ea
ET
6826 /* end of fastpath */
6827
6828 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6829 sizeof(struct host_def_status_block));
6830
6831 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6832 sizeof(struct bnx2x_slowpath));
6833
37b091ba 6834#ifdef BCM_CNIC
a2fbb9ea
ET
6835 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6836
a2fbb9ea
ET
6837 /* allocate searcher T2 table
6838 we allocate 1/4 of alloc num for T2
6839 (which is not entered into the ILT) */
6840 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6841
37b091ba 6842 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 6843 for (i = 0; i < 16*1024; i += 64)
37b091ba 6844 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 6845
37b091ba 6846 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
a2fbb9ea
ET
6847 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6848
6849 /* QM queues (128*MAX_CONN) */
6850 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
6851
6852 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6853 sizeof(struct host_status_block));
a2fbb9ea
ET
6854#endif
6855
6856 /* Slow path ring */
6857 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6858
6859 return 0;
6860
6861alloc_mem_err:
6862 bnx2x_free_mem(bp);
6863 return -ENOMEM;
6864
6865#undef BNX2X_PCI_ALLOC
6866#undef BNX2X_ALLOC
6867}
6868
6869static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6870{
6871 int i;
6872
54b9ddaa 6873 for_each_queue(bp, i) {
a2fbb9ea
ET
6874 struct bnx2x_fastpath *fp = &bp->fp[i];
6875
6876 u16 bd_cons = fp->tx_bd_cons;
6877 u16 sw_prod = fp->tx_pkt_prod;
6878 u16 sw_cons = fp->tx_pkt_cons;
6879
a2fbb9ea
ET
6880 while (sw_cons != sw_prod) {
6881 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6882 sw_cons++;
6883 }
6884 }
6885}
6886
6887static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6888{
6889 int i, j;
6890
54b9ddaa 6891 for_each_queue(bp, j) {
a2fbb9ea
ET
6892 struct bnx2x_fastpath *fp = &bp->fp[j];
6893
a2fbb9ea
ET
6894 for (i = 0; i < NUM_RX_BD; i++) {
6895 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6896 struct sk_buff *skb = rx_buf->skb;
6897
6898 if (skb == NULL)
6899 continue;
6900
6901 pci_unmap_single(bp->pdev,
6902 pci_unmap_addr(rx_buf, mapping),
356e2385 6903 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6904
6905 rx_buf->skb = NULL;
6906 dev_kfree_skb(skb);
6907 }
7a9b2557 6908 if (!fp->disable_tpa)
32626230
EG
6909 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6910 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6911 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6912 }
6913}
6914
6915static void bnx2x_free_skbs(struct bnx2x *bp)
6916{
6917 bnx2x_free_tx_skbs(bp);
6918 bnx2x_free_rx_skbs(bp);
6919}
6920
6921static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6922{
34f80b04 6923 int i, offset = 1;
a2fbb9ea
ET
6924
6925 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6926 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6927 bp->msix_table[0].vector);
6928
37b091ba
MC
6929#ifdef BCM_CNIC
6930 offset++;
6931#endif
a2fbb9ea 6932 for_each_queue(bp, i) {
c14423fe 6933 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6934 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6935 bnx2x_fp(bp, i, state));
6936
34f80b04 6937 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6938 }
a2fbb9ea
ET
6939}
6940
6cbe5065 6941static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
a2fbb9ea 6942{
a2fbb9ea 6943 if (bp->flags & USING_MSIX_FLAG) {
6cbe5065
VZ
6944 if (!disable_only)
6945 bnx2x_free_msix_irqs(bp);
a2fbb9ea 6946 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6947 bp->flags &= ~USING_MSIX_FLAG;
6948
8badd27a 6949 } else if (bp->flags & USING_MSI_FLAG) {
6cbe5065
VZ
6950 if (!disable_only)
6951 free_irq(bp->pdev->irq, bp->dev);
8badd27a
EG
6952 pci_disable_msi(bp->pdev);
6953 bp->flags &= ~USING_MSI_FLAG;
6954
6cbe5065 6955 } else if (!disable_only)
a2fbb9ea
ET
6956 free_irq(bp->pdev->irq, bp->dev);
6957}
6958
6959static int bnx2x_enable_msix(struct bnx2x *bp)
6960{
8badd27a
EG
6961 int i, rc, offset = 1;
6962 int igu_vec = 0;
a2fbb9ea 6963
8badd27a
EG
6964 bp->msix_table[0].entry = igu_vec;
6965 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6966
37b091ba
MC
6967#ifdef BCM_CNIC
6968 igu_vec = BP_L_ID(bp) + offset;
6969 bp->msix_table[1].entry = igu_vec;
6970 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6971 offset++;
6972#endif
34f80b04 6973 for_each_queue(bp, i) {
8badd27a 6974 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6975 bp->msix_table[i + offset].entry = igu_vec;
6976 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6977 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6978 }
6979
34f80b04 6980 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6981 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6982 if (rc) {
8badd27a
EG
6983 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6984 return rc;
34f80b04 6985 }
8badd27a 6986
a2fbb9ea
ET
6987 bp->flags |= USING_MSIX_FLAG;
6988
6989 return 0;
a2fbb9ea
ET
6990}
6991
a2fbb9ea
ET
6992static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6993{
34f80b04 6994 int i, rc, offset = 1;
a2fbb9ea 6995
a2fbb9ea
ET
6996 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6997 bp->dev->name, bp->dev);
a2fbb9ea
ET
6998 if (rc) {
6999 BNX2X_ERR("request sp irq failed\n");
7000 return -EBUSY;
7001 }
7002
37b091ba
MC
7003#ifdef BCM_CNIC
7004 offset++;
7005#endif
a2fbb9ea 7006 for_each_queue(bp, i) {
555f6c78 7007 struct bnx2x_fastpath *fp = &bp->fp[i];
54b9ddaa
VZ
7008 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7009 bp->dev->name, i);
ca00392c 7010
34f80b04 7011 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7012 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7013 if (rc) {
555f6c78 7014 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7015 bnx2x_free_msix_irqs(bp);
7016 return -EBUSY;
7017 }
7018
555f6c78 7019 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7020 }
7021
555f6c78 7022 i = BNX2X_NUM_QUEUES(bp);
ca00392c
EG
7023 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7024 " ... fp[%d] %d\n",
7025 bp->dev->name, bp->msix_table[0].vector,
7026 0, bp->msix_table[offset].vector,
7027 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7028
a2fbb9ea 7029 return 0;
a2fbb9ea
ET
7030}
7031
8badd27a
EG
7032static int bnx2x_enable_msi(struct bnx2x *bp)
7033{
7034 int rc;
7035
7036 rc = pci_enable_msi(bp->pdev);
7037 if (rc) {
7038 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7039 return -1;
7040 }
7041 bp->flags |= USING_MSI_FLAG;
7042
7043 return 0;
7044}
7045
a2fbb9ea
ET
7046static int bnx2x_req_irq(struct bnx2x *bp)
7047{
8badd27a 7048 unsigned long flags;
34f80b04 7049 int rc;
a2fbb9ea 7050
8badd27a
EG
7051 if (bp->flags & USING_MSI_FLAG)
7052 flags = 0;
7053 else
7054 flags = IRQF_SHARED;
7055
7056 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7057 bp->dev->name, bp->dev);
a2fbb9ea
ET
7058 if (!rc)
7059 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7060
7061 return rc;
a2fbb9ea
ET
7062}
7063
65abd74d
YG
7064static void bnx2x_napi_enable(struct bnx2x *bp)
7065{
7066 int i;
7067
54b9ddaa 7068 for_each_queue(bp, i)
65abd74d
YG
7069 napi_enable(&bnx2x_fp(bp, i, napi));
7070}
7071
7072static void bnx2x_napi_disable(struct bnx2x *bp)
7073{
7074 int i;
7075
54b9ddaa 7076 for_each_queue(bp, i)
65abd74d
YG
7077 napi_disable(&bnx2x_fp(bp, i, napi));
7078}
7079
7080static void bnx2x_netif_start(struct bnx2x *bp)
7081{
e1510706
EG
7082 int intr_sem;
7083
7084 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7085 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7086
7087 if (intr_sem) {
65abd74d 7088 if (netif_running(bp->dev)) {
65abd74d
YG
7089 bnx2x_napi_enable(bp);
7090 bnx2x_int_enable(bp);
555f6c78
EG
7091 if (bp->state == BNX2X_STATE_OPEN)
7092 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7093 }
7094 }
7095}
7096
f8ef6e44 7097static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7098{
f8ef6e44 7099 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7100 bnx2x_napi_disable(bp);
762d5f6c 7101 netif_tx_disable(bp->dev);
65abd74d
YG
7102}
7103
a2fbb9ea
ET
7104/*
7105 * Init service functions
7106 */
7107
e665bfda
MC
7108/**
7109 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7110 *
7111 * @param bp driver descriptor
7112 * @param set set or clear an entry (1 or 0)
7113 * @param mac pointer to a buffer containing a MAC
7114 * @param cl_bit_vec bit vector of clients to register a MAC for
7115 * @param cam_offset offset in a CAM to use
7116 * @param with_bcast set broadcast MAC as well
7117 */
7118static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7119 u32 cl_bit_vec, u8 cam_offset,
7120 u8 with_bcast)
a2fbb9ea
ET
7121{
7122 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7123 int port = BP_PORT(bp);
a2fbb9ea
ET
7124
7125 /* CAM allocation
7126 * unicasts 0-31:port0 32-63:port1
7127 * multicast 64-127:port0 128-191:port1
7128 */
e665bfda
MC
7129 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7130 config->hdr.offset = cam_offset;
7131 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7132 config->hdr.reserved1 = 0;
7133
7134 /* primary MAC */
7135 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7136 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7137 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7138 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7139 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7140 swab16(*(u16 *)&mac[4]);
34f80b04 7141 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7142 if (set)
7143 config->config_table[0].target_table_entry.flags = 0;
7144 else
7145 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7146 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7147 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7148 config->config_table[0].target_table_entry.vlan_id = 0;
7149
3101c2bc
YG
7150 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7151 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7152 config->config_table[0].cam_entry.msb_mac_addr,
7153 config->config_table[0].cam_entry.middle_mac_addr,
7154 config->config_table[0].cam_entry.lsb_mac_addr);
7155
7156 /* broadcast */
e665bfda
MC
7157 if (with_bcast) {
7158 config->config_table[1].cam_entry.msb_mac_addr =
7159 cpu_to_le16(0xffff);
7160 config->config_table[1].cam_entry.middle_mac_addr =
7161 cpu_to_le16(0xffff);
7162 config->config_table[1].cam_entry.lsb_mac_addr =
7163 cpu_to_le16(0xffff);
7164 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7165 if (set)
7166 config->config_table[1].target_table_entry.flags =
7167 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7168 else
7169 CAM_INVALIDATE(config->config_table[1]);
7170 config->config_table[1].target_table_entry.clients_bit_vector =
7171 cpu_to_le32(cl_bit_vec);
7172 config->config_table[1].target_table_entry.vlan_id = 0;
7173 }
a2fbb9ea
ET
7174
7175 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7176 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7177 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7178}
7179
e665bfda
MC
7180/**
7181 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7182 *
7183 * @param bp driver descriptor
7184 * @param set set or clear an entry (1 or 0)
7185 * @param mac pointer to a buffer containing a MAC
7186 * @param cl_bit_vec bit vector of clients to register a MAC for
7187 * @param cam_offset offset in a CAM to use
7188 */
7189static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7190 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7191{
7192 struct mac_configuration_cmd_e1h *config =
7193 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7194
8d9c5f34 7195 config->hdr.length = 1;
e665bfda
MC
7196 config->hdr.offset = cam_offset;
7197 config->hdr.client_id = 0xff;
34f80b04
EG
7198 config->hdr.reserved1 = 0;
7199
7200 /* primary MAC */
7201 config->config_table[0].msb_mac_addr =
e665bfda 7202 swab16(*(u16 *)&mac[0]);
34f80b04 7203 config->config_table[0].middle_mac_addr =
e665bfda 7204 swab16(*(u16 *)&mac[2]);
34f80b04 7205 config->config_table[0].lsb_mac_addr =
e665bfda 7206 swab16(*(u16 *)&mac[4]);
ca00392c 7207 config->config_table[0].clients_bit_vector =
e665bfda 7208 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7209 config->config_table[0].vlan_id = 0;
7210 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7211 if (set)
7212 config->config_table[0].flags = BP_PORT(bp);
7213 else
7214 config->config_table[0].flags =
7215 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7216
e665bfda 7217 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7218 (set ? "setting" : "clearing"),
34f80b04
EG
7219 config->config_table[0].msb_mac_addr,
7220 config->config_table[0].middle_mac_addr,
e665bfda 7221 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7222
7223 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7224 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7225 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7226}
7227
a2fbb9ea
ET
7228static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7229 int *state_p, int poll)
7230{
7231 /* can take a while if any port is running */
8b3a0f0b 7232 int cnt = 5000;
a2fbb9ea 7233
c14423fe
ET
7234 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7235 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7236
7237 might_sleep();
34f80b04 7238 while (cnt--) {
a2fbb9ea
ET
7239 if (poll) {
7240 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7241 /* if index is different from 0
7242 * the reply for some commands will
3101c2bc 7243 * be on the non default queue
a2fbb9ea
ET
7244 */
7245 if (idx)
7246 bnx2x_rx_int(&bp->fp[idx], 10);
7247 }
a2fbb9ea 7248
3101c2bc 7249 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7250 if (*state_p == state) {
7251#ifdef BNX2X_STOP_ON_ERROR
7252 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7253#endif
a2fbb9ea 7254 return 0;
8b3a0f0b 7255 }
a2fbb9ea 7256
a2fbb9ea 7257 msleep(1);
e3553b29
EG
7258
7259 if (bp->panic)
7260 return -EIO;
a2fbb9ea
ET
7261 }
7262
a2fbb9ea 7263 /* timeout! */
49d66772
ET
7264 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7265 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7266#ifdef BNX2X_STOP_ON_ERROR
7267 bnx2x_panic();
7268#endif
a2fbb9ea 7269
49d66772 7270 return -EBUSY;
a2fbb9ea
ET
7271}
7272
e665bfda
MC
7273static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7274{
7275 bp->set_mac_pending++;
7276 smp_wmb();
7277
7278 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7279 (1 << bp->fp->cl_id), BP_FUNC(bp));
7280
7281 /* Wait for a completion */
7282 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7283}
7284
7285static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7286{
7287 bp->set_mac_pending++;
7288 smp_wmb();
7289
7290 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7291 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7292 1);
7293
7294 /* Wait for a completion */
7295 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7296}
7297
993ac7b5
MC
7298#ifdef BCM_CNIC
7299/**
7300 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7301 * MAC(s). This function will wait until the ramdord completion
7302 * returns.
7303 *
7304 * @param bp driver handle
7305 * @param set set or clear the CAM entry
7306 *
7307 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7308 */
7309static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7310{
7311 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7312
7313 bp->set_mac_pending++;
7314 smp_wmb();
7315
7316 /* Send a SET_MAC ramrod */
7317 if (CHIP_IS_E1(bp))
7318 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7319 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7320 1);
7321 else
7322 /* CAM allocation for E1H
7323 * unicasts: by func number
7324 * multicast: 20+FUNC*20, 20 each
7325 */
7326 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7327 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7328
7329 /* Wait for a completion when setting */
7330 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7331
7332 return 0;
7333}
7334#endif
7335
a2fbb9ea
ET
7336static int bnx2x_setup_leading(struct bnx2x *bp)
7337{
34f80b04 7338 int rc;
a2fbb9ea 7339
c14423fe 7340 /* reset IGU state */
34f80b04 7341 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7342
7343 /* SETUP ramrod */
7344 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7345
34f80b04
EG
7346 /* Wait for completion */
7347 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7348
34f80b04 7349 return rc;
a2fbb9ea
ET
7350}
7351
7352static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7353{
555f6c78
EG
7354 struct bnx2x_fastpath *fp = &bp->fp[index];
7355
a2fbb9ea 7356 /* reset IGU state */
555f6c78 7357 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7358
228241eb 7359 /* SETUP ramrod */
555f6c78
EG
7360 fp->state = BNX2X_FP_STATE_OPENING;
7361 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7362 fp->cl_id, 0);
a2fbb9ea
ET
7363
7364 /* Wait for completion */
7365 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7366 &(fp->state), 0);
a2fbb9ea
ET
7367}
7368
a2fbb9ea 7369static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7370
54b9ddaa 7371static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 7372{
ca00392c
EG
7373
7374 switch (bp->multi_mode) {
7375 case ETH_RSS_MODE_DISABLED:
54b9ddaa 7376 bp->num_queues = 1;
ca00392c
EG
7377 break;
7378
7379 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
7380 if (num_queues)
7381 bp->num_queues = min_t(u32, num_queues,
7382 BNX2X_MAX_QUEUES(bp));
ca00392c 7383 else
54b9ddaa
VZ
7384 bp->num_queues = min_t(u32, num_online_cpus(),
7385 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
7386 break;
7387
7388
7389 default:
54b9ddaa 7390 bp->num_queues = 1;
ca00392c
EG
7391 break;
7392 }
ca00392c
EG
7393}
7394
54b9ddaa 7395static int bnx2x_set_num_queues(struct bnx2x *bp)
a2fbb9ea 7396{
ca00392c 7397 int rc = 0;
a2fbb9ea 7398
8badd27a
EG
7399 switch (int_mode) {
7400 case INT_MODE_INTx:
7401 case INT_MODE_MSI:
54b9ddaa 7402 bp->num_queues = 1;
ca00392c 7403 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a
EG
7404 break;
7405
7406 case INT_MODE_MSIX:
7407 default:
54b9ddaa
VZ
7408 /* Set number of queues according to bp->multi_mode value */
7409 bnx2x_set_num_queues_msix(bp);
ca00392c 7410
54b9ddaa
VZ
7411 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7412 bp->num_queues);
ca00392c 7413
2dfe0e1f
EG
7414 /* if we can't use MSI-X we only need one fp,
7415 * so try to enable MSI-X with the requested number of fp's
7416 * and fallback to MSI or legacy INTx with one fp
7417 */
ca00392c 7418 rc = bnx2x_enable_msix(bp);
54b9ddaa 7419 if (rc)
34f80b04 7420 /* failed to enable MSI-X */
54b9ddaa 7421 bp->num_queues = 1;
8badd27a 7422 break;
a2fbb9ea 7423 }
54b9ddaa 7424 bp->dev->real_num_tx_queues = bp->num_queues;
ca00392c 7425 return rc;
8badd27a
EG
7426}
7427
993ac7b5
MC
7428#ifdef BCM_CNIC
7429static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7430static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7431#endif
8badd27a
EG
7432
7433/* must be called with rtnl_lock */
7434static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7435{
7436 u32 load_code;
ca00392c
EG
7437 int i, rc;
7438
8badd27a 7439#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7440 if (unlikely(bp->panic))
7441 return -EPERM;
7442#endif
7443
7444 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7445
54b9ddaa 7446 rc = bnx2x_set_num_queues(bp);
c14423fe 7447
6cbe5065
VZ
7448 if (bnx2x_alloc_mem(bp)) {
7449 bnx2x_free_irq(bp, true);
a2fbb9ea 7450 return -ENOMEM;
6cbe5065 7451 }
a2fbb9ea 7452
54b9ddaa 7453 for_each_queue(bp, i)
7a9b2557
VZ
7454 bnx2x_fp(bp, i, disable_tpa) =
7455 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7456
54b9ddaa 7457 for_each_queue(bp, i)
2dfe0e1f
EG
7458 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7459 bnx2x_poll, 128);
7460
2dfe0e1f
EG
7461 bnx2x_napi_enable(bp);
7462
34f80b04
EG
7463 if (bp->flags & USING_MSIX_FLAG) {
7464 rc = bnx2x_req_msix_irqs(bp);
7465 if (rc) {
6cbe5065 7466 bnx2x_free_irq(bp, true);
2dfe0e1f 7467 goto load_error1;
34f80b04
EG
7468 }
7469 } else {
ca00392c 7470 /* Fall to INTx if failed to enable MSI-X due to lack of
54b9ddaa 7471 memory (in bnx2x_set_num_queues()) */
8badd27a
EG
7472 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7473 bnx2x_enable_msi(bp);
34f80b04
EG
7474 bnx2x_ack_int(bp);
7475 rc = bnx2x_req_irq(bp);
7476 if (rc) {
2dfe0e1f 7477 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6cbe5065 7478 bnx2x_free_irq(bp, true);
2dfe0e1f 7479 goto load_error1;
a2fbb9ea 7480 }
8badd27a
EG
7481 if (bp->flags & USING_MSI_FLAG) {
7482 bp->dev->irq = bp->pdev->irq;
7483 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7484 bp->dev->name, bp->pdev->irq);
7485 }
a2fbb9ea
ET
7486 }
7487
2dfe0e1f
EG
7488 /* Send LOAD_REQUEST command to MCP
7489 Returns the type of LOAD command:
7490 if it is the first port to be initialized
7491 common blocks should be initialized, otherwise - not
7492 */
7493 if (!BP_NOMCP(bp)) {
7494 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7495 if (!load_code) {
7496 BNX2X_ERR("MCP response failure, aborting\n");
7497 rc = -EBUSY;
7498 goto load_error2;
7499 }
7500 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7501 rc = -EBUSY; /* other port in diagnostic mode */
7502 goto load_error2;
7503 }
7504
7505 } else {
7506 int port = BP_PORT(bp);
7507
f5372251 7508 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7509 load_count[0], load_count[1], load_count[2]);
7510 load_count[0]++;
7511 load_count[1 + port]++;
f5372251 7512 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7513 load_count[0], load_count[1], load_count[2]);
7514 if (load_count[0] == 1)
7515 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7516 else if (load_count[1 + port] == 1)
7517 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7518 else
7519 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7520 }
7521
7522 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7523 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7524 bp->port.pmf = 1;
7525 else
7526 bp->port.pmf = 0;
7527 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7528
a2fbb9ea 7529 /* Initialize HW */
34f80b04
EG
7530 rc = bnx2x_init_hw(bp, load_code);
7531 if (rc) {
a2fbb9ea 7532 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7533 goto load_error2;
a2fbb9ea
ET
7534 }
7535
a2fbb9ea 7536 /* Setup NIC internals and enable interrupts */
471de716 7537 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7538
2691d51d
EG
7539 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7540 (bp->common.shmem2_base))
7541 SHMEM2_WR(bp, dcc_support,
7542 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7543 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7544
a2fbb9ea 7545 /* Send LOAD_DONE command to MCP */
34f80b04 7546 if (!BP_NOMCP(bp)) {
228241eb
ET
7547 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7548 if (!load_code) {
da5a662a 7549 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7550 rc = -EBUSY;
2dfe0e1f 7551 goto load_error3;
a2fbb9ea
ET
7552 }
7553 }
7554
7555 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7556
34f80b04
EG
7557 rc = bnx2x_setup_leading(bp);
7558 if (rc) {
da5a662a 7559 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7560#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7561 goto load_error3;
e3553b29
EG
7562#else
7563 bp->panic = 1;
7564 return -EBUSY;
7565#endif
34f80b04 7566 }
a2fbb9ea 7567
34f80b04
EG
7568 if (CHIP_IS_E1H(bp))
7569 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7570 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 7571 bp->flags |= MF_FUNC_DIS;
34f80b04 7572 }
a2fbb9ea 7573
ca00392c 7574 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
7575#ifdef BCM_CNIC
7576 /* Enable Timer scan */
7577 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7578#endif
34f80b04
EG
7579 for_each_nondefault_queue(bp, i) {
7580 rc = bnx2x_setup_multi(bp, i);
7581 if (rc)
37b091ba
MC
7582#ifdef BCM_CNIC
7583 goto load_error4;
7584#else
2dfe0e1f 7585 goto load_error3;
37b091ba 7586#endif
34f80b04 7587 }
a2fbb9ea 7588
ca00392c 7589 if (CHIP_IS_E1(bp))
e665bfda 7590 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 7591 else
e665bfda 7592 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
7593#ifdef BCM_CNIC
7594 /* Set iSCSI L2 MAC */
7595 mutex_lock(&bp->cnic_mutex);
7596 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7597 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7598 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
4a6e47a4
MC
7599 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7600 CNIC_SB_ID(bp));
993ac7b5
MC
7601 }
7602 mutex_unlock(&bp->cnic_mutex);
7603#endif
ca00392c 7604 }
34f80b04
EG
7605
7606 if (bp->port.pmf)
b5bf9068 7607 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
7608
7609 /* Start fast path */
34f80b04
EG
7610 switch (load_mode) {
7611 case LOAD_NORMAL:
ca00392c
EG
7612 if (bp->state == BNX2X_STATE_OPEN) {
7613 /* Tx queue should be only reenabled */
7614 netif_tx_wake_all_queues(bp->dev);
7615 }
2dfe0e1f 7616 /* Initialize the receive filter. */
34f80b04
EG
7617 bnx2x_set_rx_mode(bp->dev);
7618 break;
7619
7620 case LOAD_OPEN:
555f6c78 7621 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
7622 if (bp->state != BNX2X_STATE_OPEN)
7623 netif_tx_disable(bp->dev);
2dfe0e1f 7624 /* Initialize the receive filter. */
34f80b04 7625 bnx2x_set_rx_mode(bp->dev);
34f80b04 7626 break;
a2fbb9ea 7627
34f80b04 7628 case LOAD_DIAG:
2dfe0e1f 7629 /* Initialize the receive filter. */
a2fbb9ea 7630 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
7631 bp->state = BNX2X_STATE_DIAG;
7632 break;
7633
7634 default:
7635 break;
a2fbb9ea
ET
7636 }
7637
34f80b04
EG
7638 if (!bp->port.pmf)
7639 bnx2x__link_status_update(bp);
7640
a2fbb9ea
ET
7641 /* start the timer */
7642 mod_timer(&bp->timer, jiffies + bp->current_interval);
7643
993ac7b5
MC
7644#ifdef BCM_CNIC
7645 bnx2x_setup_cnic_irq_info(bp);
7646 if (bp->state == BNX2X_STATE_OPEN)
7647 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7648#endif
34f80b04 7649
a2fbb9ea
ET
7650 return 0;
7651
37b091ba
MC
7652#ifdef BCM_CNIC
7653load_error4:
7654 /* Disable Timer scan */
7655 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7656#endif
2dfe0e1f
EG
7657load_error3:
7658 bnx2x_int_disable_sync(bp, 1);
7659 if (!BP_NOMCP(bp)) {
7660 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7661 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7662 }
7663 bp->port.pmf = 0;
7a9b2557
VZ
7664 /* Free SKBs, SGEs, TPA pool and driver internals */
7665 bnx2x_free_skbs(bp);
54b9ddaa 7666 for_each_queue(bp, i)
3196a88a 7667 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7668load_error2:
d1014634 7669 /* Release IRQs */
6cbe5065 7670 bnx2x_free_irq(bp, false);
2dfe0e1f
EG
7671load_error1:
7672 bnx2x_napi_disable(bp);
54b9ddaa 7673 for_each_queue(bp, i)
7cde1c8b 7674 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7675 bnx2x_free_mem(bp);
7676
34f80b04 7677 return rc;
a2fbb9ea
ET
7678}
7679
7680static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7681{
555f6c78 7682 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7683 int rc;
7684
c14423fe 7685 /* halt the connection */
555f6c78
EG
7686 fp->state = BNX2X_FP_STATE_HALTING;
7687 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7688
34f80b04 7689 /* Wait for completion */
a2fbb9ea 7690 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7691 &(fp->state), 1);
c14423fe 7692 if (rc) /* timeout */
a2fbb9ea
ET
7693 return rc;
7694
7695 /* delete cfc entry */
7696 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7697
34f80b04
EG
7698 /* Wait for completion */
7699 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7700 &(fp->state), 1);
34f80b04 7701 return rc;
a2fbb9ea
ET
7702}
7703
da5a662a 7704static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7705{
4781bfad 7706 __le16 dsb_sp_prod_idx;
c14423fe 7707 /* if the other port is handling traffic,
a2fbb9ea 7708 this can take a lot of time */
34f80b04
EG
7709 int cnt = 500;
7710 int rc;
a2fbb9ea
ET
7711
7712 might_sleep();
7713
7714 /* Send HALT ramrod */
7715 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7716 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7717
34f80b04
EG
7718 /* Wait for completion */
7719 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7720 &(bp->fp[0].state), 1);
7721 if (rc) /* timeout */
da5a662a 7722 return rc;
a2fbb9ea 7723
49d66772 7724 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7725
228241eb 7726 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7727 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7728
49d66772 7729 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7730 we are going to reset the chip anyway
7731 so there is not much to do if this times out
7732 */
34f80b04 7733 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7734 if (!cnt) {
7735 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7736 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7737 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7738#ifdef BNX2X_STOP_ON_ERROR
7739 bnx2x_panic();
7740#endif
36e552ab 7741 rc = -EBUSY;
34f80b04
EG
7742 break;
7743 }
7744 cnt--;
da5a662a 7745 msleep(1);
5650d9d4 7746 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7747 }
7748 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7749 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7750
7751 return rc;
a2fbb9ea
ET
7752}
7753
34f80b04
EG
7754static void bnx2x_reset_func(struct bnx2x *bp)
7755{
7756 int port = BP_PORT(bp);
7757 int func = BP_FUNC(bp);
7758 int base, i;
7759
7760 /* Configure IGU */
7761 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7762 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7763
37b091ba
MC
7764#ifdef BCM_CNIC
7765 /* Disable Timer scan */
7766 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7767 /*
7768 * Wait for at least 10ms and up to 2 second for the timers scan to
7769 * complete
7770 */
7771 for (i = 0; i < 200; i++) {
7772 msleep(10);
7773 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7774 break;
7775 }
7776#endif
34f80b04
EG
7777 /* Clear ILT */
7778 base = FUNC_ILT_BASE(func);
7779 for (i = base; i < base + ILT_PER_FUNC; i++)
7780 bnx2x_ilt_wr(bp, i, 0);
7781}
7782
7783static void bnx2x_reset_port(struct bnx2x *bp)
7784{
7785 int port = BP_PORT(bp);
7786 u32 val;
7787
7788 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7789
7790 /* Do not rcv packets to BRB */
7791 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7792 /* Do not direct rcv packets that are not for MCP to the BRB */
7793 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7794 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7795
7796 /* Configure AEU */
7797 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7798
7799 msleep(100);
7800 /* Check for BRB port occupancy */
7801 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7802 if (val)
7803 DP(NETIF_MSG_IFDOWN,
33471629 7804 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7805
7806 /* TODO: Close Doorbell port? */
7807}
7808
34f80b04
EG
7809static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7810{
7811 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7812 BP_FUNC(bp), reset_code);
7813
7814 switch (reset_code) {
7815 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7816 bnx2x_reset_port(bp);
7817 bnx2x_reset_func(bp);
7818 bnx2x_reset_common(bp);
7819 break;
7820
7821 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7822 bnx2x_reset_port(bp);
7823 bnx2x_reset_func(bp);
7824 break;
7825
7826 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7827 bnx2x_reset_func(bp);
7828 break;
49d66772 7829
34f80b04
EG
7830 default:
7831 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7832 break;
7833 }
7834}
7835
33471629 7836/* must be called with rtnl_lock */
34f80b04 7837static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7838{
da5a662a 7839 int port = BP_PORT(bp);
a2fbb9ea 7840 u32 reset_code = 0;
da5a662a 7841 int i, cnt, rc;
a2fbb9ea 7842
993ac7b5
MC
7843#ifdef BCM_CNIC
7844 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7845#endif
a2fbb9ea
ET
7846 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7847
ab6ad5a4 7848 /* Set "drop all" */
228241eb
ET
7849 bp->rx_mode = BNX2X_RX_MODE_NONE;
7850 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7851
ab6ad5a4 7852 /* Disable HW interrupts, NAPI and Tx */
f8ef6e44 7853 bnx2x_netif_stop(bp, 1);
e94d8af3 7854
34f80b04
EG
7855 del_timer_sync(&bp->timer);
7856 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7857 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7858 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7859
70b9986c 7860 /* Release IRQs */
6cbe5065 7861 bnx2x_free_irq(bp, false);
70b9986c 7862
555f6c78 7863 /* Wait until tx fastpath tasks complete */
54b9ddaa 7864 for_each_queue(bp, i) {
228241eb
ET
7865 struct bnx2x_fastpath *fp = &bp->fp[i];
7866
34f80b04 7867 cnt = 1000;
e8b5fc51 7868 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7869
7961f791 7870 bnx2x_tx_int(fp);
34f80b04
EG
7871 if (!cnt) {
7872 BNX2X_ERR("timeout waiting for queue[%d]\n",
7873 i);
7874#ifdef BNX2X_STOP_ON_ERROR
7875 bnx2x_panic();
7876 return -EBUSY;
7877#else
7878 break;
7879#endif
7880 }
7881 cnt--;
da5a662a 7882 msleep(1);
34f80b04 7883 }
228241eb 7884 }
da5a662a
VZ
7885 /* Give HW time to discard old tx messages */
7886 msleep(1);
a2fbb9ea 7887
3101c2bc
YG
7888 if (CHIP_IS_E1(bp)) {
7889 struct mac_configuration_cmd *config =
7890 bnx2x_sp(bp, mcast_config);
7891
e665bfda 7892 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 7893
8d9c5f34 7894 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7895 CAM_INVALIDATE(config->config_table[i]);
7896
8d9c5f34 7897 config->hdr.length = i;
3101c2bc
YG
7898 if (CHIP_REV_IS_SLOW(bp))
7899 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7900 else
7901 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7902 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7903 config->hdr.reserved1 = 0;
7904
e665bfda
MC
7905 bp->set_mac_pending++;
7906 smp_wmb();
7907
3101c2bc
YG
7908 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7909 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7910 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7911
7912 } else { /* E1H */
65abd74d
YG
7913 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7914
e665bfda 7915 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
7916
7917 for (i = 0; i < MC_HASH_SIZE; i++)
7918 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
7919
7920 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 7921 }
993ac7b5
MC
7922#ifdef BCM_CNIC
7923 /* Clear iSCSI L2 MAC */
7924 mutex_lock(&bp->cnic_mutex);
7925 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7926 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7927 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7928 }
7929 mutex_unlock(&bp->cnic_mutex);
7930#endif
3101c2bc 7931
65abd74d
YG
7932 if (unload_mode == UNLOAD_NORMAL)
7933 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7934
7d0446c2 7935 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7936 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7937
7d0446c2 7938 else if (bp->wol) {
65abd74d
YG
7939 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7940 u8 *mac_addr = bp->dev->dev_addr;
7941 u32 val;
7942 /* The mac address is written to entries 1-4 to
7943 preserve entry 0 which is used by the PMF */
7944 u8 entry = (BP_E1HVN(bp) + 1)*8;
7945
7946 val = (mac_addr[0] << 8) | mac_addr[1];
7947 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7948
7949 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7950 (mac_addr[4] << 8) | mac_addr[5];
7951 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7952
7953 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7954
7955 } else
7956 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7957
34f80b04
EG
7958 /* Close multi and leading connections
7959 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
7960 for_each_nondefault_queue(bp, i)
7961 if (bnx2x_stop_multi(bp, i))
228241eb 7962 goto unload_error;
a2fbb9ea 7963
da5a662a
VZ
7964 rc = bnx2x_stop_leading(bp);
7965 if (rc) {
34f80b04 7966 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7967#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7968 return -EBUSY;
da5a662a
VZ
7969#else
7970 goto unload_error;
34f80b04 7971#endif
228241eb
ET
7972 }
7973
7974unload_error:
34f80b04 7975 if (!BP_NOMCP(bp))
228241eb 7976 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7977 else {
f5372251 7978 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
7979 load_count[0], load_count[1], load_count[2]);
7980 load_count[0]--;
da5a662a 7981 load_count[1 + port]--;
f5372251 7982 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
7983 load_count[0], load_count[1], load_count[2]);
7984 if (load_count[0] == 0)
7985 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7986 else if (load_count[1 + port] == 0)
34f80b04
EG
7987 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7988 else
7989 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7990 }
a2fbb9ea 7991
34f80b04
EG
7992 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7993 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7994 bnx2x__link_reset(bp);
a2fbb9ea
ET
7995
7996 /* Reset the chip */
228241eb 7997 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7998
7999 /* Report UNLOAD_DONE to MCP */
34f80b04 8000 if (!BP_NOMCP(bp))
a2fbb9ea 8001 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8002
9a035440 8003 bp->port.pmf = 0;
a2fbb9ea 8004
7a9b2557 8005 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8006 bnx2x_free_skbs(bp);
54b9ddaa 8007 for_each_queue(bp, i)
3196a88a 8008 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 8009 for_each_queue(bp, i)
7cde1c8b 8010 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8011 bnx2x_free_mem(bp);
8012
8013 bp->state = BNX2X_STATE_CLOSED;
228241eb 8014
a2fbb9ea
ET
8015 netif_carrier_off(bp->dev);
8016
8017 return 0;
8018}
8019
34f80b04
EG
8020static void bnx2x_reset_task(struct work_struct *work)
8021{
8022 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8023
8024#ifdef BNX2X_STOP_ON_ERROR
8025 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8026 " so reset not done to allow debug dump,\n"
ad361c98 8027 " you will need to reboot when done\n");
34f80b04
EG
8028 return;
8029#endif
8030
8031 rtnl_lock();
8032
8033 if (!netif_running(bp->dev))
8034 goto reset_task_exit;
8035
8036 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8037 bnx2x_nic_load(bp, LOAD_NORMAL);
8038
8039reset_task_exit:
8040 rtnl_unlock();
8041}
8042
a2fbb9ea
ET
8043/* end of nic load/unload */
8044
8045/* ethtool_ops */
8046
8047/*
8048 * Init service functions
8049 */
8050
f1ef27ef
EG
8051static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8052{
8053 switch (func) {
8054 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8055 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8056 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8057 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8058 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8059 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8060 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8061 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8062 default:
8063 BNX2X_ERR("Unsupported function index: %d\n", func);
8064 return (u32)(-1);
8065 }
8066}
8067
8068static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8069{
8070 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8071
8072 /* Flush all outstanding writes */
8073 mmiowb();
8074
8075 /* Pretend to be function 0 */
8076 REG_WR(bp, reg, 0);
8077 /* Flush the GRC transaction (in the chip) */
8078 new_val = REG_RD(bp, reg);
8079 if (new_val != 0) {
8080 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8081 new_val);
8082 BUG();
8083 }
8084
8085 /* From now we are in the "like-E1" mode */
8086 bnx2x_int_disable(bp);
8087
8088 /* Flush all outstanding writes */
8089 mmiowb();
8090
8091 /* Restore the original funtion settings */
8092 REG_WR(bp, reg, orig_func);
8093 new_val = REG_RD(bp, reg);
8094 if (new_val != orig_func) {
8095 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8096 orig_func, new_val);
8097 BUG();
8098 }
8099}
8100
8101static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8102{
8103 if (CHIP_IS_E1H(bp))
8104 bnx2x_undi_int_disable_e1h(bp, func);
8105 else
8106 bnx2x_int_disable(bp);
8107}
8108
34f80b04
EG
8109static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8110{
8111 u32 val;
8112
8113 /* Check if there is any driver already loaded */
8114 val = REG_RD(bp, MISC_REG_UNPREPARED);
8115 if (val == 0x1) {
8116 /* Check if it is the UNDI driver
8117 * UNDI driver initializes CID offset for normal bell to 0x7
8118 */
4a37fb66 8119 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8120 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8121 if (val == 0x7) {
8122 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8123 /* save our func */
34f80b04 8124 int func = BP_FUNC(bp);
da5a662a
VZ
8125 u32 swap_en;
8126 u32 swap_val;
34f80b04 8127
b4661739
EG
8128 /* clear the UNDI indication */
8129 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8130
34f80b04
EG
8131 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8132
8133 /* try unload UNDI on port 0 */
8134 bp->func = 0;
da5a662a
VZ
8135 bp->fw_seq =
8136 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8137 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 8138 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8139
8140 /* if UNDI is loaded on the other port */
8141 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8142
da5a662a
VZ
8143 /* send "DONE" for previous unload */
8144 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8145
8146 /* unload UNDI on port 1 */
34f80b04 8147 bp->func = 1;
da5a662a
VZ
8148 bp->fw_seq =
8149 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8150 DRV_MSG_SEQ_NUMBER_MASK);
8151 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8152
8153 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8154 }
8155
b4661739
EG
8156 /* now it's safe to release the lock */
8157 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8158
f1ef27ef 8159 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
8160
8161 /* close input traffic and wait for it */
8162 /* Do not rcv packets to BRB */
8163 REG_WR(bp,
8164 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8165 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8166 /* Do not direct rcv packets that are not for MCP to
8167 * the BRB */
8168 REG_WR(bp,
8169 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8170 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8171 /* clear AEU */
8172 REG_WR(bp,
8173 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8174 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8175 msleep(10);
8176
8177 /* save NIG port swap info */
8178 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8179 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
8180 /* reset device */
8181 REG_WR(bp,
8182 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 8183 0xd3ffffff);
34f80b04
EG
8184 REG_WR(bp,
8185 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8186 0x1403);
da5a662a
VZ
8187 /* take the NIG out of reset and restore swap values */
8188 REG_WR(bp,
8189 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8190 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8191 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8192 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8193
8194 /* send unload done to the MCP */
8195 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8196
8197 /* restore our func and fw_seq */
8198 bp->func = func;
8199 bp->fw_seq =
8200 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8201 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
8202
8203 } else
8204 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8205 }
8206}
8207
8208static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8209{
8210 u32 val, val2, val3, val4, id;
72ce58c3 8211 u16 pmc;
34f80b04
EG
8212
8213 /* Get the chip revision id and number. */
8214 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8215 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8216 id = ((val & 0xffff) << 16);
8217 val = REG_RD(bp, MISC_REG_CHIP_REV);
8218 id |= ((val & 0xf) << 12);
8219 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8220 id |= ((val & 0xff) << 4);
5a40e08e 8221 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
8222 id |= (val & 0xf);
8223 bp->common.chip_id = id;
8224 bp->link_params.chip_id = bp->common.chip_id;
8225 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8226
1c06328c
EG
8227 val = (REG_RD(bp, 0x2874) & 0x55);
8228 if ((bp->common.chip_id & 0x1) ||
8229 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8230 bp->flags |= ONE_PORT_FLAG;
8231 BNX2X_DEV_INFO("single port device\n");
8232 }
8233
34f80b04
EG
8234 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8235 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8236 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8237 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8238 bp->common.flash_size, bp->common.flash_size);
8239
8240 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8241 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8242 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
8243 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8244 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
8245
8246 if (!bp->common.shmem_base ||
8247 (bp->common.shmem_base < 0xA0000) ||
8248 (bp->common.shmem_base >= 0xC0000)) {
8249 BNX2X_DEV_INFO("MCP not active\n");
8250 bp->flags |= NO_MCP_FLAG;
8251 return;
8252 }
8253
8254 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8255 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8256 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8257 BNX2X_ERR("BAD MCP validity signature\n");
8258
8259 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8260 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8261
8262 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8263 SHARED_HW_CFG_LED_MODE_MASK) >>
8264 SHARED_HW_CFG_LED_MODE_SHIFT);
8265
c2c8b03e
EG
8266 bp->link_params.feature_config_flags = 0;
8267 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8268 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8269 bp->link_params.feature_config_flags |=
8270 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8271 else
8272 bp->link_params.feature_config_flags &=
8273 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8274
34f80b04
EG
8275 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8276 bp->common.bc_ver = val;
8277 BNX2X_DEV_INFO("bc_ver %X\n", val);
8278 if (val < BNX2X_BC_VER) {
8279 /* for now only warn
8280 * later we might need to enforce this */
8281 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8282 " please upgrade BC\n", BNX2X_BC_VER, val);
8283 }
4d295db0
EG
8284 bp->link_params.feature_config_flags |=
8285 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8286 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
8287
8288 if (BP_E1HVN(bp) == 0) {
8289 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8290 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8291 } else {
8292 /* no WOL capability for E1HVN != 0 */
8293 bp->flags |= NO_WOL_FLAG;
8294 }
8295 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8296 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8297
8298 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8299 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8300 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8301 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8302
8303 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8304 val, val2, val3, val4);
8305}
8306
8307static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8308 u32 switch_cfg)
a2fbb9ea 8309{
34f80b04 8310 int port = BP_PORT(bp);
a2fbb9ea
ET
8311 u32 ext_phy_type;
8312
a2fbb9ea
ET
8313 switch (switch_cfg) {
8314 case SWITCH_CFG_1G:
8315 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8316
c18487ee
YR
8317 ext_phy_type =
8318 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8319 switch (ext_phy_type) {
8320 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8321 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8322 ext_phy_type);
8323
34f80b04
EG
8324 bp->port.supported |= (SUPPORTED_10baseT_Half |
8325 SUPPORTED_10baseT_Full |
8326 SUPPORTED_100baseT_Half |
8327 SUPPORTED_100baseT_Full |
8328 SUPPORTED_1000baseT_Full |
8329 SUPPORTED_2500baseX_Full |
8330 SUPPORTED_TP |
8331 SUPPORTED_FIBRE |
8332 SUPPORTED_Autoneg |
8333 SUPPORTED_Pause |
8334 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8335 break;
8336
8337 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8338 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8339 ext_phy_type);
8340
34f80b04
EG
8341 bp->port.supported |= (SUPPORTED_10baseT_Half |
8342 SUPPORTED_10baseT_Full |
8343 SUPPORTED_100baseT_Half |
8344 SUPPORTED_100baseT_Full |
8345 SUPPORTED_1000baseT_Full |
8346 SUPPORTED_TP |
8347 SUPPORTED_FIBRE |
8348 SUPPORTED_Autoneg |
8349 SUPPORTED_Pause |
8350 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8351 break;
8352
8353 default:
8354 BNX2X_ERR("NVRAM config error. "
8355 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8356 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8357 return;
8358 }
8359
34f80b04
EG
8360 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8361 port*0x10);
8362 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8363 break;
8364
8365 case SWITCH_CFG_10G:
8366 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8367
c18487ee
YR
8368 ext_phy_type =
8369 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8370 switch (ext_phy_type) {
8371 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8372 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8373 ext_phy_type);
8374
34f80b04
EG
8375 bp->port.supported |= (SUPPORTED_10baseT_Half |
8376 SUPPORTED_10baseT_Full |
8377 SUPPORTED_100baseT_Half |
8378 SUPPORTED_100baseT_Full |
8379 SUPPORTED_1000baseT_Full |
8380 SUPPORTED_2500baseX_Full |
8381 SUPPORTED_10000baseT_Full |
8382 SUPPORTED_TP |
8383 SUPPORTED_FIBRE |
8384 SUPPORTED_Autoneg |
8385 SUPPORTED_Pause |
8386 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8387 break;
8388
589abe3a
EG
8389 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8390 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8391 ext_phy_type);
f1410647 8392
34f80b04 8393 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8394 SUPPORTED_1000baseT_Full |
34f80b04 8395 SUPPORTED_FIBRE |
589abe3a 8396 SUPPORTED_Autoneg |
34f80b04
EG
8397 SUPPORTED_Pause |
8398 SUPPORTED_Asym_Pause);
f1410647
ET
8399 break;
8400
589abe3a
EG
8401 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8402 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
8403 ext_phy_type);
8404
34f80b04 8405 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8406 SUPPORTED_2500baseX_Full |
34f80b04 8407 SUPPORTED_1000baseT_Full |
589abe3a
EG
8408 SUPPORTED_FIBRE |
8409 SUPPORTED_Autoneg |
8410 SUPPORTED_Pause |
8411 SUPPORTED_Asym_Pause);
8412 break;
8413
8414 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8415 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8416 ext_phy_type);
8417
8418 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
8419 SUPPORTED_FIBRE |
8420 SUPPORTED_Pause |
8421 SUPPORTED_Asym_Pause);
f1410647
ET
8422 break;
8423
589abe3a
EG
8424 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8425 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
8426 ext_phy_type);
8427
34f80b04
EG
8428 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8429 SUPPORTED_1000baseT_Full |
8430 SUPPORTED_FIBRE |
34f80b04
EG
8431 SUPPORTED_Pause |
8432 SUPPORTED_Asym_Pause);
f1410647
ET
8433 break;
8434
589abe3a
EG
8435 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8436 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
8437 ext_phy_type);
8438
34f80b04 8439 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8440 SUPPORTED_1000baseT_Full |
34f80b04 8441 SUPPORTED_Autoneg |
589abe3a 8442 SUPPORTED_FIBRE |
34f80b04
EG
8443 SUPPORTED_Pause |
8444 SUPPORTED_Asym_Pause);
c18487ee
YR
8445 break;
8446
4d295db0
EG
8447 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8448 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8449 ext_phy_type);
8450
8451 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8452 SUPPORTED_1000baseT_Full |
8453 SUPPORTED_Autoneg |
8454 SUPPORTED_FIBRE |
8455 SUPPORTED_Pause |
8456 SUPPORTED_Asym_Pause);
8457 break;
8458
f1410647
ET
8459 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8460 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8461 ext_phy_type);
8462
34f80b04
EG
8463 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8464 SUPPORTED_TP |
8465 SUPPORTED_Autoneg |
8466 SUPPORTED_Pause |
8467 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8468 break;
8469
28577185
EG
8470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8471 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8472 ext_phy_type);
8473
8474 bp->port.supported |= (SUPPORTED_10baseT_Half |
8475 SUPPORTED_10baseT_Full |
8476 SUPPORTED_100baseT_Half |
8477 SUPPORTED_100baseT_Full |
8478 SUPPORTED_1000baseT_Full |
8479 SUPPORTED_10000baseT_Full |
8480 SUPPORTED_TP |
8481 SUPPORTED_Autoneg |
8482 SUPPORTED_Pause |
8483 SUPPORTED_Asym_Pause);
8484 break;
8485
c18487ee
YR
8486 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8487 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8488 bp->link_params.ext_phy_config);
8489 break;
8490
a2fbb9ea
ET
8491 default:
8492 BNX2X_ERR("NVRAM config error. "
8493 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8494 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8495 return;
8496 }
8497
34f80b04
EG
8498 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8499 port*0x18);
8500 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8501
a2fbb9ea
ET
8502 break;
8503
8504 default:
8505 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8506 bp->port.link_config);
a2fbb9ea
ET
8507 return;
8508 }
34f80b04 8509 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
8510
8511 /* mask what we support according to speed_cap_mask */
c18487ee
YR
8512 if (!(bp->link_params.speed_cap_mask &
8513 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8514 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8515
c18487ee
YR
8516 if (!(bp->link_params.speed_cap_mask &
8517 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8518 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8519
c18487ee
YR
8520 if (!(bp->link_params.speed_cap_mask &
8521 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8522 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8523
c18487ee
YR
8524 if (!(bp->link_params.speed_cap_mask &
8525 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8526 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8527
c18487ee
YR
8528 if (!(bp->link_params.speed_cap_mask &
8529 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
8530 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8531 SUPPORTED_1000baseT_Full);
a2fbb9ea 8532
c18487ee
YR
8533 if (!(bp->link_params.speed_cap_mask &
8534 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8535 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8536
c18487ee
YR
8537 if (!(bp->link_params.speed_cap_mask &
8538 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8539 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8540
34f80b04 8541 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
8542}
8543
34f80b04 8544static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8545{
c18487ee 8546 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8547
34f80b04 8548 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8549 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8550 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8551 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8552 bp->port.advertising = bp->port.supported;
a2fbb9ea 8553 } else {
c18487ee
YR
8554 u32 ext_phy_type =
8555 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8556
8557 if ((ext_phy_type ==
8558 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8559 (ext_phy_type ==
8560 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8561 /* force 10G, no AN */
c18487ee 8562 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8563 bp->port.advertising =
a2fbb9ea
ET
8564 (ADVERTISED_10000baseT_Full |
8565 ADVERTISED_FIBRE);
8566 break;
8567 }
8568 BNX2X_ERR("NVRAM config error. "
8569 "Invalid link_config 0x%x"
8570 " Autoneg not supported\n",
34f80b04 8571 bp->port.link_config);
a2fbb9ea
ET
8572 return;
8573 }
8574 break;
8575
8576 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8577 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8578 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
8579 bp->port.advertising = (ADVERTISED_10baseT_Full |
8580 ADVERTISED_TP);
a2fbb9ea
ET
8581 } else {
8582 BNX2X_ERR("NVRAM config error. "
8583 "Invalid link_config 0x%x"
8584 " speed_cap_mask 0x%x\n",
34f80b04 8585 bp->port.link_config,
c18487ee 8586 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8587 return;
8588 }
8589 break;
8590
8591 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8592 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8593 bp->link_params.req_line_speed = SPEED_10;
8594 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8595 bp->port.advertising = (ADVERTISED_10baseT_Half |
8596 ADVERTISED_TP);
a2fbb9ea
ET
8597 } else {
8598 BNX2X_ERR("NVRAM config error. "
8599 "Invalid link_config 0x%x"
8600 " speed_cap_mask 0x%x\n",
34f80b04 8601 bp->port.link_config,
c18487ee 8602 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8603 return;
8604 }
8605 break;
8606
8607 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8608 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8609 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8610 bp->port.advertising = (ADVERTISED_100baseT_Full |
8611 ADVERTISED_TP);
a2fbb9ea
ET
8612 } else {
8613 BNX2X_ERR("NVRAM config error. "
8614 "Invalid link_config 0x%x"
8615 " speed_cap_mask 0x%x\n",
34f80b04 8616 bp->port.link_config,
c18487ee 8617 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8618 return;
8619 }
8620 break;
8621
8622 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8623 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8624 bp->link_params.req_line_speed = SPEED_100;
8625 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8626 bp->port.advertising = (ADVERTISED_100baseT_Half |
8627 ADVERTISED_TP);
a2fbb9ea
ET
8628 } else {
8629 BNX2X_ERR("NVRAM config error. "
8630 "Invalid link_config 0x%x"
8631 " speed_cap_mask 0x%x\n",
34f80b04 8632 bp->port.link_config,
c18487ee 8633 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8634 return;
8635 }
8636 break;
8637
8638 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8639 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8640 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8641 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8642 ADVERTISED_TP);
a2fbb9ea
ET
8643 } else {
8644 BNX2X_ERR("NVRAM config error. "
8645 "Invalid link_config 0x%x"
8646 " speed_cap_mask 0x%x\n",
34f80b04 8647 bp->port.link_config,
c18487ee 8648 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8649 return;
8650 }
8651 break;
8652
8653 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8654 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8655 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8656 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8657 ADVERTISED_TP);
a2fbb9ea
ET
8658 } else {
8659 BNX2X_ERR("NVRAM config error. "
8660 "Invalid link_config 0x%x"
8661 " speed_cap_mask 0x%x\n",
34f80b04 8662 bp->port.link_config,
c18487ee 8663 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8664 return;
8665 }
8666 break;
8667
8668 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8669 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8670 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8671 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8672 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8673 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8674 ADVERTISED_FIBRE);
a2fbb9ea
ET
8675 } else {
8676 BNX2X_ERR("NVRAM config error. "
8677 "Invalid link_config 0x%x"
8678 " speed_cap_mask 0x%x\n",
34f80b04 8679 bp->port.link_config,
c18487ee 8680 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8681 return;
8682 }
8683 break;
8684
8685 default:
8686 BNX2X_ERR("NVRAM config error. "
8687 "BAD link speed link_config 0x%x\n",
34f80b04 8688 bp->port.link_config);
c18487ee 8689 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8690 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8691 break;
8692 }
a2fbb9ea 8693
34f80b04
EG
8694 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8695 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8696 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8697 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8698 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8699
c18487ee 8700 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8701 " advertising 0x%x\n",
c18487ee
YR
8702 bp->link_params.req_line_speed,
8703 bp->link_params.req_duplex,
34f80b04 8704 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8705}
8706
e665bfda
MC
8707static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8708{
8709 mac_hi = cpu_to_be16(mac_hi);
8710 mac_lo = cpu_to_be32(mac_lo);
8711 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8712 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8713}
8714
34f80b04 8715static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8716{
34f80b04
EG
8717 int port = BP_PORT(bp);
8718 u32 val, val2;
589abe3a 8719 u32 config;
c2c8b03e 8720 u16 i;
01cd4528 8721 u32 ext_phy_type;
a2fbb9ea 8722
c18487ee 8723 bp->link_params.bp = bp;
34f80b04 8724 bp->link_params.port = port;
c18487ee 8725
c18487ee 8726 bp->link_params.lane_config =
a2fbb9ea 8727 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8728 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8729 SHMEM_RD(bp,
8730 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8731 /* BCM8727_NOC => BCM8727 no over current */
8732 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8733 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8734 bp->link_params.ext_phy_config &=
8735 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8736 bp->link_params.ext_phy_config |=
8737 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8738 bp->link_params.feature_config_flags |=
8739 FEATURE_CONFIG_BCM8727_NOC;
8740 }
8741
c18487ee 8742 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8743 SHMEM_RD(bp,
8744 dev_info.port_hw_config[port].speed_capability_mask);
8745
34f80b04 8746 bp->port.link_config =
a2fbb9ea
ET
8747 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8748
c2c8b03e
EG
8749 /* Get the 4 lanes xgxs config rx and tx */
8750 for (i = 0; i < 2; i++) {
8751 val = SHMEM_RD(bp,
8752 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8753 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8754 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8755
8756 val = SHMEM_RD(bp,
8757 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8758 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8759 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8760 }
8761
3ce2c3f9
EG
8762 /* If the device is capable of WoL, set the default state according
8763 * to the HW
8764 */
4d295db0 8765 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8766 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8767 (config & PORT_FEATURE_WOL_ENABLED));
8768
c2c8b03e
EG
8769 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8770 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8771 bp->link_params.lane_config,
8772 bp->link_params.ext_phy_config,
34f80b04 8773 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8774
4d295db0
EG
8775 bp->link_params.switch_cfg |= (bp->port.link_config &
8776 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8777 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8778
8779 bnx2x_link_settings_requested(bp);
8780
01cd4528
EG
8781 /*
8782 * If connected directly, work with the internal PHY, otherwise, work
8783 * with the external PHY
8784 */
8785 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8786 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8787 bp->mdio.prtad = bp->link_params.phy_addr;
8788
8789 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8790 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8791 bp->mdio.prtad =
659bc5c4 8792 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 8793
a2fbb9ea
ET
8794 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8795 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8796 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
8797 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8798 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
8799
8800#ifdef BCM_CNIC
8801 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8802 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8803 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8804#endif
34f80b04
EG
8805}
8806
8807static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8808{
8809 int func = BP_FUNC(bp);
8810 u32 val, val2;
8811 int rc = 0;
a2fbb9ea 8812
34f80b04 8813 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8814
34f80b04
EG
8815 bp->e1hov = 0;
8816 bp->e1hmf = 0;
8817 if (CHIP_IS_E1H(bp)) {
8818 bp->mf_config =
8819 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8820
2691d51d 8821 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8822 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8823 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8824 bp->e1hmf = 1;
2691d51d
EG
8825 BNX2X_DEV_INFO("%s function mode\n",
8826 IS_E1HMF(bp) ? "multi" : "single");
8827
8828 if (IS_E1HMF(bp)) {
8829 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8830 e1hov_tag) &
8831 FUNC_MF_CFG_E1HOV_TAG_MASK);
8832 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8833 bp->e1hov = val;
8834 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8835 "(0x%04x)\n",
8836 func, bp->e1hov, bp->e1hov);
8837 } else {
34f80b04
EG
8838 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8839 " aborting\n", func);
8840 rc = -EPERM;
8841 }
2691d51d
EG
8842 } else {
8843 if (BP_E1HVN(bp)) {
8844 BNX2X_ERR("!!! VN %d in single function mode,"
8845 " aborting\n", BP_E1HVN(bp));
8846 rc = -EPERM;
8847 }
34f80b04
EG
8848 }
8849 }
a2fbb9ea 8850
34f80b04
EG
8851 if (!BP_NOMCP(bp)) {
8852 bnx2x_get_port_hwinfo(bp);
8853
8854 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8855 DRV_MSG_SEQ_NUMBER_MASK);
8856 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8857 }
8858
8859 if (IS_E1HMF(bp)) {
8860 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8861 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8862 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8863 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8864 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8865 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8866 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8867 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8868 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8869 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8870 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8871 ETH_ALEN);
8872 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8873 ETH_ALEN);
a2fbb9ea 8874 }
34f80b04
EG
8875
8876 return rc;
a2fbb9ea
ET
8877 }
8878
34f80b04
EG
8879 if (BP_NOMCP(bp)) {
8880 /* only supposed to happen on emulation/FPGA */
33471629 8881 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8882 random_ether_addr(bp->dev->dev_addr);
8883 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8884 }
a2fbb9ea 8885
34f80b04
EG
8886 return rc;
8887}
8888
8889static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8890{
8891 int func = BP_FUNC(bp);
87942b46 8892 int timer_interval;
34f80b04
EG
8893 int rc;
8894
da5a662a
VZ
8895 /* Disable interrupt handling until HW is initialized */
8896 atomic_set(&bp->intr_sem, 1);
e1510706 8897 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8898
34f80b04 8899 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8900 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
8901#ifdef BCM_CNIC
8902 mutex_init(&bp->cnic_mutex);
8903#endif
a2fbb9ea 8904
1cf167f2 8905 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8906 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8907
8908 rc = bnx2x_get_hwinfo(bp);
8909
8910 /* need to reset chip if undi was active */
8911 if (!BP_NOMCP(bp))
8912 bnx2x_undi_unload(bp);
8913
8914 if (CHIP_REV_IS_FPGA(bp))
8915 printk(KERN_ERR PFX "FPGA detected\n");
8916
8917 if (BP_NOMCP(bp) && (func == 0))
8918 printk(KERN_ERR PFX
8919 "MCP disabled, must load devices in order!\n");
8920
555f6c78 8921 /* Set multi queue mode */
8badd27a
EG
8922 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8923 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8924 printk(KERN_ERR PFX
8badd27a 8925 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8926 multi_mode = ETH_RSS_MODE_DISABLED;
8927 }
8928 bp->multi_mode = multi_mode;
8929
8930
7a9b2557
VZ
8931 /* Set TPA flags */
8932 if (disable_tpa) {
8933 bp->flags &= ~TPA_ENABLE_FLAG;
8934 bp->dev->features &= ~NETIF_F_LRO;
8935 } else {
8936 bp->flags |= TPA_ENABLE_FLAG;
8937 bp->dev->features |= NETIF_F_LRO;
8938 }
8939
a18f5128
EG
8940 if (CHIP_IS_E1(bp))
8941 bp->dropless_fc = 0;
8942 else
8943 bp->dropless_fc = dropless_fc;
8944
8d5726c4 8945 bp->mrrs = mrrs;
7a9b2557 8946
34f80b04
EG
8947 bp->tx_ring_size = MAX_TX_AVAIL;
8948 bp->rx_ring_size = MAX_RX_AVAIL;
8949
8950 bp->rx_csum = 1;
34f80b04 8951
7d323bfd
EG
8952 /* make sure that the numbers are in the right granularity */
8953 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8954 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 8955
87942b46
EG
8956 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8957 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8958
8959 init_timer(&bp->timer);
8960 bp->timer.expires = jiffies + bp->current_interval;
8961 bp->timer.data = (unsigned long) bp;
8962 bp->timer.function = bnx2x_timer;
8963
8964 return rc;
a2fbb9ea
ET
8965}
8966
8967/*
8968 * ethtool service functions
8969 */
8970
8971/* All ethtool functions called with rtnl_lock */
8972
8973static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8974{
8975 struct bnx2x *bp = netdev_priv(dev);
8976
34f80b04
EG
8977 cmd->supported = bp->port.supported;
8978 cmd->advertising = bp->port.advertising;
a2fbb9ea 8979
f34d28ea
EG
8980 if ((bp->state == BNX2X_STATE_OPEN) &&
8981 !(bp->flags & MF_FUNC_DIS) &&
8982 (bp->link_vars.link_up)) {
c18487ee
YR
8983 cmd->speed = bp->link_vars.line_speed;
8984 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
8985 if (IS_E1HMF(bp)) {
8986 u16 vn_max_rate;
34f80b04 8987
b015e3d1
EG
8988 vn_max_rate =
8989 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 8990 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
8991 if (vn_max_rate < cmd->speed)
8992 cmd->speed = vn_max_rate;
8993 }
8994 } else {
8995 cmd->speed = -1;
8996 cmd->duplex = -1;
34f80b04 8997 }
a2fbb9ea 8998
c18487ee
YR
8999 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9000 u32 ext_phy_type =
9001 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
9002
9003 switch (ext_phy_type) {
9004 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 9005 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 9006 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
9007 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9008 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9009 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 9010 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
9011 cmd->port = PORT_FIBRE;
9012 break;
9013
9014 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 9015 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
9016 cmd->port = PORT_TP;
9017 break;
9018
c18487ee
YR
9019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9020 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9021 bp->link_params.ext_phy_config);
9022 break;
9023
f1410647
ET
9024 default:
9025 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
9026 bp->link_params.ext_phy_config);
9027 break;
f1410647
ET
9028 }
9029 } else
a2fbb9ea 9030 cmd->port = PORT_TP;
a2fbb9ea 9031
01cd4528 9032 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
9033 cmd->transceiver = XCVR_INTERNAL;
9034
c18487ee 9035 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 9036 cmd->autoneg = AUTONEG_ENABLE;
f1410647 9037 else
a2fbb9ea 9038 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
9039
9040 cmd->maxtxpkt = 0;
9041 cmd->maxrxpkt = 0;
9042
9043 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9044 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9045 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9046 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9047 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9048 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9049 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9050
9051 return 0;
9052}
9053
9054static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9055{
9056 struct bnx2x *bp = netdev_priv(dev);
9057 u32 advertising;
9058
34f80b04
EG
9059 if (IS_E1HMF(bp))
9060 return 0;
9061
a2fbb9ea
ET
9062 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9063 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9064 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9065 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9066 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9067 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9068 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9069
a2fbb9ea 9070 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
9071 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9072 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 9073 return -EINVAL;
f1410647 9074 }
a2fbb9ea
ET
9075
9076 /* advertise the requested speed and duplex if supported */
34f80b04 9077 cmd->advertising &= bp->port.supported;
a2fbb9ea 9078
c18487ee
YR
9079 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9080 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
9081 bp->port.advertising |= (ADVERTISED_Autoneg |
9082 cmd->advertising);
a2fbb9ea
ET
9083
9084 } else { /* forced speed */
9085 /* advertise the requested speed and duplex if supported */
9086 switch (cmd->speed) {
9087 case SPEED_10:
9088 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9089 if (!(bp->port.supported &
f1410647
ET
9090 SUPPORTED_10baseT_Full)) {
9091 DP(NETIF_MSG_LINK,
9092 "10M full not supported\n");
a2fbb9ea 9093 return -EINVAL;
f1410647 9094 }
a2fbb9ea
ET
9095
9096 advertising = (ADVERTISED_10baseT_Full |
9097 ADVERTISED_TP);
9098 } else {
34f80b04 9099 if (!(bp->port.supported &
f1410647
ET
9100 SUPPORTED_10baseT_Half)) {
9101 DP(NETIF_MSG_LINK,
9102 "10M half not supported\n");
a2fbb9ea 9103 return -EINVAL;
f1410647 9104 }
a2fbb9ea
ET
9105
9106 advertising = (ADVERTISED_10baseT_Half |
9107 ADVERTISED_TP);
9108 }
9109 break;
9110
9111 case SPEED_100:
9112 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9113 if (!(bp->port.supported &
f1410647
ET
9114 SUPPORTED_100baseT_Full)) {
9115 DP(NETIF_MSG_LINK,
9116 "100M full not supported\n");
a2fbb9ea 9117 return -EINVAL;
f1410647 9118 }
a2fbb9ea
ET
9119
9120 advertising = (ADVERTISED_100baseT_Full |
9121 ADVERTISED_TP);
9122 } else {
34f80b04 9123 if (!(bp->port.supported &
f1410647
ET
9124 SUPPORTED_100baseT_Half)) {
9125 DP(NETIF_MSG_LINK,
9126 "100M half not supported\n");
a2fbb9ea 9127 return -EINVAL;
f1410647 9128 }
a2fbb9ea
ET
9129
9130 advertising = (ADVERTISED_100baseT_Half |
9131 ADVERTISED_TP);
9132 }
9133 break;
9134
9135 case SPEED_1000:
f1410647
ET
9136 if (cmd->duplex != DUPLEX_FULL) {
9137 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 9138 return -EINVAL;
f1410647 9139 }
a2fbb9ea 9140
34f80b04 9141 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 9142 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 9143 return -EINVAL;
f1410647 9144 }
a2fbb9ea
ET
9145
9146 advertising = (ADVERTISED_1000baseT_Full |
9147 ADVERTISED_TP);
9148 break;
9149
9150 case SPEED_2500:
f1410647
ET
9151 if (cmd->duplex != DUPLEX_FULL) {
9152 DP(NETIF_MSG_LINK,
9153 "2.5G half not supported\n");
a2fbb9ea 9154 return -EINVAL;
f1410647 9155 }
a2fbb9ea 9156
34f80b04 9157 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
9158 DP(NETIF_MSG_LINK,
9159 "2.5G full not supported\n");
a2fbb9ea 9160 return -EINVAL;
f1410647 9161 }
a2fbb9ea 9162
f1410647 9163 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
9164 ADVERTISED_TP);
9165 break;
9166
9167 case SPEED_10000:
f1410647
ET
9168 if (cmd->duplex != DUPLEX_FULL) {
9169 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 9170 return -EINVAL;
f1410647 9171 }
a2fbb9ea 9172
34f80b04 9173 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 9174 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 9175 return -EINVAL;
f1410647 9176 }
a2fbb9ea
ET
9177
9178 advertising = (ADVERTISED_10000baseT_Full |
9179 ADVERTISED_FIBRE);
9180 break;
9181
9182 default:
f1410647 9183 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
9184 return -EINVAL;
9185 }
9186
c18487ee
YR
9187 bp->link_params.req_line_speed = cmd->speed;
9188 bp->link_params.req_duplex = cmd->duplex;
34f80b04 9189 bp->port.advertising = advertising;
a2fbb9ea
ET
9190 }
9191
c18487ee 9192 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 9193 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 9194 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 9195 bp->port.advertising);
a2fbb9ea 9196
34f80b04 9197 if (netif_running(dev)) {
bb2a0f7a 9198 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9199 bnx2x_link_set(bp);
9200 }
a2fbb9ea
ET
9201
9202 return 0;
9203}
9204
0a64ea57
EG
9205#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9206#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9207
9208static int bnx2x_get_regs_len(struct net_device *dev)
9209{
0a64ea57 9210 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 9211 int regdump_len = 0;
0a64ea57
EG
9212 int i;
9213
0a64ea57
EG
9214 if (CHIP_IS_E1(bp)) {
9215 for (i = 0; i < REGS_COUNT; i++)
9216 if (IS_E1_ONLINE(reg_addrs[i].info))
9217 regdump_len += reg_addrs[i].size;
9218
9219 for (i = 0; i < WREGS_COUNT_E1; i++)
9220 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9221 regdump_len += wreg_addrs_e1[i].size *
9222 (1 + wreg_addrs_e1[i].read_regs_count);
9223
9224 } else { /* E1H */
9225 for (i = 0; i < REGS_COUNT; i++)
9226 if (IS_E1H_ONLINE(reg_addrs[i].info))
9227 regdump_len += reg_addrs[i].size;
9228
9229 for (i = 0; i < WREGS_COUNT_E1H; i++)
9230 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9231 regdump_len += wreg_addrs_e1h[i].size *
9232 (1 + wreg_addrs_e1h[i].read_regs_count);
9233 }
9234 regdump_len *= 4;
9235 regdump_len += sizeof(struct dump_hdr);
9236
9237 return regdump_len;
9238}
9239
9240static void bnx2x_get_regs(struct net_device *dev,
9241 struct ethtool_regs *regs, void *_p)
9242{
9243 u32 *p = _p, i, j;
9244 struct bnx2x *bp = netdev_priv(dev);
9245 struct dump_hdr dump_hdr = {0};
9246
9247 regs->version = 0;
9248 memset(p, 0, regs->len);
9249
9250 if (!netif_running(bp->dev))
9251 return;
9252
9253 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9254 dump_hdr.dump_sign = dump_sign_all;
9255 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9256 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9257 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9258 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9259 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9260
9261 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9262 p += dump_hdr.hdr_size + 1;
9263
9264 if (CHIP_IS_E1(bp)) {
9265 for (i = 0; i < REGS_COUNT; i++)
9266 if (IS_E1_ONLINE(reg_addrs[i].info))
9267 for (j = 0; j < reg_addrs[i].size; j++)
9268 *p++ = REG_RD(bp,
9269 reg_addrs[i].addr + j*4);
9270
9271 } else { /* E1H */
9272 for (i = 0; i < REGS_COUNT; i++)
9273 if (IS_E1H_ONLINE(reg_addrs[i].info))
9274 for (j = 0; j < reg_addrs[i].size; j++)
9275 *p++ = REG_RD(bp,
9276 reg_addrs[i].addr + j*4);
9277 }
9278}
9279
0d28e49a
EG
9280#define PHY_FW_VER_LEN 10
9281
9282static void bnx2x_get_drvinfo(struct net_device *dev,
9283 struct ethtool_drvinfo *info)
9284{
9285 struct bnx2x *bp = netdev_priv(dev);
9286 u8 phy_fw_ver[PHY_FW_VER_LEN];
9287
9288 strcpy(info->driver, DRV_MODULE_NAME);
9289 strcpy(info->version, DRV_MODULE_VERSION);
9290
9291 phy_fw_ver[0] = '\0';
9292 if (bp->port.pmf) {
9293 bnx2x_acquire_phy_lock(bp);
9294 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9295 (bp->state != BNX2X_STATE_CLOSED),
9296 phy_fw_ver, PHY_FW_VER_LEN);
9297 bnx2x_release_phy_lock(bp);
9298 }
9299
9300 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9301 (bp->common.bc_ver & 0xff0000) >> 16,
9302 (bp->common.bc_ver & 0xff00) >> 8,
9303 (bp->common.bc_ver & 0xff),
9304 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9305 strcpy(info->bus_info, pci_name(bp->pdev));
9306 info->n_stats = BNX2X_NUM_STATS;
9307 info->testinfo_len = BNX2X_NUM_TESTS;
9308 info->eedump_len = bp->common.flash_size;
9309 info->regdump_len = bnx2x_get_regs_len(dev);
9310}
9311
a2fbb9ea
ET
9312static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9313{
9314 struct bnx2x *bp = netdev_priv(dev);
9315
9316 if (bp->flags & NO_WOL_FLAG) {
9317 wol->supported = 0;
9318 wol->wolopts = 0;
9319 } else {
9320 wol->supported = WAKE_MAGIC;
9321 if (bp->wol)
9322 wol->wolopts = WAKE_MAGIC;
9323 else
9324 wol->wolopts = 0;
9325 }
9326 memset(&wol->sopass, 0, sizeof(wol->sopass));
9327}
9328
9329static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9330{
9331 struct bnx2x *bp = netdev_priv(dev);
9332
9333 if (wol->wolopts & ~WAKE_MAGIC)
9334 return -EINVAL;
9335
9336 if (wol->wolopts & WAKE_MAGIC) {
9337 if (bp->flags & NO_WOL_FLAG)
9338 return -EINVAL;
9339
9340 bp->wol = 1;
34f80b04 9341 } else
a2fbb9ea 9342 bp->wol = 0;
34f80b04 9343
a2fbb9ea
ET
9344 return 0;
9345}
9346
9347static u32 bnx2x_get_msglevel(struct net_device *dev)
9348{
9349 struct bnx2x *bp = netdev_priv(dev);
9350
9351 return bp->msglevel;
9352}
9353
9354static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9355{
9356 struct bnx2x *bp = netdev_priv(dev);
9357
9358 if (capable(CAP_NET_ADMIN))
9359 bp->msglevel = level;
9360}
9361
9362static int bnx2x_nway_reset(struct net_device *dev)
9363{
9364 struct bnx2x *bp = netdev_priv(dev);
9365
34f80b04
EG
9366 if (!bp->port.pmf)
9367 return 0;
a2fbb9ea 9368
34f80b04 9369 if (netif_running(dev)) {
bb2a0f7a 9370 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9371 bnx2x_link_set(bp);
9372 }
a2fbb9ea
ET
9373
9374 return 0;
9375}
9376
ab6ad5a4 9377static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
9378{
9379 struct bnx2x *bp = netdev_priv(dev);
9380
f34d28ea
EG
9381 if (bp->flags & MF_FUNC_DIS)
9382 return 0;
9383
01e53298
NO
9384 return bp->link_vars.link_up;
9385}
9386
a2fbb9ea
ET
9387static int bnx2x_get_eeprom_len(struct net_device *dev)
9388{
9389 struct bnx2x *bp = netdev_priv(dev);
9390
34f80b04 9391 return bp->common.flash_size;
a2fbb9ea
ET
9392}
9393
9394static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9395{
34f80b04 9396 int port = BP_PORT(bp);
a2fbb9ea
ET
9397 int count, i;
9398 u32 val = 0;
9399
9400 /* adjust timeout for emulation/FPGA */
9401 count = NVRAM_TIMEOUT_COUNT;
9402 if (CHIP_REV_IS_SLOW(bp))
9403 count *= 100;
9404
9405 /* request access to nvram interface */
9406 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9407 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9408
9409 for (i = 0; i < count*10; i++) {
9410 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9411 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9412 break;
9413
9414 udelay(5);
9415 }
9416
9417 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9418 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9419 return -EBUSY;
9420 }
9421
9422 return 0;
9423}
9424
9425static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9426{
34f80b04 9427 int port = BP_PORT(bp);
a2fbb9ea
ET
9428 int count, i;
9429 u32 val = 0;
9430
9431 /* adjust timeout for emulation/FPGA */
9432 count = NVRAM_TIMEOUT_COUNT;
9433 if (CHIP_REV_IS_SLOW(bp))
9434 count *= 100;
9435
9436 /* relinquish nvram interface */
9437 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9438 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9439
9440 for (i = 0; i < count*10; i++) {
9441 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9442 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9443 break;
9444
9445 udelay(5);
9446 }
9447
9448 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9449 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9450 return -EBUSY;
9451 }
9452
9453 return 0;
9454}
9455
9456static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9457{
9458 u32 val;
9459
9460 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9461
9462 /* enable both bits, even on read */
9463 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9464 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9465 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9466}
9467
9468static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9469{
9470 u32 val;
9471
9472 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9473
9474 /* disable both bits, even after read */
9475 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9476 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9477 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9478}
9479
4781bfad 9480static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9481 u32 cmd_flags)
9482{
f1410647 9483 int count, i, rc;
a2fbb9ea
ET
9484 u32 val;
9485
9486 /* build the command word */
9487 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9488
9489 /* need to clear DONE bit separately */
9490 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9491
9492 /* address of the NVRAM to read from */
9493 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9494 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9495
9496 /* issue a read command */
9497 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9498
9499 /* adjust timeout for emulation/FPGA */
9500 count = NVRAM_TIMEOUT_COUNT;
9501 if (CHIP_REV_IS_SLOW(bp))
9502 count *= 100;
9503
9504 /* wait for completion */
9505 *ret_val = 0;
9506 rc = -EBUSY;
9507 for (i = 0; i < count; i++) {
9508 udelay(5);
9509 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9510
9511 if (val & MCPR_NVM_COMMAND_DONE) {
9512 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
9513 /* we read nvram data in cpu order
9514 * but ethtool sees it as an array of bytes
9515 * converting to big-endian will do the work */
4781bfad 9516 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9517 rc = 0;
9518 break;
9519 }
9520 }
9521
9522 return rc;
9523}
9524
9525static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9526 int buf_size)
9527{
9528 int rc;
9529 u32 cmd_flags;
4781bfad 9530 __be32 val;
a2fbb9ea
ET
9531
9532 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9533 DP(BNX2X_MSG_NVM,
c14423fe 9534 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9535 offset, buf_size);
9536 return -EINVAL;
9537 }
9538
34f80b04
EG
9539 if (offset + buf_size > bp->common.flash_size) {
9540 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9541 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9542 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9543 return -EINVAL;
9544 }
9545
9546 /* request access to nvram interface */
9547 rc = bnx2x_acquire_nvram_lock(bp);
9548 if (rc)
9549 return rc;
9550
9551 /* enable access to nvram interface */
9552 bnx2x_enable_nvram_access(bp);
9553
9554 /* read the first word(s) */
9555 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9556 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9557 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9558 memcpy(ret_buf, &val, 4);
9559
9560 /* advance to the next dword */
9561 offset += sizeof(u32);
9562 ret_buf += sizeof(u32);
9563 buf_size -= sizeof(u32);
9564 cmd_flags = 0;
9565 }
9566
9567 if (rc == 0) {
9568 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9569 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9570 memcpy(ret_buf, &val, 4);
9571 }
9572
9573 /* disable access to nvram interface */
9574 bnx2x_disable_nvram_access(bp);
9575 bnx2x_release_nvram_lock(bp);
9576
9577 return rc;
9578}
9579
9580static int bnx2x_get_eeprom(struct net_device *dev,
9581 struct ethtool_eeprom *eeprom, u8 *eebuf)
9582{
9583 struct bnx2x *bp = netdev_priv(dev);
9584 int rc;
9585
2add3acb
EG
9586 if (!netif_running(dev))
9587 return -EAGAIN;
9588
34f80b04 9589 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9590 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9591 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9592 eeprom->len, eeprom->len);
9593
9594 /* parameters already validated in ethtool_get_eeprom */
9595
9596 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9597
9598 return rc;
9599}
9600
9601static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9602 u32 cmd_flags)
9603{
f1410647 9604 int count, i, rc;
a2fbb9ea
ET
9605
9606 /* build the command word */
9607 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9608
9609 /* need to clear DONE bit separately */
9610 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9611
9612 /* write the data */
9613 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9614
9615 /* address of the NVRAM to write to */
9616 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9617 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9618
9619 /* issue the write command */
9620 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9621
9622 /* adjust timeout for emulation/FPGA */
9623 count = NVRAM_TIMEOUT_COUNT;
9624 if (CHIP_REV_IS_SLOW(bp))
9625 count *= 100;
9626
9627 /* wait for completion */
9628 rc = -EBUSY;
9629 for (i = 0; i < count; i++) {
9630 udelay(5);
9631 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9632 if (val & MCPR_NVM_COMMAND_DONE) {
9633 rc = 0;
9634 break;
9635 }
9636 }
9637
9638 return rc;
9639}
9640
f1410647 9641#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9642
9643static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9644 int buf_size)
9645{
9646 int rc;
9647 u32 cmd_flags;
9648 u32 align_offset;
4781bfad 9649 __be32 val;
a2fbb9ea 9650
34f80b04
EG
9651 if (offset + buf_size > bp->common.flash_size) {
9652 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9653 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9654 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9655 return -EINVAL;
9656 }
9657
9658 /* request access to nvram interface */
9659 rc = bnx2x_acquire_nvram_lock(bp);
9660 if (rc)
9661 return rc;
9662
9663 /* enable access to nvram interface */
9664 bnx2x_enable_nvram_access(bp);
9665
9666 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9667 align_offset = (offset & ~0x03);
9668 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9669
9670 if (rc == 0) {
9671 val &= ~(0xff << BYTE_OFFSET(offset));
9672 val |= (*data_buf << BYTE_OFFSET(offset));
9673
9674 /* nvram data is returned as an array of bytes
9675 * convert it back to cpu order */
9676 val = be32_to_cpu(val);
9677
a2fbb9ea
ET
9678 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9679 cmd_flags);
9680 }
9681
9682 /* disable access to nvram interface */
9683 bnx2x_disable_nvram_access(bp);
9684 bnx2x_release_nvram_lock(bp);
9685
9686 return rc;
9687}
9688
9689static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9690 int buf_size)
9691{
9692 int rc;
9693 u32 cmd_flags;
9694 u32 val;
9695 u32 written_so_far;
9696
34f80b04 9697 if (buf_size == 1) /* ethtool */
a2fbb9ea 9698 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9699
9700 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9701 DP(BNX2X_MSG_NVM,
c14423fe 9702 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9703 offset, buf_size);
9704 return -EINVAL;
9705 }
9706
34f80b04
EG
9707 if (offset + buf_size > bp->common.flash_size) {
9708 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9709 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9710 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9711 return -EINVAL;
9712 }
9713
9714 /* request access to nvram interface */
9715 rc = bnx2x_acquire_nvram_lock(bp);
9716 if (rc)
9717 return rc;
9718
9719 /* enable access to nvram interface */
9720 bnx2x_enable_nvram_access(bp);
9721
9722 written_so_far = 0;
9723 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9724 while ((written_so_far < buf_size) && (rc == 0)) {
9725 if (written_so_far == (buf_size - sizeof(u32)))
9726 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9727 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9728 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9729 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9730 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9731
9732 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9733
9734 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9735
9736 /* advance to the next dword */
9737 offset += sizeof(u32);
9738 data_buf += sizeof(u32);
9739 written_so_far += sizeof(u32);
9740 cmd_flags = 0;
9741 }
9742
9743 /* disable access to nvram interface */
9744 bnx2x_disable_nvram_access(bp);
9745 bnx2x_release_nvram_lock(bp);
9746
9747 return rc;
9748}
9749
9750static int bnx2x_set_eeprom(struct net_device *dev,
9751 struct ethtool_eeprom *eeprom, u8 *eebuf)
9752{
9753 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
9754 int port = BP_PORT(bp);
9755 int rc = 0;
a2fbb9ea 9756
9f4c9583
EG
9757 if (!netif_running(dev))
9758 return -EAGAIN;
9759
34f80b04 9760 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9761 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9762 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9763 eeprom->len, eeprom->len);
9764
9765 /* parameters already validated in ethtool_set_eeprom */
9766
f57a6025
EG
9767 /* PHY eeprom can be accessed only by the PMF */
9768 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9769 !bp->port.pmf)
9770 return -EINVAL;
9771
9772 if (eeprom->magic == 0x50485950) {
9773 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9774 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9775
f57a6025
EG
9776 bnx2x_acquire_phy_lock(bp);
9777 rc |= bnx2x_link_reset(&bp->link_params,
9778 &bp->link_vars, 0);
9779 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9780 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9781 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9782 MISC_REGISTERS_GPIO_HIGH, port);
9783 bnx2x_release_phy_lock(bp);
9784 bnx2x_link_report(bp);
9785
9786 } else if (eeprom->magic == 0x50485952) {
9787 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
f34d28ea 9788 if (bp->state == BNX2X_STATE_OPEN) {
4a37fb66 9789 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
9790 rc |= bnx2x_link_reset(&bp->link_params,
9791 &bp->link_vars, 1);
9792
9793 rc |= bnx2x_phy_init(&bp->link_params,
9794 &bp->link_vars);
4a37fb66 9795 bnx2x_release_phy_lock(bp);
f57a6025
EG
9796 bnx2x_calc_fc_adv(bp);
9797 }
9798 } else if (eeprom->magic == 0x53985943) {
9799 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9800 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9801 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9802 u8 ext_phy_addr =
659bc5c4 9803 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
f57a6025
EG
9804
9805 /* DSP Remove Download Mode */
9806 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9807 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9808
f57a6025
EG
9809 bnx2x_acquire_phy_lock(bp);
9810
9811 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9812
9813 /* wait 0.5 sec to allow it to run */
9814 msleep(500);
9815 bnx2x_ext_phy_hw_reset(bp, port);
9816 msleep(500);
9817 bnx2x_release_phy_lock(bp);
9818 }
9819 } else
c18487ee 9820 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9821
9822 return rc;
9823}
9824
9825static int bnx2x_get_coalesce(struct net_device *dev,
9826 struct ethtool_coalesce *coal)
9827{
9828 struct bnx2x *bp = netdev_priv(dev);
9829
9830 memset(coal, 0, sizeof(struct ethtool_coalesce));
9831
9832 coal->rx_coalesce_usecs = bp->rx_ticks;
9833 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9834
9835 return 0;
9836}
9837
ca00392c 9838#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9839static int bnx2x_set_coalesce(struct net_device *dev,
9840 struct ethtool_coalesce *coal)
9841{
9842 struct bnx2x *bp = netdev_priv(dev);
9843
9844 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9845 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9846 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9847
9848 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9849 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9850 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9851
34f80b04 9852 if (netif_running(dev))
a2fbb9ea
ET
9853 bnx2x_update_coalesce(bp);
9854
9855 return 0;
9856}
9857
9858static void bnx2x_get_ringparam(struct net_device *dev,
9859 struct ethtool_ringparam *ering)
9860{
9861 struct bnx2x *bp = netdev_priv(dev);
9862
9863 ering->rx_max_pending = MAX_RX_AVAIL;
9864 ering->rx_mini_max_pending = 0;
9865 ering->rx_jumbo_max_pending = 0;
9866
9867 ering->rx_pending = bp->rx_ring_size;
9868 ering->rx_mini_pending = 0;
9869 ering->rx_jumbo_pending = 0;
9870
9871 ering->tx_max_pending = MAX_TX_AVAIL;
9872 ering->tx_pending = bp->tx_ring_size;
9873}
9874
9875static int bnx2x_set_ringparam(struct net_device *dev,
9876 struct ethtool_ringparam *ering)
9877{
9878 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9879 int rc = 0;
a2fbb9ea
ET
9880
9881 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9882 (ering->tx_pending > MAX_TX_AVAIL) ||
9883 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9884 return -EINVAL;
9885
9886 bp->rx_ring_size = ering->rx_pending;
9887 bp->tx_ring_size = ering->tx_pending;
9888
34f80b04
EG
9889 if (netif_running(dev)) {
9890 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9891 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9892 }
9893
34f80b04 9894 return rc;
a2fbb9ea
ET
9895}
9896
9897static void bnx2x_get_pauseparam(struct net_device *dev,
9898 struct ethtool_pauseparam *epause)
9899{
9900 struct bnx2x *bp = netdev_priv(dev);
9901
356e2385
EG
9902 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9903 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9904 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9905
c0700f90
DM
9906 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9907 BNX2X_FLOW_CTRL_RX);
9908 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9909 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9910
9911 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9912 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9913 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9914}
9915
9916static int bnx2x_set_pauseparam(struct net_device *dev,
9917 struct ethtool_pauseparam *epause)
9918{
9919 struct bnx2x *bp = netdev_priv(dev);
9920
34f80b04
EG
9921 if (IS_E1HMF(bp))
9922 return 0;
9923
a2fbb9ea
ET
9924 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9925 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9926 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9927
c0700f90 9928 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9929
f1410647 9930 if (epause->rx_pause)
c0700f90 9931 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9932
f1410647 9933 if (epause->tx_pause)
c0700f90 9934 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9935
c0700f90
DM
9936 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9937 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9938
c18487ee 9939 if (epause->autoneg) {
34f80b04 9940 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9941 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9942 return -EINVAL;
9943 }
a2fbb9ea 9944
c18487ee 9945 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9946 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9947 }
a2fbb9ea 9948
c18487ee
YR
9949 DP(NETIF_MSG_LINK,
9950 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9951
9952 if (netif_running(dev)) {
bb2a0f7a 9953 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9954 bnx2x_link_set(bp);
9955 }
a2fbb9ea
ET
9956
9957 return 0;
9958}
9959
df0f2343
VZ
9960static int bnx2x_set_flags(struct net_device *dev, u32 data)
9961{
9962 struct bnx2x *bp = netdev_priv(dev);
9963 int changed = 0;
9964 int rc = 0;
9965
9966 /* TPA requires Rx CSUM offloading */
9967 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9968 if (!(dev->features & NETIF_F_LRO)) {
9969 dev->features |= NETIF_F_LRO;
9970 bp->flags |= TPA_ENABLE_FLAG;
9971 changed = 1;
9972 }
9973
9974 } else if (dev->features & NETIF_F_LRO) {
9975 dev->features &= ~NETIF_F_LRO;
9976 bp->flags &= ~TPA_ENABLE_FLAG;
9977 changed = 1;
9978 }
9979
9980 if (changed && netif_running(dev)) {
9981 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9982 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9983 }
9984
9985 return rc;
9986}
9987
a2fbb9ea
ET
9988static u32 bnx2x_get_rx_csum(struct net_device *dev)
9989{
9990 struct bnx2x *bp = netdev_priv(dev);
9991
9992 return bp->rx_csum;
9993}
9994
9995static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9996{
9997 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9998 int rc = 0;
a2fbb9ea
ET
9999
10000 bp->rx_csum = data;
df0f2343
VZ
10001
10002 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10003 TPA'ed packets will be discarded due to wrong TCP CSUM */
10004 if (!data) {
10005 u32 flags = ethtool_op_get_flags(dev);
10006
10007 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10008 }
10009
10010 return rc;
a2fbb9ea
ET
10011}
10012
10013static int bnx2x_set_tso(struct net_device *dev, u32 data)
10014{
755735eb 10015 if (data) {
a2fbb9ea 10016 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10017 dev->features |= NETIF_F_TSO6;
10018 } else {
a2fbb9ea 10019 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10020 dev->features &= ~NETIF_F_TSO6;
10021 }
10022
a2fbb9ea
ET
10023 return 0;
10024}
10025
f3c87cdd 10026static const struct {
a2fbb9ea
ET
10027 char string[ETH_GSTRING_LEN];
10028} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
10029 { "register_test (offline)" },
10030 { "memory_test (offline)" },
10031 { "loopback_test (offline)" },
10032 { "nvram_test (online)" },
10033 { "interrupt_test (online)" },
10034 { "link_test (online)" },
d3d4f495 10035 { "idle check (online)" }
a2fbb9ea
ET
10036};
10037
f3c87cdd
YG
10038static int bnx2x_test_registers(struct bnx2x *bp)
10039{
10040 int idx, i, rc = -ENODEV;
10041 u32 wr_val = 0;
9dabc424 10042 int port = BP_PORT(bp);
f3c87cdd
YG
10043 static const struct {
10044 u32 offset0;
10045 u32 offset1;
10046 u32 mask;
10047 } reg_tbl[] = {
10048/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10049 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10050 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10051 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10052 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10053 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10054 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10055 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10056 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10057 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10058/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10059 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10060 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10061 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10062 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10063 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10064 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10065 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 10066 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
10067 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10068/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
10069 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10070 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10071 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10072 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10073 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10074 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10075 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10076 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
10077 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10078/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
10079 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10080 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10081 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10082 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10083 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10084 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10085
10086 { 0xffffffff, 0, 0x00000000 }
10087 };
10088
10089 if (!netif_running(bp->dev))
10090 return rc;
10091
10092 /* Repeat the test twice:
10093 First by writing 0x00000000, second by writing 0xffffffff */
10094 for (idx = 0; idx < 2; idx++) {
10095
10096 switch (idx) {
10097 case 0:
10098 wr_val = 0;
10099 break;
10100 case 1:
10101 wr_val = 0xffffffff;
10102 break;
10103 }
10104
10105 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10106 u32 offset, mask, save_val, val;
f3c87cdd
YG
10107
10108 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10109 mask = reg_tbl[i].mask;
10110
10111 save_val = REG_RD(bp, offset);
10112
10113 REG_WR(bp, offset, wr_val);
10114 val = REG_RD(bp, offset);
10115
10116 /* Restore the original register's value */
10117 REG_WR(bp, offset, save_val);
10118
10119 /* verify that value is as expected value */
10120 if ((val & mask) != (wr_val & mask))
10121 goto test_reg_exit;
10122 }
10123 }
10124
10125 rc = 0;
10126
10127test_reg_exit:
10128 return rc;
10129}
10130
10131static int bnx2x_test_memory(struct bnx2x *bp)
10132{
10133 int i, j, rc = -ENODEV;
10134 u32 val;
10135 static const struct {
10136 u32 offset;
10137 int size;
10138 } mem_tbl[] = {
10139 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10140 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10141 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10142 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10143 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10144 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10145 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10146
10147 { 0xffffffff, 0 }
10148 };
10149 static const struct {
10150 char *name;
10151 u32 offset;
9dabc424
YG
10152 u32 e1_mask;
10153 u32 e1h_mask;
f3c87cdd 10154 } prty_tbl[] = {
9dabc424
YG
10155 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10156 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10157 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10158 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10159 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10160 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10161
10162 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
10163 };
10164
10165 if (!netif_running(bp->dev))
10166 return rc;
10167
10168 /* Go through all the memories */
10169 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10170 for (j = 0; j < mem_tbl[i].size; j++)
10171 REG_RD(bp, mem_tbl[i].offset + j*4);
10172
10173 /* Check the parity status */
10174 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10175 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
10176 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10177 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
10178 DP(NETIF_MSG_HW,
10179 "%s is 0x%x\n", prty_tbl[i].name, val);
10180 goto test_mem_exit;
10181 }
10182 }
10183
10184 rc = 0;
10185
10186test_mem_exit:
10187 return rc;
10188}
10189
f3c87cdd
YG
10190static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10191{
10192 int cnt = 1000;
10193
10194 if (link_up)
10195 while (bnx2x_link_test(bp) && cnt--)
10196 msleep(10);
10197}
10198
10199static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10200{
10201 unsigned int pkt_size, num_pkts, i;
10202 struct sk_buff *skb;
10203 unsigned char *packet;
ca00392c 10204 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
54b9ddaa 10205 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
f3c87cdd
YG
10206 u16 tx_start_idx, tx_idx;
10207 u16 rx_start_idx, rx_idx;
ca00392c 10208 u16 pkt_prod, bd_prod;
f3c87cdd 10209 struct sw_tx_bd *tx_buf;
ca00392c
EG
10210 struct eth_tx_start_bd *tx_start_bd;
10211 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
10212 dma_addr_t mapping;
10213 union eth_rx_cqe *cqe;
10214 u8 cqe_fp_flags;
10215 struct sw_rx_bd *rx_buf;
10216 u16 len;
10217 int rc = -ENODEV;
10218
b5bf9068
EG
10219 /* check the loopback mode */
10220 switch (loopback_mode) {
10221 case BNX2X_PHY_LOOPBACK:
10222 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10223 return -EINVAL;
10224 break;
10225 case BNX2X_MAC_LOOPBACK:
f3c87cdd 10226 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 10227 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
10228 break;
10229 default:
f3c87cdd 10230 return -EINVAL;
b5bf9068 10231 }
f3c87cdd 10232
b5bf9068
EG
10233 /* prepare the loopback packet */
10234 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10235 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
10236 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10237 if (!skb) {
10238 rc = -ENOMEM;
10239 goto test_loopback_exit;
10240 }
10241 packet = skb_put(skb, pkt_size);
10242 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
10243 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10244 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
10245 for (i = ETH_HLEN; i < pkt_size; i++)
10246 packet[i] = (unsigned char) (i & 0xff);
10247
b5bf9068 10248 /* send the loopback packet */
f3c87cdd 10249 num_pkts = 0;
ca00392c
EG
10250 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10251 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 10252
ca00392c
EG
10253 pkt_prod = fp_tx->tx_pkt_prod++;
10254 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10255 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 10256 tx_buf->skb = skb;
ca00392c 10257 tx_buf->flags = 0;
f3c87cdd 10258
ca00392c
EG
10259 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10260 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
10261 mapping = pci_map_single(bp->pdev, skb->data,
10262 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
10263 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10264 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10265 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10266 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10267 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10268 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10269 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10270 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10271
10272 /* turn on parsing and get a BD */
10273 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10274 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10275
10276 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10277
58f4c4cf
EG
10278 wmb();
10279
ca00392c
EG
10280 fp_tx->tx_db.data.prod += 2;
10281 barrier();
54b9ddaa 10282 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
f3c87cdd
YG
10283
10284 mmiowb();
10285
10286 num_pkts++;
ca00392c 10287 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10288
10289 udelay(100);
10290
ca00392c 10291 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10292 if (tx_idx != tx_start_idx + num_pkts)
10293 goto test_loopback_exit;
10294
ca00392c 10295 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10296 if (rx_idx != rx_start_idx + num_pkts)
10297 goto test_loopback_exit;
10298
ca00392c 10299 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10300 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10301 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10302 goto test_loopback_rx_exit;
10303
10304 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10305 if (len != pkt_size)
10306 goto test_loopback_rx_exit;
10307
ca00392c 10308 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10309 skb = rx_buf->skb;
10310 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10311 for (i = ETH_HLEN; i < pkt_size; i++)
10312 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10313 goto test_loopback_rx_exit;
10314
10315 rc = 0;
10316
10317test_loopback_rx_exit:
f3c87cdd 10318
ca00392c
EG
10319 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10320 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10321 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10322 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10323
10324 /* Update producers */
ca00392c
EG
10325 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10326 fp_rx->rx_sge_prod);
f3c87cdd
YG
10327
10328test_loopback_exit:
10329 bp->link_params.loopback_mode = LOOPBACK_NONE;
10330
10331 return rc;
10332}
10333
10334static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10335{
b5bf9068 10336 int rc = 0, res;
f3c87cdd
YG
10337
10338 if (!netif_running(bp->dev))
10339 return BNX2X_LOOPBACK_FAILED;
10340
f8ef6e44 10341 bnx2x_netif_stop(bp, 1);
3910c8ae 10342 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10343
b5bf9068
EG
10344 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10345 if (res) {
10346 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10347 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10348 }
10349
b5bf9068
EG
10350 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10351 if (res) {
10352 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10353 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10354 }
10355
3910c8ae 10356 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10357 bnx2x_netif_start(bp);
10358
10359 return rc;
10360}
10361
10362#define CRC32_RESIDUAL 0xdebb20e3
10363
10364static int bnx2x_test_nvram(struct bnx2x *bp)
10365{
10366 static const struct {
10367 int offset;
10368 int size;
10369 } nvram_tbl[] = {
10370 { 0, 0x14 }, /* bootstrap */
10371 { 0x14, 0xec }, /* dir */
10372 { 0x100, 0x350 }, /* manuf_info */
10373 { 0x450, 0xf0 }, /* feature_info */
10374 { 0x640, 0x64 }, /* upgrade_key_info */
10375 { 0x6a4, 0x64 },
10376 { 0x708, 0x70 }, /* manuf_key_info */
10377 { 0x778, 0x70 },
10378 { 0, 0 }
10379 };
4781bfad 10380 __be32 buf[0x350 / 4];
f3c87cdd
YG
10381 u8 *data = (u8 *)buf;
10382 int i, rc;
ab6ad5a4 10383 u32 magic, crc;
f3c87cdd
YG
10384
10385 rc = bnx2x_nvram_read(bp, 0, data, 4);
10386 if (rc) {
f5372251 10387 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10388 goto test_nvram_exit;
10389 }
10390
10391 magic = be32_to_cpu(buf[0]);
10392 if (magic != 0x669955aa) {
10393 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10394 rc = -ENODEV;
10395 goto test_nvram_exit;
10396 }
10397
10398 for (i = 0; nvram_tbl[i].size; i++) {
10399
10400 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10401 nvram_tbl[i].size);
10402 if (rc) {
10403 DP(NETIF_MSG_PROBE,
f5372251 10404 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10405 goto test_nvram_exit;
10406 }
10407
ab6ad5a4
EG
10408 crc = ether_crc_le(nvram_tbl[i].size, data);
10409 if (crc != CRC32_RESIDUAL) {
f3c87cdd 10410 DP(NETIF_MSG_PROBE,
ab6ad5a4 10411 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
10412 rc = -ENODEV;
10413 goto test_nvram_exit;
10414 }
10415 }
10416
10417test_nvram_exit:
10418 return rc;
10419}
10420
10421static int bnx2x_test_intr(struct bnx2x *bp)
10422{
10423 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10424 int i, rc;
10425
10426 if (!netif_running(bp->dev))
10427 return -ENODEV;
10428
8d9c5f34 10429 config->hdr.length = 0;
af246401
EG
10430 if (CHIP_IS_E1(bp))
10431 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10432 else
10433 config->hdr.offset = BP_FUNC(bp);
0626b899 10434 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10435 config->hdr.reserved1 = 0;
10436
e665bfda
MC
10437 bp->set_mac_pending++;
10438 smp_wmb();
f3c87cdd
YG
10439 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10440 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10441 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10442 if (rc == 0) {
f3c87cdd
YG
10443 for (i = 0; i < 10; i++) {
10444 if (!bp->set_mac_pending)
10445 break;
e665bfda 10446 smp_rmb();
f3c87cdd
YG
10447 msleep_interruptible(10);
10448 }
10449 if (i == 10)
10450 rc = -ENODEV;
10451 }
10452
10453 return rc;
10454}
10455
a2fbb9ea
ET
10456static void bnx2x_self_test(struct net_device *dev,
10457 struct ethtool_test *etest, u64 *buf)
10458{
10459 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10460
10461 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10462
f3c87cdd 10463 if (!netif_running(dev))
a2fbb9ea 10464 return;
a2fbb9ea 10465
33471629 10466 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10467 if (IS_E1HMF(bp))
10468 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10469
10470 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10471 int port = BP_PORT(bp);
10472 u32 val;
f3c87cdd
YG
10473 u8 link_up;
10474
279abdf5
EG
10475 /* save current value of input enable for TX port IF */
10476 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10477 /* disable input for TX port IF */
10478 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10479
061bc702 10480 link_up = (bnx2x_link_test(bp) == 0);
f3c87cdd
YG
10481 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10482 bnx2x_nic_load(bp, LOAD_DIAG);
10483 /* wait until link state is restored */
10484 bnx2x_wait_for_link(bp, link_up);
10485
10486 if (bnx2x_test_registers(bp) != 0) {
10487 buf[0] = 1;
10488 etest->flags |= ETH_TEST_FL_FAILED;
10489 }
10490 if (bnx2x_test_memory(bp) != 0) {
10491 buf[1] = 1;
10492 etest->flags |= ETH_TEST_FL_FAILED;
10493 }
10494 buf[2] = bnx2x_test_loopback(bp, link_up);
10495 if (buf[2] != 0)
10496 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10497
f3c87cdd 10498 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10499
10500 /* restore input for TX port IF */
10501 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10502
f3c87cdd
YG
10503 bnx2x_nic_load(bp, LOAD_NORMAL);
10504 /* wait until link state is restored */
10505 bnx2x_wait_for_link(bp, link_up);
10506 }
10507 if (bnx2x_test_nvram(bp) != 0) {
10508 buf[3] = 1;
a2fbb9ea
ET
10509 etest->flags |= ETH_TEST_FL_FAILED;
10510 }
f3c87cdd
YG
10511 if (bnx2x_test_intr(bp) != 0) {
10512 buf[4] = 1;
10513 etest->flags |= ETH_TEST_FL_FAILED;
10514 }
10515 if (bp->port.pmf)
10516 if (bnx2x_link_test(bp) != 0) {
10517 buf[5] = 1;
10518 etest->flags |= ETH_TEST_FL_FAILED;
10519 }
f3c87cdd
YG
10520
10521#ifdef BNX2X_EXTRA_DEBUG
10522 bnx2x_panic_dump(bp);
10523#endif
a2fbb9ea
ET
10524}
10525
de832a55
EG
10526static const struct {
10527 long offset;
10528 int size;
10529 u8 string[ETH_GSTRING_LEN];
10530} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10531/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10532 { Q_STATS_OFFSET32(error_bytes_received_hi),
10533 8, "[%d]: rx_error_bytes" },
10534 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10535 8, "[%d]: rx_ucast_packets" },
10536 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10537 8, "[%d]: rx_mcast_packets" },
10538 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10539 8, "[%d]: rx_bcast_packets" },
10540 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10541 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10542 4, "[%d]: rx_phy_ip_err_discards"},
10543 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10544 4, "[%d]: rx_skb_alloc_discard" },
10545 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10546
10547/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10548 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10549 8, "[%d]: tx_packets" }
10550};
10551
bb2a0f7a
YG
10552static const struct {
10553 long offset;
10554 int size;
10555 u32 flags;
66e855f3
YG
10556#define STATS_FLAGS_PORT 1
10557#define STATS_FLAGS_FUNC 2
de832a55 10558#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10559 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10560} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10561/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10562 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10563 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10564 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10565 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10566 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10567 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10568 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10569 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10570 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10571 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10572 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10573 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10574 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10575 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10576 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10577 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10578 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10579/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10580 8, STATS_FLAGS_PORT, "rx_fragments" },
10581 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10582 8, STATS_FLAGS_PORT, "rx_jabbers" },
10583 { STATS_OFFSET32(no_buff_discard_hi),
10584 8, STATS_FLAGS_BOTH, "rx_discards" },
10585 { STATS_OFFSET32(mac_filter_discard),
10586 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10587 { STATS_OFFSET32(xxoverflow_discard),
10588 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10589 { STATS_OFFSET32(brb_drop_hi),
10590 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10591 { STATS_OFFSET32(brb_truncate_hi),
10592 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10593 { STATS_OFFSET32(pause_frames_received_hi),
10594 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10595 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10596 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10597 { STATS_OFFSET32(nig_timer_max),
10598 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10599/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10600 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10601 { STATS_OFFSET32(rx_skb_alloc_failed),
10602 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10603 { STATS_OFFSET32(hw_csum_err),
10604 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10605
10606 { STATS_OFFSET32(total_bytes_transmitted_hi),
10607 8, STATS_FLAGS_BOTH, "tx_bytes" },
10608 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10609 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10610 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10611 8, STATS_FLAGS_BOTH, "tx_packets" },
10612 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10613 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10614 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10615 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10616 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10617 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10618 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10619 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10620/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10621 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10622 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10623 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10624 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10625 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10626 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10627 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10628 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10629 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10630 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10631 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10632 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10633 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10634 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10635 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10636 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10637 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10638 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10639 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10640/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10641 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10642 { STATS_OFFSET32(pause_frames_sent_hi),
10643 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10644};
10645
de832a55
EG
10646#define IS_PORT_STAT(i) \
10647 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10648#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10649#define IS_E1HMF_MODE_STAT(bp) \
10650 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10651
15f0a394
BH
10652static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10653{
10654 struct bnx2x *bp = netdev_priv(dev);
10655 int i, num_stats;
10656
10657 switch(stringset) {
10658 case ETH_SS_STATS:
10659 if (is_multi(bp)) {
54b9ddaa 10660 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
15f0a394
BH
10661 if (!IS_E1HMF_MODE_STAT(bp))
10662 num_stats += BNX2X_NUM_STATS;
10663 } else {
10664 if (IS_E1HMF_MODE_STAT(bp)) {
10665 num_stats = 0;
10666 for (i = 0; i < BNX2X_NUM_STATS; i++)
10667 if (IS_FUNC_STAT(i))
10668 num_stats++;
10669 } else
10670 num_stats = BNX2X_NUM_STATS;
10671 }
10672 return num_stats;
10673
10674 case ETH_SS_TEST:
10675 return BNX2X_NUM_TESTS;
10676
10677 default:
10678 return -EINVAL;
10679 }
10680}
10681
a2fbb9ea
ET
10682static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10683{
bb2a0f7a 10684 struct bnx2x *bp = netdev_priv(dev);
de832a55 10685 int i, j, k;
bb2a0f7a 10686
a2fbb9ea
ET
10687 switch (stringset) {
10688 case ETH_SS_STATS:
de832a55
EG
10689 if (is_multi(bp)) {
10690 k = 0;
54b9ddaa 10691 for_each_queue(bp, i) {
de832a55
EG
10692 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10693 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10694 bnx2x_q_stats_arr[j].string, i);
10695 k += BNX2X_NUM_Q_STATS;
10696 }
10697 if (IS_E1HMF_MODE_STAT(bp))
10698 break;
10699 for (j = 0; j < BNX2X_NUM_STATS; j++)
10700 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10701 bnx2x_stats_arr[j].string);
10702 } else {
10703 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10704 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10705 continue;
10706 strcpy(buf + j*ETH_GSTRING_LEN,
10707 bnx2x_stats_arr[i].string);
10708 j++;
10709 }
bb2a0f7a 10710 }
a2fbb9ea
ET
10711 break;
10712
10713 case ETH_SS_TEST:
10714 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10715 break;
10716 }
10717}
10718
a2fbb9ea
ET
10719static void bnx2x_get_ethtool_stats(struct net_device *dev,
10720 struct ethtool_stats *stats, u64 *buf)
10721{
10722 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10723 u32 *hw_stats, *offset;
10724 int i, j, k;
bb2a0f7a 10725
de832a55
EG
10726 if (is_multi(bp)) {
10727 k = 0;
54b9ddaa 10728 for_each_queue(bp, i) {
de832a55
EG
10729 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10730 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10731 if (bnx2x_q_stats_arr[j].size == 0) {
10732 /* skip this counter */
10733 buf[k + j] = 0;
10734 continue;
10735 }
10736 offset = (hw_stats +
10737 bnx2x_q_stats_arr[j].offset);
10738 if (bnx2x_q_stats_arr[j].size == 4) {
10739 /* 4-byte counter */
10740 buf[k + j] = (u64) *offset;
10741 continue;
10742 }
10743 /* 8-byte counter */
10744 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10745 }
10746 k += BNX2X_NUM_Q_STATS;
10747 }
10748 if (IS_E1HMF_MODE_STAT(bp))
10749 return;
10750 hw_stats = (u32 *)&bp->eth_stats;
10751 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10752 if (bnx2x_stats_arr[j].size == 0) {
10753 /* skip this counter */
10754 buf[k + j] = 0;
10755 continue;
10756 }
10757 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10758 if (bnx2x_stats_arr[j].size == 4) {
10759 /* 4-byte counter */
10760 buf[k + j] = (u64) *offset;
10761 continue;
10762 }
10763 /* 8-byte counter */
10764 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10765 }
de832a55
EG
10766 } else {
10767 hw_stats = (u32 *)&bp->eth_stats;
10768 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10769 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10770 continue;
10771 if (bnx2x_stats_arr[i].size == 0) {
10772 /* skip this counter */
10773 buf[j] = 0;
10774 j++;
10775 continue;
10776 }
10777 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10778 if (bnx2x_stats_arr[i].size == 4) {
10779 /* 4-byte counter */
10780 buf[j] = (u64) *offset;
10781 j++;
10782 continue;
10783 }
10784 /* 8-byte counter */
10785 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10786 j++;
a2fbb9ea 10787 }
a2fbb9ea
ET
10788 }
10789}
10790
10791static int bnx2x_phys_id(struct net_device *dev, u32 data)
10792{
10793 struct bnx2x *bp = netdev_priv(dev);
10794 int i;
10795
34f80b04
EG
10796 if (!netif_running(dev))
10797 return 0;
10798
10799 if (!bp->port.pmf)
10800 return 0;
10801
a2fbb9ea
ET
10802 if (data == 0)
10803 data = 2;
10804
10805 for (i = 0; i < (data * 2); i++) {
c18487ee 10806 if ((i % 2) == 0)
7846e471
YR
10807 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10808 SPEED_1000);
c18487ee 10809 else
7846e471 10810 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
c18487ee 10811
a2fbb9ea
ET
10812 msleep_interruptible(500);
10813 if (signal_pending(current))
10814 break;
10815 }
10816
c18487ee 10817 if (bp->link_vars.link_up)
7846e471
YR
10818 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10819 bp->link_vars.line_speed);
a2fbb9ea
ET
10820
10821 return 0;
10822}
10823
0fc0b732 10824static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10825 .get_settings = bnx2x_get_settings,
10826 .set_settings = bnx2x_set_settings,
10827 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10828 .get_regs_len = bnx2x_get_regs_len,
10829 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10830 .get_wol = bnx2x_get_wol,
10831 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10832 .get_msglevel = bnx2x_get_msglevel,
10833 .set_msglevel = bnx2x_set_msglevel,
10834 .nway_reset = bnx2x_nway_reset,
01e53298 10835 .get_link = bnx2x_get_link,
7a9b2557
VZ
10836 .get_eeprom_len = bnx2x_get_eeprom_len,
10837 .get_eeprom = bnx2x_get_eeprom,
10838 .set_eeprom = bnx2x_set_eeprom,
10839 .get_coalesce = bnx2x_get_coalesce,
10840 .set_coalesce = bnx2x_set_coalesce,
10841 .get_ringparam = bnx2x_get_ringparam,
10842 .set_ringparam = bnx2x_set_ringparam,
10843 .get_pauseparam = bnx2x_get_pauseparam,
10844 .set_pauseparam = bnx2x_set_pauseparam,
10845 .get_rx_csum = bnx2x_get_rx_csum,
10846 .set_rx_csum = bnx2x_set_rx_csum,
10847 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10848 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10849 .set_flags = bnx2x_set_flags,
10850 .get_flags = ethtool_op_get_flags,
10851 .get_sg = ethtool_op_get_sg,
10852 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10853 .get_tso = ethtool_op_get_tso,
10854 .set_tso = bnx2x_set_tso,
7a9b2557 10855 .self_test = bnx2x_self_test,
15f0a394 10856 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 10857 .get_strings = bnx2x_get_strings,
a2fbb9ea 10858 .phys_id = bnx2x_phys_id,
bb2a0f7a 10859 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10860};
10861
10862/* end of ethtool_ops */
10863
10864/****************************************************************************
10865* General service functions
10866****************************************************************************/
10867
10868static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10869{
10870 u16 pmcsr;
10871
10872 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10873
10874 switch (state) {
10875 case PCI_D0:
34f80b04 10876 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10877 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10878 PCI_PM_CTRL_PME_STATUS));
10879
10880 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10881 /* delay required during transition out of D3hot */
a2fbb9ea 10882 msleep(20);
34f80b04 10883 break;
a2fbb9ea 10884
34f80b04
EG
10885 case PCI_D3hot:
10886 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10887 pmcsr |= 3;
a2fbb9ea 10888
34f80b04
EG
10889 if (bp->wol)
10890 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10891
34f80b04
EG
10892 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10893 pmcsr);
a2fbb9ea 10894
34f80b04
EG
10895 /* No more memory access after this point until
10896 * device is brought back to D0.
10897 */
10898 break;
10899
10900 default:
10901 return -EINVAL;
10902 }
10903 return 0;
a2fbb9ea
ET
10904}
10905
237907c1
EG
10906static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10907{
10908 u16 rx_cons_sb;
10909
10910 /* Tell compiler that status block fields can change */
10911 barrier();
10912 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10913 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10914 rx_cons_sb++;
10915 return (fp->rx_comp_cons != rx_cons_sb);
10916}
10917
34f80b04
EG
10918/*
10919 * net_device service functions
10920 */
10921
a2fbb9ea
ET
10922static int bnx2x_poll(struct napi_struct *napi, int budget)
10923{
54b9ddaa 10924 int work_done = 0;
a2fbb9ea
ET
10925 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10926 napi);
10927 struct bnx2x *bp = fp->bp;
a2fbb9ea 10928
54b9ddaa 10929 while (1) {
a2fbb9ea 10930#ifdef BNX2X_STOP_ON_ERROR
54b9ddaa
VZ
10931 if (unlikely(bp->panic)) {
10932 napi_complete(napi);
10933 return 0;
10934 }
a2fbb9ea
ET
10935#endif
10936
54b9ddaa
VZ
10937 if (bnx2x_has_tx_work(fp))
10938 bnx2x_tx_int(fp);
356e2385 10939
54b9ddaa
VZ
10940 if (bnx2x_has_rx_work(fp)) {
10941 work_done += bnx2x_rx_int(fp, budget - work_done);
a2fbb9ea 10942
54b9ddaa
VZ
10943 /* must not complete if we consumed full budget */
10944 if (work_done >= budget)
10945 break;
10946 }
a2fbb9ea 10947
54b9ddaa
VZ
10948 /* Fall out from the NAPI loop if needed */
10949 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10950 bnx2x_update_fpsb_idx(fp);
10951 /* bnx2x_has_rx_work() reads the status block, thus we need
10952 * to ensure that status block indices have been actually read
10953 * (bnx2x_update_fpsb_idx) prior to this check
10954 * (bnx2x_has_rx_work) so that we won't write the "newer"
10955 * value of the status block to IGU (if there was a DMA right
10956 * after bnx2x_has_rx_work and if there is no rmb, the memory
10957 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10958 * before bnx2x_ack_sb). In this case there will never be
10959 * another interrupt until there is another update of the
10960 * status block, while there is still unhandled work.
10961 */
10962 rmb();
a2fbb9ea 10963
54b9ddaa
VZ
10964 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10965 napi_complete(napi);
10966 /* Re-enable interrupts */
10967 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10968 le16_to_cpu(fp->fp_c_idx),
10969 IGU_INT_NOP, 1);
10970 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10971 le16_to_cpu(fp->fp_u_idx),
10972 IGU_INT_ENABLE, 1);
10973 break;
10974 }
10975 }
a2fbb9ea 10976 }
356e2385 10977
a2fbb9ea
ET
10978 return work_done;
10979}
10980
755735eb
EG
10981
10982/* we split the first BD into headers and data BDs
33471629 10983 * to ease the pain of our fellow microcode engineers
755735eb
EG
10984 * we use one mapping for both BDs
10985 * So far this has only been observed to happen
10986 * in Other Operating Systems(TM)
10987 */
10988static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10989 struct bnx2x_fastpath *fp,
ca00392c
EG
10990 struct sw_tx_bd *tx_buf,
10991 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
10992 u16 bd_prod, int nbd)
10993{
ca00392c 10994 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
10995 struct eth_tx_bd *d_tx_bd;
10996 dma_addr_t mapping;
10997 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10998
10999 /* first fix first BD */
11000 h_tx_bd->nbd = cpu_to_le16(nbd);
11001 h_tx_bd->nbytes = cpu_to_le16(hlen);
11002
11003 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11004 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11005 h_tx_bd->addr_lo, h_tx_bd->nbd);
11006
11007 /* now get a new data BD
11008 * (after the pbd) and fill it */
11009 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 11010 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
11011
11012 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11013 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11014
11015 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11016 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11017 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
11018
11019 /* this marks the BD as one that has no individual mapping */
11020 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11021
755735eb
EG
11022 DP(NETIF_MSG_TX_QUEUED,
11023 "TSO split data size is %d (%x:%x)\n",
11024 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11025
ca00392c
EG
11026 /* update tx_bd */
11027 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
11028
11029 return bd_prod;
11030}
11031
11032static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11033{
11034 if (fix > 0)
11035 csum = (u16) ~csum_fold(csum_sub(csum,
11036 csum_partial(t_header - fix, fix, 0)));
11037
11038 else if (fix < 0)
11039 csum = (u16) ~csum_fold(csum_add(csum,
11040 csum_partial(t_header, -fix, 0)));
11041
11042 return swab16(csum);
11043}
11044
11045static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11046{
11047 u32 rc;
11048
11049 if (skb->ip_summed != CHECKSUM_PARTIAL)
11050 rc = XMIT_PLAIN;
11051
11052 else {
4781bfad 11053 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
11054 rc = XMIT_CSUM_V6;
11055 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11056 rc |= XMIT_CSUM_TCP;
11057
11058 } else {
11059 rc = XMIT_CSUM_V4;
11060 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11061 rc |= XMIT_CSUM_TCP;
11062 }
11063 }
11064
11065 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
d6a2f98b 11066 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
755735eb
EG
11067
11068 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
d6a2f98b 11069 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
755735eb
EG
11070
11071 return rc;
11072}
11073
632da4d6 11074#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11075/* check if packet requires linearization (packet is too fragmented)
11076 no need to check fragmentation if page size > 8K (there will be no
11077 violation to FW restrictions) */
755735eb
EG
11078static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11079 u32 xmit_type)
11080{
11081 int to_copy = 0;
11082 int hlen = 0;
11083 int first_bd_sz = 0;
11084
11085 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11086 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11087
11088 if (xmit_type & XMIT_GSO) {
11089 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11090 /* Check if LSO packet needs to be copied:
11091 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11092 int wnd_size = MAX_FETCH_BD - 3;
33471629 11093 /* Number of windows to check */
755735eb
EG
11094 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11095 int wnd_idx = 0;
11096 int frag_idx = 0;
11097 u32 wnd_sum = 0;
11098
11099 /* Headers length */
11100 hlen = (int)(skb_transport_header(skb) - skb->data) +
11101 tcp_hdrlen(skb);
11102
11103 /* Amount of data (w/o headers) on linear part of SKB*/
11104 first_bd_sz = skb_headlen(skb) - hlen;
11105
11106 wnd_sum = first_bd_sz;
11107
11108 /* Calculate the first sum - it's special */
11109 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11110 wnd_sum +=
11111 skb_shinfo(skb)->frags[frag_idx].size;
11112
11113 /* If there was data on linear skb data - check it */
11114 if (first_bd_sz > 0) {
11115 if (unlikely(wnd_sum < lso_mss)) {
11116 to_copy = 1;
11117 goto exit_lbl;
11118 }
11119
11120 wnd_sum -= first_bd_sz;
11121 }
11122
11123 /* Others are easier: run through the frag list and
11124 check all windows */
11125 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11126 wnd_sum +=
11127 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11128
11129 if (unlikely(wnd_sum < lso_mss)) {
11130 to_copy = 1;
11131 break;
11132 }
11133 wnd_sum -=
11134 skb_shinfo(skb)->frags[wnd_idx].size;
11135 }
755735eb
EG
11136 } else {
11137 /* in non-LSO too fragmented packet should always
11138 be linearized */
11139 to_copy = 1;
11140 }
11141 }
11142
11143exit_lbl:
11144 if (unlikely(to_copy))
11145 DP(NETIF_MSG_TX_QUEUED,
11146 "Linearization IS REQUIRED for %s packet. "
11147 "num_frags %d hlen %d first_bd_sz %d\n",
11148 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11149 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11150
11151 return to_copy;
11152}
632da4d6 11153#endif
755735eb
EG
11154
11155/* called with netif_tx_lock
a2fbb9ea 11156 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 11157 * netif_wake_queue()
a2fbb9ea 11158 */
61357325 11159static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
11160{
11161 struct bnx2x *bp = netdev_priv(dev);
54b9ddaa 11162 struct bnx2x_fastpath *fp;
555f6c78 11163 struct netdev_queue *txq;
a2fbb9ea 11164 struct sw_tx_bd *tx_buf;
ca00392c
EG
11165 struct eth_tx_start_bd *tx_start_bd;
11166 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
11167 struct eth_tx_parse_bd *pbd = NULL;
11168 u16 pkt_prod, bd_prod;
755735eb 11169 int nbd, fp_index;
a2fbb9ea 11170 dma_addr_t mapping;
755735eb 11171 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
11172 int i;
11173 u8 hlen = 0;
ca00392c 11174 __le16 pkt_size = 0;
a2fbb9ea
ET
11175
11176#ifdef BNX2X_STOP_ON_ERROR
11177 if (unlikely(bp->panic))
11178 return NETDEV_TX_BUSY;
11179#endif
11180
555f6c78
EG
11181 fp_index = skb_get_queue_mapping(skb);
11182 txq = netdev_get_tx_queue(dev, fp_index);
11183
54b9ddaa 11184 fp = &bp->fp[fp_index];
755735eb 11185
231fd58a 11186 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
54b9ddaa 11187 fp->eth_q_stats.driver_xoff++;
555f6c78 11188 netif_tx_stop_queue(txq);
a2fbb9ea
ET
11189 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11190 return NETDEV_TX_BUSY;
11191 }
11192
755735eb
EG
11193 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11194 " gso type %x xmit_type %x\n",
11195 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11196 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11197
632da4d6 11198#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11199 /* First, check if we need to linearize the skb (due to FW
11200 restrictions). No need to check fragmentation if page size > 8K
11201 (there will be no violation to FW restrictions) */
755735eb
EG
11202 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11203 /* Statistics of linearization */
11204 bp->lin_cnt++;
11205 if (skb_linearize(skb) != 0) {
11206 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11207 "silently dropping this SKB\n");
11208 dev_kfree_skb_any(skb);
da5a662a 11209 return NETDEV_TX_OK;
755735eb
EG
11210 }
11211 }
632da4d6 11212#endif
755735eb 11213
a2fbb9ea 11214 /*
755735eb 11215 Please read carefully. First we use one BD which we mark as start,
ca00392c 11216 then we have a parsing info BD (used for TSO or xsum),
755735eb 11217 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
11218 (don't forget to mark the last one as last,
11219 and to unmap only AFTER you write to the BD ...)
755735eb 11220 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
11221 */
11222
11223 pkt_prod = fp->tx_pkt_prod++;
755735eb 11224 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11225
755735eb 11226 /* get a tx_buf and first BD */
a2fbb9ea 11227 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11228 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11229
ca00392c
EG
11230 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11231 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11232 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11233 /* header nbd */
ca00392c 11234 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11235
755735eb
EG
11236 /* remember the first BD of the packet */
11237 tx_buf->first_bd = fp->tx_bd_prod;
11238 tx_buf->skb = skb;
ca00392c 11239 tx_buf->flags = 0;
a2fbb9ea
ET
11240
11241 DP(NETIF_MSG_TX_QUEUED,
11242 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11243 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11244
0c6671b0
EG
11245#ifdef BCM_VLAN
11246 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11247 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
11248 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11249 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11250 } else
0c6671b0 11251#endif
ca00392c 11252 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11253
ca00392c
EG
11254 /* turn on parsing and get a BD */
11255 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11256 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11257
ca00392c 11258 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11259
11260 if (xmit_type & XMIT_CSUM) {
ca00392c 11261 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11262
11263 /* for now NS flag is not used in Linux */
4781bfad
EG
11264 pbd->global_data =
11265 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11266 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11267
755735eb
EG
11268 pbd->ip_hlen = (skb_transport_header(skb) -
11269 skb_network_header(skb)) / 2;
11270
11271 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11272
755735eb 11273 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11274 hlen = hlen*2;
a2fbb9ea 11275
ca00392c 11276 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11277
11278 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11279 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11280 ETH_TX_BD_FLAGS_IP_CSUM;
11281 else
ca00392c
EG
11282 tx_start_bd->bd_flags.as_bitfield |=
11283 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11284
11285 if (xmit_type & XMIT_CSUM_TCP) {
11286 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11287
11288 } else {
11289 s8 fix = SKB_CS_OFF(skb); /* signed! */
11290
ca00392c 11291 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11292
755735eb 11293 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11294 "hlen %d fix %d csum before fix %x\n",
11295 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11296
11297 /* HW bug: fixup the CSUM */
11298 pbd->tcp_pseudo_csum =
11299 bnx2x_csum_fix(skb_transport_header(skb),
11300 SKB_CS(skb), fix);
11301
11302 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11303 pbd->tcp_pseudo_csum);
11304 }
a2fbb9ea
ET
11305 }
11306
11307 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11308 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11309
ca00392c
EG
11310 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11311 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11312 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11313 tx_start_bd->nbd = cpu_to_le16(nbd);
11314 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11315 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11316
11317 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11318 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11319 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11320 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11321 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11322
755735eb 11323 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11324
11325 DP(NETIF_MSG_TX_QUEUED,
11326 "TSO packet len %d hlen %d total len %d tso size %d\n",
11327 skb->len, hlen, skb_headlen(skb),
11328 skb_shinfo(skb)->gso_size);
11329
ca00392c 11330 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11331
755735eb 11332 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11333 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11334 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11335
11336 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11337 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11338 pbd->tcp_flags = pbd_tcp_flags(skb);
11339
11340 if (xmit_type & XMIT_GSO_V4) {
11341 pbd->ip_id = swab16(ip_hdr(skb)->id);
11342 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11343 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11344 ip_hdr(skb)->daddr,
11345 0, IPPROTO_TCP, 0));
755735eb
EG
11346
11347 } else
11348 pbd->tcp_pseudo_csum =
11349 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11350 &ipv6_hdr(skb)->daddr,
11351 0, IPPROTO_TCP, 0));
11352
a2fbb9ea
ET
11353 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11354 }
ca00392c 11355 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11356
755735eb
EG
11357 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11358 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11359
755735eb 11360 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11361 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11362 if (total_pkt_bd == NULL)
11363 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11364
755735eb
EG
11365 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11366 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11367
ca00392c
EG
11368 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11369 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11370 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11371 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11372
755735eb 11373 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11374 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11375 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11376 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11377 }
11378
ca00392c 11379 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11380
a2fbb9ea
ET
11381 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11382
755735eb 11383 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11384 * if the packet contains or ends with it
11385 */
11386 if (TX_BD_POFF(bd_prod) < nbd)
11387 nbd++;
11388
ca00392c
EG
11389 if (total_pkt_bd != NULL)
11390 total_pkt_bd->total_pkt_bytes = pkt_size;
11391
a2fbb9ea
ET
11392 if (pbd)
11393 DP(NETIF_MSG_TX_QUEUED,
11394 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11395 " tcp_flags %x xsum %x seq %u hlen %u\n",
11396 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11397 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11398 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11399
755735eb 11400 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11401
58f4c4cf
EG
11402 /*
11403 * Make sure that the BD data is updated before updating the producer
11404 * since FW might read the BD right after the producer is updated.
11405 * This is only applicable for weak-ordered memory model archs such
11406 * as IA-64. The following barrier is also mandatory since FW will
11407 * assumes packets must have BDs.
11408 */
11409 wmb();
11410
ca00392c
EG
11411 fp->tx_db.data.prod += nbd;
11412 barrier();
54b9ddaa 11413 DOORBELL(bp, fp->index, fp->tx_db.raw);
a2fbb9ea
ET
11414
11415 mmiowb();
11416
755735eb 11417 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11418
11419 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11420 netif_tx_stop_queue(txq);
58f4c4cf
EG
11421 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11422 if we put Tx into XOFF state. */
11423 smp_mb();
54b9ddaa 11424 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 11425 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11426 netif_tx_wake_queue(txq);
a2fbb9ea 11427 }
54b9ddaa 11428 fp->tx_pkt++;
a2fbb9ea
ET
11429
11430 return NETDEV_TX_OK;
11431}
11432
bb2a0f7a 11433/* called with rtnl_lock */
a2fbb9ea
ET
11434static int bnx2x_open(struct net_device *dev)
11435{
11436 struct bnx2x *bp = netdev_priv(dev);
11437
6eccabb3
EG
11438 netif_carrier_off(dev);
11439
a2fbb9ea
ET
11440 bnx2x_set_power_state(bp, PCI_D0);
11441
bb2a0f7a 11442 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11443}
11444
bb2a0f7a 11445/* called with rtnl_lock */
a2fbb9ea
ET
11446static int bnx2x_close(struct net_device *dev)
11447{
a2fbb9ea
ET
11448 struct bnx2x *bp = netdev_priv(dev);
11449
11450 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11451 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11452 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11453 if (!CHIP_REV_IS_SLOW(bp))
11454 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11455
11456 return 0;
11457}
11458
f5372251 11459/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11460static void bnx2x_set_rx_mode(struct net_device *dev)
11461{
11462 struct bnx2x *bp = netdev_priv(dev);
11463 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11464 int port = BP_PORT(bp);
11465
11466 if (bp->state != BNX2X_STATE_OPEN) {
11467 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11468 return;
11469 }
11470
11471 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11472
11473 if (dev->flags & IFF_PROMISC)
11474 rx_mode = BNX2X_RX_MODE_PROMISC;
11475
11476 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
11477 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11478 CHIP_IS_E1(bp)))
34f80b04
EG
11479 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11480
11481 else { /* some multicasts */
11482 if (CHIP_IS_E1(bp)) {
11483 int i, old, offset;
11484 struct dev_mc_list *mclist;
11485 struct mac_configuration_cmd *config =
11486 bnx2x_sp(bp, mcast_config);
11487
11488 for (i = 0, mclist = dev->mc_list;
4cd24eaf 11489 mclist && (i < netdev_mc_count(dev));
34f80b04
EG
11490 i++, mclist = mclist->next) {
11491
11492 config->config_table[i].
11493 cam_entry.msb_mac_addr =
11494 swab16(*(u16 *)&mclist->dmi_addr[0]);
11495 config->config_table[i].
11496 cam_entry.middle_mac_addr =
11497 swab16(*(u16 *)&mclist->dmi_addr[2]);
11498 config->config_table[i].
11499 cam_entry.lsb_mac_addr =
11500 swab16(*(u16 *)&mclist->dmi_addr[4]);
11501 config->config_table[i].cam_entry.flags =
11502 cpu_to_le16(port);
11503 config->config_table[i].
11504 target_table_entry.flags = 0;
ca00392c
EG
11505 config->config_table[i].target_table_entry.
11506 clients_bit_vector =
11507 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11508 config->config_table[i].
11509 target_table_entry.vlan_id = 0;
11510
11511 DP(NETIF_MSG_IFUP,
11512 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11513 config->config_table[i].
11514 cam_entry.msb_mac_addr,
11515 config->config_table[i].
11516 cam_entry.middle_mac_addr,
11517 config->config_table[i].
11518 cam_entry.lsb_mac_addr);
11519 }
8d9c5f34 11520 old = config->hdr.length;
34f80b04
EG
11521 if (old > i) {
11522 for (; i < old; i++) {
11523 if (CAM_IS_INVALID(config->
11524 config_table[i])) {
af246401 11525 /* already invalidated */
34f80b04
EG
11526 break;
11527 }
11528 /* invalidate */
11529 CAM_INVALIDATE(config->
11530 config_table[i]);
11531 }
11532 }
11533
11534 if (CHIP_REV_IS_SLOW(bp))
11535 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11536 else
11537 offset = BNX2X_MAX_MULTICAST*(1 + port);
11538
8d9c5f34 11539 config->hdr.length = i;
34f80b04 11540 config->hdr.offset = offset;
8d9c5f34 11541 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11542 config->hdr.reserved1 = 0;
11543
e665bfda
MC
11544 bp->set_mac_pending++;
11545 smp_wmb();
11546
34f80b04
EG
11547 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11548 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11549 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11550 0);
11551 } else { /* E1H */
11552 /* Accept one or more multicasts */
11553 struct dev_mc_list *mclist;
11554 u32 mc_filter[MC_HASH_SIZE];
11555 u32 crc, bit, regidx;
11556 int i;
11557
11558 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11559
11560 for (i = 0, mclist = dev->mc_list;
4cd24eaf 11561 mclist && (i < netdev_mc_count(dev));
34f80b04
EG
11562 i++, mclist = mclist->next) {
11563
7c510e4b
JB
11564 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11565 mclist->dmi_addr);
34f80b04
EG
11566
11567 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11568 bit = (crc >> 24) & 0xff;
11569 regidx = bit >> 5;
11570 bit &= 0x1f;
11571 mc_filter[regidx] |= (1 << bit);
11572 }
11573
11574 for (i = 0; i < MC_HASH_SIZE; i++)
11575 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11576 mc_filter[i]);
11577 }
11578 }
11579
11580 bp->rx_mode = rx_mode;
11581 bnx2x_set_storm_rx_mode(bp);
11582}
11583
11584/* called with rtnl_lock */
a2fbb9ea
ET
11585static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11586{
11587 struct sockaddr *addr = p;
11588 struct bnx2x *bp = netdev_priv(dev);
11589
34f80b04 11590 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11591 return -EINVAL;
11592
11593 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11594 if (netif_running(dev)) {
11595 if (CHIP_IS_E1(bp))
e665bfda 11596 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 11597 else
e665bfda 11598 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 11599 }
a2fbb9ea
ET
11600
11601 return 0;
11602}
11603
c18487ee 11604/* called with rtnl_lock */
01cd4528
EG
11605static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11606 int devad, u16 addr)
a2fbb9ea 11607{
01cd4528
EG
11608 struct bnx2x *bp = netdev_priv(netdev);
11609 u16 value;
11610 int rc;
11611 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11612
01cd4528
EG
11613 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11614 prtad, devad, addr);
a2fbb9ea 11615
01cd4528
EG
11616 if (prtad != bp->mdio.prtad) {
11617 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11618 prtad, bp->mdio.prtad);
11619 return -EINVAL;
11620 }
11621
11622 /* The HW expects different devad if CL22 is used */
11623 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11624
01cd4528
EG
11625 bnx2x_acquire_phy_lock(bp);
11626 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11627 devad, addr, &value);
11628 bnx2x_release_phy_lock(bp);
11629 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11630
01cd4528
EG
11631 if (!rc)
11632 rc = value;
11633 return rc;
11634}
a2fbb9ea 11635
01cd4528
EG
11636/* called with rtnl_lock */
11637static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11638 u16 addr, u16 value)
11639{
11640 struct bnx2x *bp = netdev_priv(netdev);
11641 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11642 int rc;
11643
11644 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11645 " value 0x%x\n", prtad, devad, addr, value);
11646
11647 if (prtad != bp->mdio.prtad) {
11648 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11649 prtad, bp->mdio.prtad);
11650 return -EINVAL;
a2fbb9ea
ET
11651 }
11652
01cd4528
EG
11653 /* The HW expects different devad if CL22 is used */
11654 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11655
01cd4528
EG
11656 bnx2x_acquire_phy_lock(bp);
11657 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11658 devad, addr, value);
11659 bnx2x_release_phy_lock(bp);
11660 return rc;
11661}
c18487ee 11662
01cd4528
EG
11663/* called with rtnl_lock */
11664static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11665{
11666 struct bnx2x *bp = netdev_priv(dev);
11667 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11668
01cd4528
EG
11669 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11670 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11671
01cd4528
EG
11672 if (!netif_running(dev))
11673 return -EAGAIN;
11674
11675 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11676}
11677
34f80b04 11678/* called with rtnl_lock */
a2fbb9ea
ET
11679static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11680{
11681 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11682 int rc = 0;
a2fbb9ea
ET
11683
11684 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11685 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11686 return -EINVAL;
11687
11688 /* This does not race with packet allocation
c14423fe 11689 * because the actual alloc size is
a2fbb9ea
ET
11690 * only updated as part of load
11691 */
11692 dev->mtu = new_mtu;
11693
11694 if (netif_running(dev)) {
34f80b04
EG
11695 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11696 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11697 }
34f80b04
EG
11698
11699 return rc;
a2fbb9ea
ET
11700}
11701
11702static void bnx2x_tx_timeout(struct net_device *dev)
11703{
11704 struct bnx2x *bp = netdev_priv(dev);
11705
11706#ifdef BNX2X_STOP_ON_ERROR
11707 if (!bp->panic)
11708 bnx2x_panic();
11709#endif
11710 /* This allows the netif to be shutdown gracefully before resetting */
11711 schedule_work(&bp->reset_task);
11712}
11713
11714#ifdef BCM_VLAN
34f80b04 11715/* called with rtnl_lock */
a2fbb9ea
ET
11716static void bnx2x_vlan_rx_register(struct net_device *dev,
11717 struct vlan_group *vlgrp)
11718{
11719 struct bnx2x *bp = netdev_priv(dev);
11720
11721 bp->vlgrp = vlgrp;
0c6671b0
EG
11722
11723 /* Set flags according to the required capabilities */
11724 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11725
11726 if (dev->features & NETIF_F_HW_VLAN_TX)
11727 bp->flags |= HW_VLAN_TX_FLAG;
11728
11729 if (dev->features & NETIF_F_HW_VLAN_RX)
11730 bp->flags |= HW_VLAN_RX_FLAG;
11731
a2fbb9ea 11732 if (netif_running(dev))
49d66772 11733 bnx2x_set_client_config(bp);
a2fbb9ea 11734}
34f80b04 11735
a2fbb9ea
ET
11736#endif
11737
257ddbda 11738#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
11739static void poll_bnx2x(struct net_device *dev)
11740{
11741 struct bnx2x *bp = netdev_priv(dev);
11742
11743 disable_irq(bp->pdev->irq);
11744 bnx2x_interrupt(bp->pdev->irq, dev);
11745 enable_irq(bp->pdev->irq);
11746}
11747#endif
11748
c64213cd
SH
11749static const struct net_device_ops bnx2x_netdev_ops = {
11750 .ndo_open = bnx2x_open,
11751 .ndo_stop = bnx2x_close,
11752 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11753 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
11754 .ndo_set_mac_address = bnx2x_change_mac_addr,
11755 .ndo_validate_addr = eth_validate_addr,
11756 .ndo_do_ioctl = bnx2x_ioctl,
11757 .ndo_change_mtu = bnx2x_change_mtu,
11758 .ndo_tx_timeout = bnx2x_tx_timeout,
11759#ifdef BCM_VLAN
11760 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11761#endif
257ddbda 11762#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
11763 .ndo_poll_controller = poll_bnx2x,
11764#endif
11765};
11766
34f80b04
EG
11767static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11768 struct net_device *dev)
a2fbb9ea
ET
11769{
11770 struct bnx2x *bp;
11771 int rc;
11772
11773 SET_NETDEV_DEV(dev, &pdev->dev);
11774 bp = netdev_priv(dev);
11775
34f80b04
EG
11776 bp->dev = dev;
11777 bp->pdev = pdev;
a2fbb9ea 11778 bp->flags = 0;
34f80b04 11779 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
11780
11781 rc = pci_enable_device(pdev);
11782 if (rc) {
11783 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11784 goto err_out;
11785 }
11786
11787 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11788 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11789 " aborting\n");
11790 rc = -ENODEV;
11791 goto err_out_disable;
11792 }
11793
11794 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11795 printk(KERN_ERR PFX "Cannot find second PCI device"
11796 " base address, aborting\n");
11797 rc = -ENODEV;
11798 goto err_out_disable;
11799 }
11800
34f80b04
EG
11801 if (atomic_read(&pdev->enable_cnt) == 1) {
11802 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11803 if (rc) {
11804 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11805 " aborting\n");
11806 goto err_out_disable;
11807 }
a2fbb9ea 11808
34f80b04
EG
11809 pci_set_master(pdev);
11810 pci_save_state(pdev);
11811 }
a2fbb9ea
ET
11812
11813 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11814 if (bp->pm_cap == 0) {
11815 printk(KERN_ERR PFX "Cannot find power management"
11816 " capability, aborting\n");
11817 rc = -EIO;
11818 goto err_out_release;
11819 }
11820
11821 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11822 if (bp->pcie_cap == 0) {
11823 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11824 " aborting\n");
11825 rc = -EIO;
11826 goto err_out_release;
11827 }
11828
6a35528a 11829 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11830 bp->flags |= USING_DAC_FLAG;
6a35528a 11831 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11832 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11833 " failed, aborting\n");
11834 rc = -EIO;
11835 goto err_out_release;
11836 }
11837
284901a9 11838 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea
ET
11839 printk(KERN_ERR PFX "System does not support DMA,"
11840 " aborting\n");
11841 rc = -EIO;
11842 goto err_out_release;
11843 }
11844
34f80b04
EG
11845 dev->mem_start = pci_resource_start(pdev, 0);
11846 dev->base_addr = dev->mem_start;
11847 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11848
11849 dev->irq = pdev->irq;
11850
275f165f 11851 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11852 if (!bp->regview) {
11853 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11854 rc = -ENOMEM;
11855 goto err_out_release;
11856 }
11857
34f80b04
EG
11858 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11859 min_t(u64, BNX2X_DB_SIZE,
11860 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11861 if (!bp->doorbells) {
11862 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11863 rc = -ENOMEM;
11864 goto err_out_unmap;
11865 }
11866
11867 bnx2x_set_power_state(bp, PCI_D0);
11868
34f80b04
EG
11869 /* clean indirect addresses */
11870 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11871 PCICFG_VENDOR_ID_OFFSET);
11872 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11873 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11874 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11875 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11876
34f80b04 11877 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11878
c64213cd 11879 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11880 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11881 dev->features |= NETIF_F_SG;
11882 dev->features |= NETIF_F_HW_CSUM;
11883 if (bp->flags & USING_DAC_FLAG)
11884 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11885 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11886 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11887#ifdef BCM_VLAN
11888 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11889 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11890
11891 dev->vlan_features |= NETIF_F_SG;
11892 dev->vlan_features |= NETIF_F_HW_CSUM;
11893 if (bp->flags & USING_DAC_FLAG)
11894 dev->vlan_features |= NETIF_F_HIGHDMA;
11895 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11896 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11897#endif
a2fbb9ea 11898
01cd4528
EG
11899 /* get_port_hwinfo() will set prtad and mmds properly */
11900 bp->mdio.prtad = MDIO_PRTAD_NONE;
11901 bp->mdio.mmds = 0;
11902 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11903 bp->mdio.dev = dev;
11904 bp->mdio.mdio_read = bnx2x_mdio_read;
11905 bp->mdio.mdio_write = bnx2x_mdio_write;
11906
a2fbb9ea
ET
11907 return 0;
11908
11909err_out_unmap:
11910 if (bp->regview) {
11911 iounmap(bp->regview);
11912 bp->regview = NULL;
11913 }
a2fbb9ea
ET
11914 if (bp->doorbells) {
11915 iounmap(bp->doorbells);
11916 bp->doorbells = NULL;
11917 }
11918
11919err_out_release:
34f80b04
EG
11920 if (atomic_read(&pdev->enable_cnt) == 1)
11921 pci_release_regions(pdev);
a2fbb9ea
ET
11922
11923err_out_disable:
11924 pci_disable_device(pdev);
11925 pci_set_drvdata(pdev, NULL);
11926
11927err_out:
11928 return rc;
11929}
11930
37f9ce62
EG
11931static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11932 int *width, int *speed)
25047950
ET
11933{
11934 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11935
37f9ce62 11936 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 11937
37f9ce62
EG
11938 /* return value of 1=2.5GHz 2=5GHz */
11939 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 11940}
37f9ce62 11941
94a78b79
VZ
11942static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11943{
37f9ce62 11944 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
11945 struct bnx2x_fw_file_hdr *fw_hdr;
11946 struct bnx2x_fw_file_section *sections;
94a78b79 11947 u32 offset, len, num_ops;
37f9ce62 11948 u16 *ops_offsets;
94a78b79 11949 int i;
37f9ce62 11950 const u8 *fw_ver;
94a78b79
VZ
11951
11952 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11953 return -EINVAL;
11954
11955 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11956 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11957
11958 /* Make sure none of the offsets and sizes make us read beyond
11959 * the end of the firmware data */
11960 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11961 offset = be32_to_cpu(sections[i].offset);
11962 len = be32_to_cpu(sections[i].len);
11963 if (offset + len > firmware->size) {
37f9ce62
EG
11964 printk(KERN_ERR PFX "Section %d length is out of "
11965 "bounds\n", i);
94a78b79
VZ
11966 return -EINVAL;
11967 }
11968 }
11969
11970 /* Likewise for the init_ops offsets */
11971 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11972 ops_offsets = (u16 *)(firmware->data + offset);
11973 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11974
11975 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11976 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
37f9ce62
EG
11977 printk(KERN_ERR PFX "Section offset %d is out of "
11978 "bounds\n", i);
94a78b79
VZ
11979 return -EINVAL;
11980 }
11981 }
11982
11983 /* Check FW version */
11984 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11985 fw_ver = firmware->data + offset;
11986 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11987 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11988 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11989 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11990 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11991 " Should be %d.%d.%d.%d\n",
11992 fw_ver[0], fw_ver[1], fw_ver[2],
11993 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11994 BCM_5710_FW_MINOR_VERSION,
11995 BCM_5710_FW_REVISION_VERSION,
11996 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 11997 return -EINVAL;
94a78b79
VZ
11998 }
11999
12000 return 0;
12001}
12002
ab6ad5a4 12003static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12004{
ab6ad5a4
EG
12005 const __be32 *source = (const __be32 *)_source;
12006 u32 *target = (u32 *)_target;
94a78b79 12007 u32 i;
94a78b79
VZ
12008
12009 for (i = 0; i < n/4; i++)
12010 target[i] = be32_to_cpu(source[i]);
12011}
12012
12013/*
12014 Ops array is stored in the following format:
12015 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12016 */
ab6ad5a4 12017static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 12018{
ab6ad5a4
EG
12019 const __be32 *source = (const __be32 *)_source;
12020 struct raw_op *target = (struct raw_op *)_target;
94a78b79 12021 u32 i, j, tmp;
94a78b79 12022
ab6ad5a4 12023 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
12024 tmp = be32_to_cpu(source[j]);
12025 target[i].op = (tmp >> 24) & 0xff;
12026 target[i].offset = tmp & 0xffffff;
12027 target[i].raw_data = be32_to_cpu(source[j+1]);
12028 }
12029}
ab6ad5a4
EG
12030
12031static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12032{
ab6ad5a4
EG
12033 const __be16 *source = (const __be16 *)_source;
12034 u16 *target = (u16 *)_target;
94a78b79 12035 u32 i;
94a78b79
VZ
12036
12037 for (i = 0; i < n/2; i++)
12038 target[i] = be16_to_cpu(source[i]);
12039}
12040
12041#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
ab6ad5a4
EG
12042 do { \
12043 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12044 bp->arr = kmalloc(len, GFP_KERNEL); \
94a78b79 12045 if (!bp->arr) { \
ab6ad5a4
EG
12046 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12047 "for "#arr"\n", len); \
94a78b79
VZ
12048 goto lbl; \
12049 } \
ab6ad5a4
EG
12050 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12051 (u8 *)bp->arr, len); \
94a78b79
VZ
12052 } while (0)
12053
94a78b79
VZ
12054static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12055{
45229b42 12056 const char *fw_file_name;
94a78b79 12057 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 12058 int rc;
94a78b79 12059
94a78b79 12060 if (CHIP_IS_E1(bp))
45229b42 12061 fw_file_name = FW_FILE_NAME_E1;
94a78b79 12062 else
45229b42 12063 fw_file_name = FW_FILE_NAME_E1H;
94a78b79
VZ
12064
12065 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12066
12067 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12068 if (rc) {
ab6ad5a4
EG
12069 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12070 fw_file_name);
94a78b79
VZ
12071 goto request_firmware_exit;
12072 }
12073
12074 rc = bnx2x_check_firmware(bp);
12075 if (rc) {
12076 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12077 goto request_firmware_exit;
12078 }
12079
12080 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12081
12082 /* Initialize the pointers to the init arrays */
12083 /* Blob */
12084 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12085
12086 /* Opcodes */
12087 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12088
12089 /* Offsets */
ab6ad5a4
EG
12090 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12091 be16_to_cpu_n);
94a78b79
VZ
12092
12093 /* STORMs firmware */
573f2035
EG
12094 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12095 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12096 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12097 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12098 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12099 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12100 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12101 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12102 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12103 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12104 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12105 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12106 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12107 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12108 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12109 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
12110
12111 return 0;
ab6ad5a4 12112
94a78b79
VZ
12113init_offsets_alloc_err:
12114 kfree(bp->init_ops);
12115init_ops_alloc_err:
12116 kfree(bp->init_data);
12117request_firmware_exit:
12118 release_firmware(bp->firmware);
12119
12120 return rc;
12121}
12122
12123
a2fbb9ea
ET
12124static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12125 const struct pci_device_id *ent)
12126{
a2fbb9ea
ET
12127 struct net_device *dev = NULL;
12128 struct bnx2x *bp;
37f9ce62 12129 int pcie_width, pcie_speed;
25047950 12130 int rc;
a2fbb9ea 12131
a2fbb9ea 12132 /* dev zeroed in init_etherdev */
555f6c78 12133 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
12134 if (!dev) {
12135 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 12136 return -ENOMEM;
34f80b04 12137 }
a2fbb9ea 12138
a2fbb9ea
ET
12139 bp = netdev_priv(dev);
12140 bp->msglevel = debug;
12141
df4770de
EG
12142 pci_set_drvdata(pdev, dev);
12143
34f80b04 12144 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
12145 if (rc < 0) {
12146 free_netdev(dev);
12147 return rc;
12148 }
12149
34f80b04 12150 rc = bnx2x_init_bp(bp);
693fc0d1
EG
12151 if (rc)
12152 goto init_one_exit;
12153
94a78b79
VZ
12154 /* Set init arrays */
12155 rc = bnx2x_init_firmware(bp, &pdev->dev);
12156 if (rc) {
12157 printk(KERN_ERR PFX "Error loading firmware\n");
12158 goto init_one_exit;
12159 }
12160
693fc0d1 12161 rc = register_netdev(dev);
34f80b04 12162 if (rc) {
693fc0d1 12163 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
12164 goto init_one_exit;
12165 }
12166
37f9ce62 12167 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
25047950 12168 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 12169 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 12170 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
37f9ce62 12171 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
25047950 12172 dev->base_addr, bp->pdev->irq);
e174961c 12173 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 12174
a2fbb9ea 12175 return 0;
34f80b04
EG
12176
12177init_one_exit:
12178 if (bp->regview)
12179 iounmap(bp->regview);
12180
12181 if (bp->doorbells)
12182 iounmap(bp->doorbells);
12183
12184 free_netdev(dev);
12185
12186 if (atomic_read(&pdev->enable_cnt) == 1)
12187 pci_release_regions(pdev);
12188
12189 pci_disable_device(pdev);
12190 pci_set_drvdata(pdev, NULL);
12191
12192 return rc;
a2fbb9ea
ET
12193}
12194
12195static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12196{
12197 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
12198 struct bnx2x *bp;
12199
12200 if (!dev) {
228241eb
ET
12201 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12202 return;
12203 }
228241eb 12204 bp = netdev_priv(dev);
a2fbb9ea 12205
a2fbb9ea
ET
12206 unregister_netdev(dev);
12207
94a78b79
VZ
12208 kfree(bp->init_ops_offsets);
12209 kfree(bp->init_ops);
12210 kfree(bp->init_data);
12211 release_firmware(bp->firmware);
12212
a2fbb9ea
ET
12213 if (bp->regview)
12214 iounmap(bp->regview);
12215
12216 if (bp->doorbells)
12217 iounmap(bp->doorbells);
12218
12219 free_netdev(dev);
34f80b04
EG
12220
12221 if (atomic_read(&pdev->enable_cnt) == 1)
12222 pci_release_regions(pdev);
12223
a2fbb9ea
ET
12224 pci_disable_device(pdev);
12225 pci_set_drvdata(pdev, NULL);
12226}
12227
12228static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12229{
12230 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
12231 struct bnx2x *bp;
12232
34f80b04
EG
12233 if (!dev) {
12234 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12235 return -ENODEV;
12236 }
12237 bp = netdev_priv(dev);
a2fbb9ea 12238
34f80b04 12239 rtnl_lock();
a2fbb9ea 12240
34f80b04 12241 pci_save_state(pdev);
228241eb 12242
34f80b04
EG
12243 if (!netif_running(dev)) {
12244 rtnl_unlock();
12245 return 0;
12246 }
a2fbb9ea
ET
12247
12248 netif_device_detach(dev);
a2fbb9ea 12249
da5a662a 12250 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 12251
a2fbb9ea 12252 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 12253
34f80b04
EG
12254 rtnl_unlock();
12255
a2fbb9ea
ET
12256 return 0;
12257}
12258
12259static int bnx2x_resume(struct pci_dev *pdev)
12260{
12261 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 12262 struct bnx2x *bp;
a2fbb9ea
ET
12263 int rc;
12264
228241eb
ET
12265 if (!dev) {
12266 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12267 return -ENODEV;
12268 }
228241eb 12269 bp = netdev_priv(dev);
a2fbb9ea 12270
34f80b04
EG
12271 rtnl_lock();
12272
228241eb 12273 pci_restore_state(pdev);
34f80b04
EG
12274
12275 if (!netif_running(dev)) {
12276 rtnl_unlock();
12277 return 0;
12278 }
12279
a2fbb9ea
ET
12280 bnx2x_set_power_state(bp, PCI_D0);
12281 netif_device_attach(dev);
12282
da5a662a 12283 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 12284
34f80b04
EG
12285 rtnl_unlock();
12286
12287 return rc;
a2fbb9ea
ET
12288}
12289
f8ef6e44
YG
12290static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12291{
12292 int i;
12293
12294 bp->state = BNX2X_STATE_ERROR;
12295
12296 bp->rx_mode = BNX2X_RX_MODE_NONE;
12297
12298 bnx2x_netif_stop(bp, 0);
12299
12300 del_timer_sync(&bp->timer);
12301 bp->stats_state = STATS_STATE_DISABLED;
12302 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12303
12304 /* Release IRQs */
6cbe5065 12305 bnx2x_free_irq(bp, false);
f8ef6e44
YG
12306
12307 if (CHIP_IS_E1(bp)) {
12308 struct mac_configuration_cmd *config =
12309 bnx2x_sp(bp, mcast_config);
12310
8d9c5f34 12311 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
12312 CAM_INVALIDATE(config->config_table[i]);
12313 }
12314
12315 /* Free SKBs, SGEs, TPA pool and driver internals */
12316 bnx2x_free_skbs(bp);
54b9ddaa 12317 for_each_queue(bp, i)
f8ef6e44 12318 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 12319 for_each_queue(bp, i)
7cde1c8b 12320 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
12321 bnx2x_free_mem(bp);
12322
12323 bp->state = BNX2X_STATE_CLOSED;
12324
12325 netif_carrier_off(bp->dev);
12326
12327 return 0;
12328}
12329
12330static void bnx2x_eeh_recover(struct bnx2x *bp)
12331{
12332 u32 val;
12333
12334 mutex_init(&bp->port.phy_mutex);
12335
12336 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12337 bp->link_params.shmem_base = bp->common.shmem_base;
12338 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12339
12340 if (!bp->common.shmem_base ||
12341 (bp->common.shmem_base < 0xA0000) ||
12342 (bp->common.shmem_base >= 0xC0000)) {
12343 BNX2X_DEV_INFO("MCP not active\n");
12344 bp->flags |= NO_MCP_FLAG;
12345 return;
12346 }
12347
12348 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12349 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12350 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12351 BNX2X_ERR("BAD MCP validity signature\n");
12352
12353 if (!BP_NOMCP(bp)) {
12354 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12355 & DRV_MSG_SEQ_NUMBER_MASK);
12356 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12357 }
12358}
12359
493adb1f
WX
12360/**
12361 * bnx2x_io_error_detected - called when PCI error is detected
12362 * @pdev: Pointer to PCI device
12363 * @state: The current pci connection state
12364 *
12365 * This function is called after a PCI bus error affecting
12366 * this device has been detected.
12367 */
12368static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12369 pci_channel_state_t state)
12370{
12371 struct net_device *dev = pci_get_drvdata(pdev);
12372 struct bnx2x *bp = netdev_priv(dev);
12373
12374 rtnl_lock();
12375
12376 netif_device_detach(dev);
12377
07ce50e4
DN
12378 if (state == pci_channel_io_perm_failure) {
12379 rtnl_unlock();
12380 return PCI_ERS_RESULT_DISCONNECT;
12381 }
12382
493adb1f 12383 if (netif_running(dev))
f8ef6e44 12384 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
12385
12386 pci_disable_device(pdev);
12387
12388 rtnl_unlock();
12389
12390 /* Request a slot reset */
12391 return PCI_ERS_RESULT_NEED_RESET;
12392}
12393
12394/**
12395 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12396 * @pdev: Pointer to PCI device
12397 *
12398 * Restart the card from scratch, as if from a cold-boot.
12399 */
12400static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12401{
12402 struct net_device *dev = pci_get_drvdata(pdev);
12403 struct bnx2x *bp = netdev_priv(dev);
12404
12405 rtnl_lock();
12406
12407 if (pci_enable_device(pdev)) {
12408 dev_err(&pdev->dev,
12409 "Cannot re-enable PCI device after reset\n");
12410 rtnl_unlock();
12411 return PCI_ERS_RESULT_DISCONNECT;
12412 }
12413
12414 pci_set_master(pdev);
12415 pci_restore_state(pdev);
12416
12417 if (netif_running(dev))
12418 bnx2x_set_power_state(bp, PCI_D0);
12419
12420 rtnl_unlock();
12421
12422 return PCI_ERS_RESULT_RECOVERED;
12423}
12424
12425/**
12426 * bnx2x_io_resume - called when traffic can start flowing again
12427 * @pdev: Pointer to PCI device
12428 *
12429 * This callback is called when the error recovery driver tells us that
12430 * its OK to resume normal operation.
12431 */
12432static void bnx2x_io_resume(struct pci_dev *pdev)
12433{
12434 struct net_device *dev = pci_get_drvdata(pdev);
12435 struct bnx2x *bp = netdev_priv(dev);
12436
12437 rtnl_lock();
12438
f8ef6e44
YG
12439 bnx2x_eeh_recover(bp);
12440
493adb1f 12441 if (netif_running(dev))
f8ef6e44 12442 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
12443
12444 netif_device_attach(dev);
12445
12446 rtnl_unlock();
12447}
12448
12449static struct pci_error_handlers bnx2x_err_handler = {
12450 .error_detected = bnx2x_io_error_detected,
356e2385
EG
12451 .slot_reset = bnx2x_io_slot_reset,
12452 .resume = bnx2x_io_resume,
493adb1f
WX
12453};
12454
a2fbb9ea 12455static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
12456 .name = DRV_MODULE_NAME,
12457 .id_table = bnx2x_pci_tbl,
12458 .probe = bnx2x_init_one,
12459 .remove = __devexit_p(bnx2x_remove_one),
12460 .suspend = bnx2x_suspend,
12461 .resume = bnx2x_resume,
12462 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
12463};
12464
12465static int __init bnx2x_init(void)
12466{
dd21ca6d
SG
12467 int ret;
12468
938cf541
EG
12469 printk(KERN_INFO "%s", version);
12470
1cf167f2
EG
12471 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12472 if (bnx2x_wq == NULL) {
12473 printk(KERN_ERR PFX "Cannot create workqueue\n");
12474 return -ENOMEM;
12475 }
12476
dd21ca6d
SG
12477 ret = pci_register_driver(&bnx2x_pci_driver);
12478 if (ret) {
12479 printk(KERN_ERR PFX "Cannot register driver\n");
12480 destroy_workqueue(bnx2x_wq);
12481 }
12482 return ret;
a2fbb9ea
ET
12483}
12484
12485static void __exit bnx2x_cleanup(void)
12486{
12487 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
12488
12489 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
12490}
12491
12492module_init(bnx2x_init);
12493module_exit(bnx2x_cleanup);
12494
993ac7b5
MC
12495#ifdef BCM_CNIC
12496
12497/* count denotes the number of new completions we have seen */
12498static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12499{
12500 struct eth_spe *spe;
12501
12502#ifdef BNX2X_STOP_ON_ERROR
12503 if (unlikely(bp->panic))
12504 return;
12505#endif
12506
12507 spin_lock_bh(&bp->spq_lock);
12508 bp->cnic_spq_pending -= count;
12509
12510 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12511 bp->cnic_spq_pending++) {
12512
12513 if (!bp->cnic_kwq_pending)
12514 break;
12515
12516 spe = bnx2x_sp_get_next(bp);
12517 *spe = *bp->cnic_kwq_cons;
12518
12519 bp->cnic_kwq_pending--;
12520
12521 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12522 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12523
12524 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12525 bp->cnic_kwq_cons = bp->cnic_kwq;
12526 else
12527 bp->cnic_kwq_cons++;
12528 }
12529 bnx2x_sp_prod_update(bp);
12530 spin_unlock_bh(&bp->spq_lock);
12531}
12532
12533static int bnx2x_cnic_sp_queue(struct net_device *dev,
12534 struct kwqe_16 *kwqes[], u32 count)
12535{
12536 struct bnx2x *bp = netdev_priv(dev);
12537 int i;
12538
12539#ifdef BNX2X_STOP_ON_ERROR
12540 if (unlikely(bp->panic))
12541 return -EIO;
12542#endif
12543
12544 spin_lock_bh(&bp->spq_lock);
12545
12546 for (i = 0; i < count; i++) {
12547 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12548
12549 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12550 break;
12551
12552 *bp->cnic_kwq_prod = *spe;
12553
12554 bp->cnic_kwq_pending++;
12555
12556 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12557 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12558 spe->data.mac_config_addr.hi,
12559 spe->data.mac_config_addr.lo,
12560 bp->cnic_kwq_pending);
12561
12562 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12563 bp->cnic_kwq_prod = bp->cnic_kwq;
12564 else
12565 bp->cnic_kwq_prod++;
12566 }
12567
12568 spin_unlock_bh(&bp->spq_lock);
12569
12570 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12571 bnx2x_cnic_sp_post(bp, 0);
12572
12573 return i;
12574}
12575
12576static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12577{
12578 struct cnic_ops *c_ops;
12579 int rc = 0;
12580
12581 mutex_lock(&bp->cnic_mutex);
12582 c_ops = bp->cnic_ops;
12583 if (c_ops)
12584 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12585 mutex_unlock(&bp->cnic_mutex);
12586
12587 return rc;
12588}
12589
12590static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12591{
12592 struct cnic_ops *c_ops;
12593 int rc = 0;
12594
12595 rcu_read_lock();
12596 c_ops = rcu_dereference(bp->cnic_ops);
12597 if (c_ops)
12598 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12599 rcu_read_unlock();
12600
12601 return rc;
12602}
12603
12604/*
12605 * for commands that have no data
12606 */
12607static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12608{
12609 struct cnic_ctl_info ctl = {0};
12610
12611 ctl.cmd = cmd;
12612
12613 return bnx2x_cnic_ctl_send(bp, &ctl);
12614}
12615
12616static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12617{
12618 struct cnic_ctl_info ctl;
12619
12620 /* first we tell CNIC and only then we count this as a completion */
12621 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12622 ctl.data.comp.cid = cid;
12623
12624 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12625 bnx2x_cnic_sp_post(bp, 1);
12626}
12627
12628static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12629{
12630 struct bnx2x *bp = netdev_priv(dev);
12631 int rc = 0;
12632
12633 switch (ctl->cmd) {
12634 case DRV_CTL_CTXTBL_WR_CMD: {
12635 u32 index = ctl->data.io.offset;
12636 dma_addr_t addr = ctl->data.io.dma_addr;
12637
12638 bnx2x_ilt_wr(bp, index, addr);
12639 break;
12640 }
12641
12642 case DRV_CTL_COMPLETION_CMD: {
12643 int count = ctl->data.comp.comp_count;
12644
12645 bnx2x_cnic_sp_post(bp, count);
12646 break;
12647 }
12648
12649 /* rtnl_lock is held. */
12650 case DRV_CTL_START_L2_CMD: {
12651 u32 cli = ctl->data.ring.client_id;
12652
12653 bp->rx_mode_cl_mask |= (1 << cli);
12654 bnx2x_set_storm_rx_mode(bp);
12655 break;
12656 }
12657
12658 /* rtnl_lock is held. */
12659 case DRV_CTL_STOP_L2_CMD: {
12660 u32 cli = ctl->data.ring.client_id;
12661
12662 bp->rx_mode_cl_mask &= ~(1 << cli);
12663 bnx2x_set_storm_rx_mode(bp);
12664 break;
12665 }
12666
12667 default:
12668 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12669 rc = -EINVAL;
12670 }
12671
12672 return rc;
12673}
12674
12675static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12676{
12677 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12678
12679 if (bp->flags & USING_MSIX_FLAG) {
12680 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12681 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12682 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12683 } else {
12684 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12685 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12686 }
12687 cp->irq_arr[0].status_blk = bp->cnic_sb;
12688 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12689 cp->irq_arr[1].status_blk = bp->def_status_blk;
12690 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12691
12692 cp->num_irq = 2;
12693}
12694
12695static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12696 void *data)
12697{
12698 struct bnx2x *bp = netdev_priv(dev);
12699 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12700
12701 if (ops == NULL)
12702 return -EINVAL;
12703
12704 if (atomic_read(&bp->intr_sem) != 0)
12705 return -EBUSY;
12706
12707 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12708 if (!bp->cnic_kwq)
12709 return -ENOMEM;
12710
12711 bp->cnic_kwq_cons = bp->cnic_kwq;
12712 bp->cnic_kwq_prod = bp->cnic_kwq;
12713 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12714
12715 bp->cnic_spq_pending = 0;
12716 bp->cnic_kwq_pending = 0;
12717
12718 bp->cnic_data = data;
12719
12720 cp->num_irq = 0;
12721 cp->drv_state = CNIC_DRV_STATE_REGD;
12722
12723 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12724
12725 bnx2x_setup_cnic_irq_info(bp);
12726 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12727 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12728 rcu_assign_pointer(bp->cnic_ops, ops);
12729
12730 return 0;
12731}
12732
12733static int bnx2x_unregister_cnic(struct net_device *dev)
12734{
12735 struct bnx2x *bp = netdev_priv(dev);
12736 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12737
12738 mutex_lock(&bp->cnic_mutex);
12739 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12740 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12741 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12742 }
12743 cp->drv_state = 0;
12744 rcu_assign_pointer(bp->cnic_ops, NULL);
12745 mutex_unlock(&bp->cnic_mutex);
12746 synchronize_rcu();
12747 kfree(bp->cnic_kwq);
12748 bp->cnic_kwq = NULL;
12749
12750 return 0;
12751}
12752
12753struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12754{
12755 struct bnx2x *bp = netdev_priv(dev);
12756 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12757
12758 cp->drv_owner = THIS_MODULE;
12759 cp->chip_id = CHIP_ID(bp);
12760 cp->pdev = bp->pdev;
12761 cp->io_base = bp->regview;
12762 cp->io_base2 = bp->doorbells;
12763 cp->max_kwqe_pending = 8;
12764 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12765 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12766 cp->ctx_tbl_len = CNIC_ILT_LINES;
12767 cp->starting_cid = BCM_CNIC_CID_START;
12768 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12769 cp->drv_ctl = bnx2x_drv_ctl;
12770 cp->drv_register_cnic = bnx2x_register_cnic;
12771 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12772
12773 return cp;
12774}
12775EXPORT_SYMBOL(bnx2x_cnic_probe);
12776
12777#endif /* BCM_CNIC */
94a78b79 12778
This page took 1.721163 seconds and 5 git commands to generate.