bnx2x: Remove SGMII configuration when not required
drivers/net/bnx2x_main.c

/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

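/* Register access note: the chip's GRC address space can be reached
 * indirectly through PCI config space - the target address is written
 * to the PCICFG_GRC_ADDRESS window, the data moves through
 * PCICFG_GRC_DATA, and the window is then parked back at the vendor-ID
 * offset so later config cycles do not hit the GRC window.
 */
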
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

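/* DMAE is the chip's DMA engine for moving blocks between host memory
 * and GRC register space. A transfer is posted by copying a
 * dmae_command into the engine's command memory and kicking the
 * per-channel GO register; the engine signals completion by writing
 * DMAE_COMP_VAL into the slowpath wb_comp word, which the callers
 * below poll under dmae_mutex.
 */
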
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

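/* Each of the four STORM processors (X/T/C/U) keeps a list of firmware
 * asserts in its internal memory. bnx2x_mc_assert() walks these lists
 * and prints every entry whose first word differs from the "invalid
 * assert" marker, returning the number of asserts found.
 */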
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

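/* The MCP keeps a circular text log in its scratchpad; the "mark" word
 * at offset 0xf104 records the current wrap point, so the dump below
 * prints from the mark to the end of the buffer and then from the
 * start of the buffer back up to the mark.
 */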
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

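/* Interrupt delivery goes through the HC (host coalescing) block:
 * HC_REG_CONFIG_x selects between INTx, MSI and MSI-X modes, and on
 * E1H the leading/trailing edge registers choose which attention bits
 * may raise interrupts for this function.
 */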
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

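/* bnx2x_ack_sb() acknowledges a status block to the IGU by writing an
 * igu_ack_register word to the HC command register: it reports the
 * last-seen status block index and tells the IGU (via op/update)
 * whether to leave the line disabled, do nothing, or re-enable
 * interrupts.
 */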
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

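/* SGE handling for TPA: fp->sge_mask is a bitmap with one bit per SGE
 * ring entry. Bits are cleared as the firmware reports pages consumed
 * in a CQE's scatter list, and rx_sge_prod is only advanced over mask
 * elements that have gone completely to zero, which keeps the producer
 * from overtaking pages the hardware may still be using.
 */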
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

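/* TPA (transparent packet aggregation) is this chip's hardware LRO:
 * the firmware aggregates a TCP stream into one skb plus SGE pages.
 * tpa_start parks the partially-filled skb in a per-queue bin and
 * hands a fresh skb to the ring; tpa_stop pulls it back, fixes the IP
 * checksum, attaches the SGE pages as fragments and feeds the result
 * to the stack.
 */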
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

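/* bnx2x_rx_int() is the Rx processing loop (called with a NAPI
 * budget): it walks the completion queue from the software consumer up
 * to the index reported in the status block, dispatching slowpath
 * CQEs, TPA start/end events and regular packets, refilling the BD
 * ring as it goes, and finally publishing the new producers to the
 * firmware.
 */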
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
1735
1736static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1737{
555f6c78 1738 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1739 u16 status = bnx2x_ack_int(bp);
34f80b04 1740 u16 mask;
ca00392c 1741 int i;
a2fbb9ea 1742
34f80b04 1743 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1744 if (unlikely(status == 0)) {
1745 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1746 return IRQ_NONE;
1747 }
f5372251 1748 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1749
34f80b04 1750 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1751 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1752 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1753 return IRQ_HANDLED;
1754 }
1755
3196a88a
EG
1756#ifdef BNX2X_STOP_ON_ERROR
1757 if (unlikely(bp->panic))
1758 return IRQ_HANDLED;
1759#endif
1760
ca00392c
EG
1761 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1762 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1763
ca00392c
EG
1764 mask = 0x2 << fp->sb_id;
1765 if (status & mask) {
1766 /* Handle Rx or Tx according to SB id */
1767 if (fp->is_rx_queue) {
1768 prefetch(fp->rx_cons_sb);
1769 prefetch(&fp->status_blk->u_status_block.
1770 status_block_index);
a2fbb9ea 1771
ca00392c 1772 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1773
ca00392c
EG
1774 } else {
1775 prefetch(fp->tx_cons_sb);
1776 prefetch(&fp->status_blk->c_status_block.
1777 status_block_index);
1778
1779 bnx2x_update_fpsb_idx(fp);
1780 rmb();
1781 bnx2x_tx_int(fp);
1782
1783 /* Re-enable interrupts */
1784 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1785 le16_to_cpu(fp->fp_u_idx),
1786 IGU_INT_NOP, 1);
1787 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1788 le16_to_cpu(fp->fp_c_idx),
1789 IGU_INT_ENABLE, 1);
1790 }
1791 status &= ~mask;
1792 }
a2fbb9ea
ET
1793 }
1794
a2fbb9ea 1795
34f80b04 1796 if (unlikely(status & 0x1)) {
1cf167f2 1797 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1798
1799 status &= ~0x1;
1800 if (!status)
1801 return IRQ_HANDLED;
1802 }
1803
34f80b04
EG
1804 if (status)
1805 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1806 status);
a2fbb9ea 1807
c18487ee 1808 return IRQ_HANDLED;
a2fbb9ea
ET
1809}
1810
c18487ee 1811/* end of fast path */
a2fbb9ea 1812
bb2a0f7a 1813static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1814
c18487ee
YR
1815/* Link */
1816
1817/*
1818 * General service functions
1819 */
a2fbb9ea 1820
4a37fb66 1821static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1822{
1823 u32 lock_status;
1824 u32 resource_bit = (1 << resource);
4a37fb66
YG
1825 int func = BP_FUNC(bp);
1826 u32 hw_lock_control_reg;
c18487ee 1827 int cnt;
a2fbb9ea 1828
c18487ee
YR
1829 /* Validating that the resource is within range */
1830 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1831 DP(NETIF_MSG_HW,
1832 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1833 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1834 return -EINVAL;
1835 }
a2fbb9ea 1836
4a37fb66
YG
1837 if (func <= 5) {
1838 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1839 } else {
1840 hw_lock_control_reg =
1841 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1842 }
1843
c18487ee 1844 /* Validating that the resource is not already taken */
4a37fb66 1845 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1846 if (lock_status & resource_bit) {
1847 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1848 lock_status, resource_bit);
1849 return -EEXIST;
1850 }
a2fbb9ea 1851
46230476
EG
1852 /* Try for 5 seconds, polling every 5ms */
1853 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1854 /* Try to acquire the lock */
4a37fb66
YG
1855 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1856 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1857 if (lock_status & resource_bit)
1858 return 0;
a2fbb9ea 1859
c18487ee 1860 msleep(5);
a2fbb9ea 1861 }
c18487ee
YR
1862 DP(NETIF_MSG_HW, "Timeout\n");
1863 return -EAGAIN;
1864}
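/* A minimal usage sketch (illustrative): the lock is always paired with
 * bnx2x_release_hw_lock() below, as bnx2x_set_gpio() does for
 * HW_LOCK_RESOURCE_GPIO:
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO))
 *		return;		/* not obtained within 5 seconds */
 *	... access the shared register ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */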
a2fbb9ea 1865
4a37fb66 1866static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1867{
1868 u32 lock_status;
1869 u32 resource_bit = (1 << resource);
4a37fb66
YG
1870 int func = BP_FUNC(bp);
1871 u32 hw_lock_control_reg;
a2fbb9ea 1872
c18487ee
YR
1873 /* Validating that the resource is within range */
1874 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1875 DP(NETIF_MSG_HW,
1876 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1877 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1878 return -EINVAL;
1879 }
1880
4a37fb66
YG
1881 if (func <= 5) {
1882 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1883 } else {
1884 hw_lock_control_reg =
1885 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1886 }
1887
c18487ee 1888 /* Validating that the resource is currently taken */
4a37fb66 1889 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1890 if (!(lock_status & resource_bit)) {
1891 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1892 lock_status, resource_bit);
1893 return -EFAULT;
a2fbb9ea
ET
1894 }
1895
4a37fb66 1896 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1897 return 0;
1898}
1899
1900/* HW Lock for shared dual port PHYs */
4a37fb66 1901static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1902{
34f80b04 1903 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1904
46c6a674
EG
1905 if (bp->port.need_hw_lock)
1906 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1907}
a2fbb9ea 1908
4a37fb66 1909static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1910{
46c6a674
EG
1911 if (bp->port.need_hw_lock)
1912 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1913
34f80b04 1914 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1915}
a2fbb9ea 1916
4acac6a5
EG
1917int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1918{
1919 /* The GPIO should be swapped if swap register is set and active */
1920 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1921 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1922 int gpio_shift = gpio_num +
1923 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1924 u32 gpio_mask = (1 << gpio_shift);
1925 u32 gpio_reg;
1926 int value;
1927
1928 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1929 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1930 return -EINVAL;
1931 }
1932
1933 /* read GPIO value */
1934 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1935
1936 /* get the requested pin value */
1937 if ((gpio_reg & gpio_mask) == gpio_mask)
1938 value = 1;
1939 else
1940 value = 0;
1941
1942 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1943
1944 return value;
1945}
1946
17de50b7 1947int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1948{
1949 /* The GPIO should be swapped if swap register is set and active */
1950 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1951 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1952 int gpio_shift = gpio_num +
1953 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1954 u32 gpio_mask = (1 << gpio_shift);
1955 u32 gpio_reg;
a2fbb9ea 1956
c18487ee
YR
1957 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1958 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1959 return -EINVAL;
1960 }
a2fbb9ea 1961
4a37fb66 1962 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1963 /* read GPIO and mask except the float bits */
1964 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1965
c18487ee
YR
1966 switch (mode) {
1967 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1968 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1969 gpio_num, gpio_shift);
1970 /* clear FLOAT and set CLR */
1971 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1972 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1973 break;
a2fbb9ea 1974
c18487ee
YR
1975 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1976 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1977 gpio_num, gpio_shift);
1978 /* clear FLOAT and set SET */
1979 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1980 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1981 break;
a2fbb9ea 1982
17de50b7 1983 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1984 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1985 gpio_num, gpio_shift);
1986 /* set FLOAT */
1987 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1988 break;
a2fbb9ea 1989
c18487ee
YR
1990 default:
1991 break;
a2fbb9ea
ET
1992 }
1993
c18487ee 1994 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1995 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1996
c18487ee 1997 return 0;
a2fbb9ea
ET
1998}
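/* e.g. the fan-failure path in bnx2x_attn_int_deasserted0() below uses
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 * to put an SFX7101 PHY into low power mode.
 */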
1999
4acac6a5
EG
2000int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2001{
2002 /* The GPIO should be swapped if swap register is set and active */
2003 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2004 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2005 int gpio_shift = gpio_num +
2006 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2007 u32 gpio_mask = (1 << gpio_shift);
2008 u32 gpio_reg;
2009
2010 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2011 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2012 return -EINVAL;
2013 }
2014
2015 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2016 /* read GPIO int */
2017 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2018
2019 switch (mode) {
2020 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2021 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2022 "output low\n", gpio_num, gpio_shift);
2023 /* clear SET and set CLR */
2024 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2025 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2026 break;
2027
2028 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2029 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2030 "output high\n", gpio_num, gpio_shift);
2031 /* clear CLR and set SET */
2032 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2033 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2034 break;
2035
2036 default:
2037 break;
2038 }
2039
2040 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2041 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2042
2043 return 0;
2044}
2045
c18487ee 2046static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2047{
c18487ee
YR
2048 u32 spio_mask = (1 << spio_num);
2049 u32 spio_reg;
a2fbb9ea 2050
c18487ee
YR
2051 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2052 (spio_num > MISC_REGISTERS_SPIO_7)) {
2053 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2054 return -EINVAL;
a2fbb9ea
ET
2055 }
2056
4a37fb66 2057 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2058 /* read SPIO and mask except the float bits */
2059 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2060
c18487ee 2061 switch (mode) {
6378c025 2062 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2063 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2064 /* clear FLOAT and set CLR */
2065 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2066 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2067 break;
a2fbb9ea 2068
6378c025 2069 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2070 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2071 /* clear FLOAT and set SET */
2072 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2073 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2074 break;
a2fbb9ea 2075
c18487ee
YR
2076 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2077 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2078 /* set FLOAT */
2079 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2080 break;
a2fbb9ea 2081
c18487ee
YR
2082 default:
2083 break;
a2fbb9ea
ET
2084 }
2085
c18487ee 2086 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2087 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2088
a2fbb9ea
ET
2089 return 0;
2090}
2091
c18487ee 2092static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2093{
ad33ea3a
EG
2094 switch (bp->link_vars.ieee_fc &
2095 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2096 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2097 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2098 ADVERTISED_Pause);
2099 break;
356e2385 2100
c18487ee 2101 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2102 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2103 ADVERTISED_Pause);
2104 break;
356e2385 2105
c18487ee 2106 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2107 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2108 break;
356e2385 2109
c18487ee 2110 default:
34f80b04 2111 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2112 ADVERTISED_Pause);
2113 break;
2114 }
2115}
f1410647 2116
c18487ee
YR
2117static void bnx2x_link_report(struct bnx2x *bp)
2118{
2691d51d
EG
2119 if (bp->state == BNX2X_STATE_DISABLED) {
2120 netif_carrier_off(bp->dev);
2121 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2122 return;
2123 }
2124
c18487ee
YR
2125 if (bp->link_vars.link_up) {
2126 if (bp->state == BNX2X_STATE_OPEN)
2127 netif_carrier_on(bp->dev);
2128 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2129
c18487ee 2130 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2131
c18487ee
YR
2132 if (bp->link_vars.duplex == DUPLEX_FULL)
2133 printk("full duplex");
2134 else
2135 printk("half duplex");
f1410647 2136
c0700f90
DM
2137 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2138 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2139 printk(", receive ");
356e2385
EG
2140 if (bp->link_vars.flow_ctrl &
2141 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2142 printk("& transmit ");
2143 } else {
2144 printk(", transmit ");
2145 }
2146 printk("flow control ON");
2147 }
2148 printk("\n");
f1410647 2149
c18487ee
YR
2150 } else { /* link_down */
2151 netif_carrier_off(bp->dev);
2152 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2153 }
c18487ee
YR
2154}
2155
b5bf9068 2156static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2157{
19680c48
EG
2158 if (!BP_NOMCP(bp)) {
2159 u8 rc;
a2fbb9ea 2160
19680c48 2161 /* Initialize link parameters structure variables */
8c99e7b0
YR
2162 /* It is recommended to turn off RX FC for jumbo frames
2163 for better performance */
0c593270 2164 if (bp->dev->mtu > 5000)
c0700f90 2165 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2166 else
c0700f90 2167 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2168
4a37fb66 2169 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2170
2171 if (load_mode == LOAD_DIAG)
2172 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2173
19680c48 2174 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2175
4a37fb66 2176 bnx2x_release_phy_lock(bp);
a2fbb9ea 2177
3c96c68b
EG
2178 bnx2x_calc_fc_adv(bp);
2179
b5bf9068
EG
2180 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2181 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2182 bnx2x_link_report(bp);
b5bf9068 2183 }
34f80b04 2184
19680c48
EG
2185 return rc;
2186 }
f5372251 2187 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2188 return -EINVAL;
a2fbb9ea
ET
2189}
2190
c18487ee 2191static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2192{
19680c48 2193 if (!BP_NOMCP(bp)) {
4a37fb66 2194 bnx2x_acquire_phy_lock(bp);
19680c48 2195 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2196 bnx2x_release_phy_lock(bp);
a2fbb9ea 2197
19680c48
EG
2198 bnx2x_calc_fc_adv(bp);
2199 } else
f5372251 2200 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2201}
a2fbb9ea 2202
c18487ee
YR
2203static void bnx2x__link_reset(struct bnx2x *bp)
2204{
19680c48 2205 if (!BP_NOMCP(bp)) {
4a37fb66 2206 bnx2x_acquire_phy_lock(bp);
589abe3a 2207 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2208 bnx2x_release_phy_lock(bp);
19680c48 2209 } else
f5372251 2210 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2211}
a2fbb9ea 2212
c18487ee
YR
2213static u8 bnx2x_link_test(struct bnx2x *bp)
2214{
2215 u8 rc;
a2fbb9ea 2216
4a37fb66 2217 bnx2x_acquire_phy_lock(bp);
c18487ee 2218 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2219 bnx2x_release_phy_lock(bp);
a2fbb9ea 2220
c18487ee
YR
2221 return rc;
2222}
a2fbb9ea 2223
8a1c38d1 2224static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2225{
8a1c38d1
EG
2226 u32 r_param = bp->link_vars.line_speed / 8;
2227 u32 fair_periodic_timeout_usec;
2228 u32 t_fair;
34f80b04 2229
8a1c38d1
EG
2230 memset(&(bp->cmng.rs_vars), 0,
2231 sizeof(struct rate_shaping_vars_per_port));
2232 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2233
8a1c38d1
EG
2234 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2235 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2236
8a1c38d1
EG
2237 /* this is the threshold below which no timer arming will occur
2238 1.25 coefficient is for the threshold to be a little bigger
2239 than the real time, to compensate for timer in-accuracy */
2240 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2241 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2242
8a1c38d1
EG
2243 /* resolution of fairness timer */
2244 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2245 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2246 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2247
8a1c38d1
EG
2248 /* this is the threshold below which we won't arm the timer anymore */
2249 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2250
8a1c38d1
EG
 2251 /* we multiply by 1e3/8 to get bytes/msec.
 2252 We don't want the credits to exceed
 2253 t_fair*FAIR_MEM (the algorithm resolution) */
2254 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2255 /* since each tick is 4 usec */
2256 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2257}
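/* Worked example (numbers derived from the comments above): at
 * 10000 Mbps, r_param = 10000/8 = 1250 bytes/usec, so rs_threshold is
 * RS_PERIODIC_TIMEOUT_USEC * 1250 * 5/4, i.e. the byte count of one
 * period with a 1.25 safety margin.  Since t_fair is 1000 usec at 10G,
 * T_FAIR_COEF works out to 10^7, which indeed gives 10000 usec at 1G.
 */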
2258
2691d51d
EG
 2259/* Calculates the sum of vn_min_rates.
 2260 It is needed for further normalization of the min_rates.
 2261 Returns:
 2262 sum of vn_min_rates.
 2263 or
 2264 0 - if all the min_rates are 0.
 2265 In the latter case the fairness algorithm should be deactivated.
 2266 If not all min_rates are zero, those that are zero will be set to 1.
 2267 */
2268static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2269{
2270 int all_zero = 1;
2271 int port = BP_PORT(bp);
2272 int vn;
2273
2274 bp->vn_weight_sum = 0;
2275 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2276 int func = 2*vn + port;
2277 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2278 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2279 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2280
2281 /* Skip hidden vns */
2282 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2283 continue;
2284
2285 /* If min rate is zero - set it to 1 */
2286 if (!vn_min_rate)
2287 vn_min_rate = DEF_MIN_RATE;
2288 else
2289 all_zero = 0;
2290
2291 bp->vn_weight_sum += vn_min_rate;
2292 }
2293
2294 /* ... only if all min rates are zeros - disable fairness */
2295 if (all_zero)
2296 bp->vn_weight_sum = 0;
2297}
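/* Illustration (hypothetical mf_cfg values): with MIN_BW fields of
 * 10, 0, 40 and 50, the per-vn rates become 1000, DEF_MIN_RATE, 4000
 * and 5000, and vn_weight_sum is their total; only when every visible
 * vn is configured to 0 does the sum stay 0 and fairness get disabled.
 */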
2298
8a1c38d1 2299static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2300{
2301 struct rate_shaping_vars_per_vn m_rs_vn;
2302 struct fairness_vars_per_vn m_fair_vn;
2303 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2304 u16 vn_min_rate, vn_max_rate;
2305 int i;
2306
2307 /* If function is hidden - set min and max to zeroes */
2308 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2309 vn_min_rate = 0;
2310 vn_max_rate = 0;
2311
2312 } else {
2313 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2314 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 8a1c38d1 2315 /* If fairness is enabled (not all min rates are zero) and
 34f80b04 2316 the current min rate is zero, set it to 1.
 33471629 2317 This is a requirement of the algorithm. */
8a1c38d1 2318 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2319 vn_min_rate = DEF_MIN_RATE;
2320 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2321 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2322 }
2323
8a1c38d1
EG
2324 DP(NETIF_MSG_IFUP,
2325 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2326 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2327
2328 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2329 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2330
2331 /* global vn counter - maximal Mbps for this vn */
2332 m_rs_vn.vn_counter.rate = vn_max_rate;
2333
2334 /* quota - number of bytes transmitted in this period */
2335 m_rs_vn.vn_counter.quota =
2336 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2337
8a1c38d1 2338 if (bp->vn_weight_sum) {
34f80b04
EG
2339 /* credit for each period of the fairness algorithm:
 2340 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2341 vn_weight_sum should not be larger than 10000, thus
2342 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2343 than zero */
34f80b04 2344 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2345 max((u32)(vn_min_rate * (T_FAIR_COEF /
2346 (8 * bp->vn_weight_sum))),
2347 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2348 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2349 m_fair_vn.vn_credit_delta);
2350 }
2351
34f80b04
EG
2352 /* Store it to internal memory */
2353 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2354 REG_WR(bp, BAR_XSTRORM_INTMEM +
2355 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2356 ((u32 *)(&m_rs_vn))[i]);
2357
2358 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2359 REG_WR(bp, BAR_XSTRORM_INTMEM +
2360 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2361 ((u32 *)(&m_fair_vn))[i]);
2362}
2363
8a1c38d1 2364
c18487ee
YR
2365/* This function is called upon link interrupt */
2366static void bnx2x_link_attn(struct bnx2x *bp)
2367{
bb2a0f7a
YG
2368 /* Make sure that we are synced with the current statistics */
2369 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2370
c18487ee 2371 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2372
bb2a0f7a
YG
2373 if (bp->link_vars.link_up) {
2374
1c06328c 2375 /* dropless flow control */
a18f5128 2376 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2377 int port = BP_PORT(bp);
2378 u32 pause_enabled = 0;
2379
2380 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2381 pause_enabled = 1;
2382
2383 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2384 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2385 pause_enabled);
2386 }
2387
bb2a0f7a
YG
2388 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2389 struct host_port_stats *pstats;
2390
2391 pstats = bnx2x_sp(bp, port_stats);
2392 /* reset old bmac stats */
2393 memset(&(pstats->mac_stx[0]), 0,
2394 sizeof(struct mac_stx));
2395 }
2396 if ((bp->state == BNX2X_STATE_OPEN) ||
2397 (bp->state == BNX2X_STATE_DISABLED))
2398 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2399 }
2400
c18487ee
YR
2401 /* indicate link status */
2402 bnx2x_link_report(bp);
34f80b04
EG
2403
2404 if (IS_E1HMF(bp)) {
8a1c38d1 2405 int port = BP_PORT(bp);
34f80b04 2406 int func;
8a1c38d1 2407 int vn;
34f80b04
EG
2408
2409 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2410 if (vn == BP_E1HVN(bp))
2411 continue;
2412
8a1c38d1 2413 func = ((vn << 1) | port);
34f80b04
EG
2414
2415 /* Set the attention towards other drivers
2416 on the same port */
2417 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2418 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2419 }
34f80b04 2420
8a1c38d1
EG
2421 if (bp->link_vars.link_up) {
2422 int i;
2423
2424 /* Init rate shaping and fairness contexts */
2425 bnx2x_init_port_minmax(bp);
34f80b04 2426
34f80b04 2427 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2428 bnx2x_init_vn_minmax(bp, 2*vn + port);
2429
2430 /* Store it to internal memory */
2431 for (i = 0;
2432 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2433 REG_WR(bp, BAR_XSTRORM_INTMEM +
2434 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2435 ((u32 *)(&bp->cmng))[i]);
2436 }
34f80b04 2437 }
c18487ee 2438}
a2fbb9ea 2439
c18487ee
YR
2440static void bnx2x__link_status_update(struct bnx2x *bp)
2441{
2691d51d
EG
2442 int func = BP_FUNC(bp);
2443
c18487ee
YR
2444 if (bp->state != BNX2X_STATE_OPEN)
2445 return;
a2fbb9ea 2446
c18487ee 2447 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2448
bb2a0f7a
YG
2449 if (bp->link_vars.link_up)
2450 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2451 else
2452 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2453
2691d51d
EG
2454 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2455 bnx2x_calc_vn_weight_sum(bp);
2456
c18487ee
YR
2457 /* indicate link status */
2458 bnx2x_link_report(bp);
a2fbb9ea 2459}
a2fbb9ea 2460
34f80b04
EG
2461static void bnx2x_pmf_update(struct bnx2x *bp)
2462{
2463 int port = BP_PORT(bp);
2464 u32 val;
2465
2466 bp->port.pmf = 1;
2467 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2468
2469 /* enable nig attention */
2470 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2471 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2472 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2473
2474 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2475}
2476
c18487ee 2477/* end of Link */
a2fbb9ea
ET
2478
2479/* slow path */
2480
2481/*
2482 * General service functions
2483 */
2484
2691d51d
EG
2485/* send the MCP a request, block until there is a reply */
2486u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2487{
2488 int func = BP_FUNC(bp);
2489 u32 seq = ++bp->fw_seq;
2490 u32 rc = 0;
2491 u32 cnt = 1;
2492 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2493
2494 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2495 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2496
2497 do {
 2498 /* let the FW do its magic ... */
2499 msleep(delay);
2500
2501 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2502
 2503 /* Give the FW up to 2 seconds (200 * 10ms) */
2504 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2505
2506 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2507 cnt*delay, rc, seq);
2508
2509 /* is this a reply to our command? */
2510 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2511 rc &= FW_MSG_CODE_MASK;
2512 else {
2513 /* FW BUG! */
2514 BNX2X_ERR("FW failed to respond!\n");
2515 bnx2x_fw_dump(bp);
2516 rc = 0;
2517 }
2518
2519 return rc;
2520}
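/* Typical invocation (see bnx2x_dcc_event() below):
 *
 *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *
 * A zero return means the MCP never echoed the sequence number back,
 * i.e. the firmware did not respond within the 2 second window.
 */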
2521
2522static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2523static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2524static void bnx2x_set_rx_mode(struct net_device *dev);
2525
2526static void bnx2x_e1h_disable(struct bnx2x *bp)
2527{
2528 int port = BP_PORT(bp);
2529 int i;
2530
2531 bp->rx_mode = BNX2X_RX_MODE_NONE;
2532 bnx2x_set_storm_rx_mode(bp);
2533
2534 netif_tx_disable(bp->dev);
2535 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2536
2537 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2538
2539 bnx2x_set_mac_addr_e1h(bp, 0);
2540
2541 for (i = 0; i < MC_HASH_SIZE; i++)
2542 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2543
2544 netif_carrier_off(bp->dev);
2545}
2546
2547static void bnx2x_e1h_enable(struct bnx2x *bp)
2548{
2549 int port = BP_PORT(bp);
2550
2551 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2552
2553 bnx2x_set_mac_addr_e1h(bp, 1);
2554
2555 /* Tx queue should be only reenabled */
2556 netif_tx_wake_all_queues(bp->dev);
2557
2558 /* Initialize the receive filter. */
2559 bnx2x_set_rx_mode(bp->dev);
2560}
2561
2562static void bnx2x_update_min_max(struct bnx2x *bp)
2563{
2564 int port = BP_PORT(bp);
2565 int vn, i;
2566
2567 /* Init rate shaping and fairness contexts */
2568 bnx2x_init_port_minmax(bp);
2569
2570 bnx2x_calc_vn_weight_sum(bp);
2571
2572 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2573 bnx2x_init_vn_minmax(bp, 2*vn + port);
2574
2575 if (bp->port.pmf) {
2576 int func;
2577
2578 /* Set the attention towards other drivers on the same port */
2579 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2580 if (vn == BP_E1HVN(bp))
2581 continue;
2582
2583 func = ((vn << 1) | port);
2584 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2585 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2586 }
2587
2588 /* Store it to internal memory */
2589 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2590 REG_WR(bp, BAR_XSTRORM_INTMEM +
2591 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2592 ((u32 *)(&bp->cmng))[i]);
2593 }
2594}
2595
2596static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2597{
2598 int func = BP_FUNC(bp);
2599
2600 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2601 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2602
2603 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2604
2605 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2606 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2607 bp->state = BNX2X_STATE_DISABLED;
2608
2609 bnx2x_e1h_disable(bp);
2610 } else {
2611 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2612 bp->state = BNX2X_STATE_OPEN;
2613
2614 bnx2x_e1h_enable(bp);
2615 }
2616 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2617 }
2618 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2619
2620 bnx2x_update_min_max(bp);
2621 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2622 }
2623
2624 /* Report results to MCP */
2625 if (dcc_event)
2626 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2627 else
2628 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2629}
2630
a2fbb9ea
ET
2631/* the slow path queue is odd since completions arrive on the fastpath ring */
2632static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2633 u32 data_hi, u32 data_lo, int common)
2634{
34f80b04 2635 int func = BP_FUNC(bp);
a2fbb9ea 2636
34f80b04
EG
2637 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2638 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2639 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2640 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2641 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2642
2643#ifdef BNX2X_STOP_ON_ERROR
2644 if (unlikely(bp->panic))
2645 return -EIO;
2646#endif
2647
34f80b04 2648 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2649
2650 if (!bp->spq_left) {
2651 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2652 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2653 bnx2x_panic();
2654 return -EBUSY;
2655 }
f1410647 2656
a2fbb9ea
ET
 2657 /* CID needs the port number to be encoded in it */
2658 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2659 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2660 HW_CID(bp, cid)));
2661 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2662 if (common)
2663 bp->spq_prod_bd->hdr.type |=
2664 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2665
2666 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2667 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2668
2669 bp->spq_left--;
2670
2671 if (bp->spq_prod_bd == bp->spq_last_bd) {
2672 bp->spq_prod_bd = bp->spq;
2673 bp->spq_prod_idx = 0;
2674 DP(NETIF_MSG_TIMER, "end of spq\n");
2675
2676 } else {
2677 bp->spq_prod_bd++;
2678 bp->spq_prod_idx++;
2679 }
2680
37dbbf32
EG
2681 /* Make sure that BD data is updated before writing the producer */
2682 wmb();
2683
34f80b04 2684 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2685 bp->spq_prod_idx);
2686
37dbbf32
EG
2687 mmiowb();
2688
34f80b04 2689 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2690 return 0;
2691}
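/* A representative caller (sketch; see bnx2x_storm_stats_post() below):
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 *
 * data_hi/data_lo carry the ramrod payload, and 'common' selects a
 * common (non per-client) ramrod type.
 */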
2692
2693/* acquire split MCP access lock register */
4a37fb66 2694static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2695{
a2fbb9ea 2696 u32 i, j, val;
34f80b04 2697 int rc = 0;
a2fbb9ea
ET
2698
2699 might_sleep();
2700 i = 100;
2701 for (j = 0; j < i*10; j++) {
2702 val = (1UL << 31);
2703 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2704 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2705 if (val & (1L << 31))
2706 break;
2707
2708 msleep(5);
2709 }
a2fbb9ea 2710 if (!(val & (1L << 31))) {
19680c48 2711 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2712 rc = -EBUSY;
2713 }
2714
2715 return rc;
2716}
2717
4a37fb66
YG
2718/* release split MCP access lock register */
2719static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2720{
2721 u32 val = 0;
2722
2723 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2724}
2725
2726static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2727{
2728 struct host_def_status_block *def_sb = bp->def_status_blk;
2729 u16 rc = 0;
2730
2731 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2732 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2733 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2734 rc |= 1;
2735 }
2736 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2737 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2738 rc |= 2;
2739 }
2740 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2741 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2742 rc |= 4;
2743 }
2744 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2745 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2746 rc |= 8;
2747 }
2748 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2749 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2750 rc |= 16;
2751 }
2752 return rc;
2753}
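/* The returned mask encodes which indices changed: bit 0 - attention
 * bits, bit 1 - cstorm, bit 2 - ustorm, bit 3 - xstorm, bit 4 - tstorm.
 * bnx2x_sp_task() below only tests bit 0 (HW attentions) and then acks
 * all of the indices regardless.
 */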
2754
2755/*
2756 * slow path service functions
2757 */
2758
2759static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2760{
34f80b04 2761 int port = BP_PORT(bp);
5c862848
EG
2762 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2763 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2764 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2765 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2766 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2767 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2768 u32 aeu_mask;
87942b46 2769 u32 nig_mask = 0;
a2fbb9ea 2770
a2fbb9ea
ET
2771 if (bp->attn_state & asserted)
2772 BNX2X_ERR("IGU ERROR\n");
2773
3fcaf2e5
EG
2774 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2775 aeu_mask = REG_RD(bp, aeu_addr);
2776
a2fbb9ea 2777 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2778 aeu_mask, asserted);
2779 aeu_mask &= ~(asserted & 0xff);
2780 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2781
3fcaf2e5
EG
2782 REG_WR(bp, aeu_addr, aeu_mask);
2783 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2784
3fcaf2e5 2785 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2786 bp->attn_state |= asserted;
3fcaf2e5 2787 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2788
2789 if (asserted & ATTN_HARD_WIRED_MASK) {
2790 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2791
a5e9a7cf
EG
2792 bnx2x_acquire_phy_lock(bp);
2793
877e9aa4 2794 /* save nig interrupt mask */
87942b46 2795 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2796 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2797
c18487ee 2798 bnx2x_link_attn(bp);
a2fbb9ea
ET
2799
2800 /* handle unicore attn? */
2801 }
2802 if (asserted & ATTN_SW_TIMER_4_FUNC)
2803 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2804
2805 if (asserted & GPIO_2_FUNC)
2806 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2807
2808 if (asserted & GPIO_3_FUNC)
2809 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2810
2811 if (asserted & GPIO_4_FUNC)
2812 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2813
2814 if (port == 0) {
2815 if (asserted & ATTN_GENERAL_ATTN_1) {
2816 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2817 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2818 }
2819 if (asserted & ATTN_GENERAL_ATTN_2) {
2820 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2821 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2822 }
2823 if (asserted & ATTN_GENERAL_ATTN_3) {
2824 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2825 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2826 }
2827 } else {
2828 if (asserted & ATTN_GENERAL_ATTN_4) {
2829 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2830 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2831 }
2832 if (asserted & ATTN_GENERAL_ATTN_5) {
2833 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2834 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2835 }
2836 if (asserted & ATTN_GENERAL_ATTN_6) {
2837 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2838 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2839 }
2840 }
2841
2842 } /* if hardwired */
2843
5c862848
EG
2844 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2845 asserted, hc_addr);
2846 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2847
2848 /* now set back the mask */
a5e9a7cf 2849 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2850 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2851 bnx2x_release_phy_lock(bp);
2852 }
a2fbb9ea
ET
2853}
2854
fd4ef40d
EG
2855static inline void bnx2x_fan_failure(struct bnx2x *bp)
2856{
2857 int port = BP_PORT(bp);
2858
2859 /* mark the failure */
2860 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2861 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2862 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2863 bp->link_params.ext_phy_config);
2864
2865 /* log the failure */
2866 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2867 " the driver to shutdown the card to prevent permanent"
2868 " damage. Please contact Dell Support for assistance\n",
2869 bp->dev->name);
2870}
877e9aa4 2871static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2872{
34f80b04 2873 int port = BP_PORT(bp);
877e9aa4 2874 int reg_offset;
4d295db0 2875 u32 val, swap_val, swap_override;
877e9aa4 2876
34f80b04
EG
2877 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2878 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2879
34f80b04 2880 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2881
2882 val = REG_RD(bp, reg_offset);
2883 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2884 REG_WR(bp, reg_offset, val);
2885
2886 BNX2X_ERR("SPIO5 hw attention\n");
2887
fd4ef40d 2888 /* Fan failure attention */
35b19ba5
EG
2889 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2890 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2891 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2892 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2893 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2894 /* The PHY reset is controlled by GPIO 1 */
2895 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2896 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2897 break;
2898
4d295db0
EG
2899 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2900 /* The PHY reset is controlled by GPIO 1 */
2901 /* fake the port number to cancel the swap done in
2902 set_gpio() */
2903 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2904 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2905 port = (swap_val && swap_override) ^ 1;
2906 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2907 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2908 break;
2909
877e9aa4
ET
2910 default:
2911 break;
2912 }
fd4ef40d 2913 bnx2x_fan_failure(bp);
877e9aa4 2914 }
34f80b04 2915
589abe3a
EG
2916 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2917 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2918 bnx2x_acquire_phy_lock(bp);
2919 bnx2x_handle_module_detect_int(&bp->link_params);
2920 bnx2x_release_phy_lock(bp);
2921 }
2922
34f80b04
EG
2923 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2924
2925 val = REG_RD(bp, reg_offset);
2926 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2927 REG_WR(bp, reg_offset, val);
2928
2929 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2930 (attn & HW_INTERRUT_ASSERT_SET_0));
2931 bnx2x_panic();
2932 }
877e9aa4
ET
2933}
2934
2935static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2936{
2937 u32 val;
2938
0626b899 2939 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2940
2941 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2942 BNX2X_ERR("DB hw attention 0x%x\n", val);
2943 /* DORQ discard attention */
2944 if (val & 0x2)
2945 BNX2X_ERR("FATAL error from DORQ\n");
2946 }
34f80b04
EG
2947
2948 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2949
2950 int port = BP_PORT(bp);
2951 int reg_offset;
2952
2953 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2954 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2955
2956 val = REG_RD(bp, reg_offset);
2957 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2958 REG_WR(bp, reg_offset, val);
2959
2960 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2961 (attn & HW_INTERRUT_ASSERT_SET_1));
2962 bnx2x_panic();
2963 }
877e9aa4
ET
2964}
2965
2966static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2967{
2968 u32 val;
2969
2970 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2971
2972 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2973 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2974 /* CFC error attention */
2975 if (val & 0x2)
2976 BNX2X_ERR("FATAL error from CFC\n");
2977 }
2978
2979 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2980
2981 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2982 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2983 /* RQ_USDMDP_FIFO_OVERFLOW */
2984 if (val & 0x18000)
2985 BNX2X_ERR("FATAL error from PXP\n");
2986 }
34f80b04
EG
2987
2988 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2989
2990 int port = BP_PORT(bp);
2991 int reg_offset;
2992
2993 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2994 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2995
2996 val = REG_RD(bp, reg_offset);
2997 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2998 REG_WR(bp, reg_offset, val);
2999
3000 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3001 (attn & HW_INTERRUT_ASSERT_SET_2));
3002 bnx2x_panic();
3003 }
877e9aa4
ET
3004}
3005
3006static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3007{
34f80b04
EG
3008 u32 val;
3009
877e9aa4
ET
3010 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3011
34f80b04
EG
3012 if (attn & BNX2X_PMF_LINK_ASSERT) {
3013 int func = BP_FUNC(bp);
3014
3015 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2691d51d
EG
3016 val = SHMEM_RD(bp, func_mb[func].drv_status);
3017 if (val & DRV_STATUS_DCC_EVENT_MASK)
3018 bnx2x_dcc_event(bp,
3019 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3020 bnx2x__link_status_update(bp);
2691d51d 3021 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3022 bnx2x_pmf_update(bp);
3023
3024 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3025
3026 BNX2X_ERR("MC assert!\n");
3027 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3028 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3029 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3030 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3031 bnx2x_panic();
3032
3033 } else if (attn & BNX2X_MCP_ASSERT) {
3034
3035 BNX2X_ERR("MCP assert!\n");
3036 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3037 bnx2x_fw_dump(bp);
877e9aa4
ET
3038
3039 } else
3040 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3041 }
3042
3043 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3044 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3045 if (attn & BNX2X_GRC_TIMEOUT) {
3046 val = CHIP_IS_E1H(bp) ?
3047 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3048 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3049 }
3050 if (attn & BNX2X_GRC_RSV) {
3051 val = CHIP_IS_E1H(bp) ?
3052 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3053 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3054 }
877e9aa4 3055 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3056 }
3057}
3058
3059static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3060{
a2fbb9ea
ET
3061 struct attn_route attn;
3062 struct attn_route group_mask;
34f80b04 3063 int port = BP_PORT(bp);
877e9aa4 3064 int index;
a2fbb9ea
ET
3065 u32 reg_addr;
3066 u32 val;
3fcaf2e5 3067 u32 aeu_mask;
a2fbb9ea
ET
3068
3069 /* need to take HW lock because MCP or other port might also
3070 try to handle this event */
4a37fb66 3071 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3072
3073 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3074 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3075 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3076 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3077 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3078 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3079
3080 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3081 if (deasserted & (1 << index)) {
3082 group_mask = bp->attn_group[index];
3083
34f80b04
EG
3084 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3085 index, group_mask.sig[0], group_mask.sig[1],
3086 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3087
877e9aa4
ET
3088 bnx2x_attn_int_deasserted3(bp,
3089 attn.sig[3] & group_mask.sig[3]);
3090 bnx2x_attn_int_deasserted1(bp,
3091 attn.sig[1] & group_mask.sig[1]);
3092 bnx2x_attn_int_deasserted2(bp,
3093 attn.sig[2] & group_mask.sig[2]);
3094 bnx2x_attn_int_deasserted0(bp,
3095 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3096
a2fbb9ea
ET
3097 if ((attn.sig[0] & group_mask.sig[0] &
3098 HW_PRTY_ASSERT_SET_0) ||
3099 (attn.sig[1] & group_mask.sig[1] &
3100 HW_PRTY_ASSERT_SET_1) ||
3101 (attn.sig[2] & group_mask.sig[2] &
3102 HW_PRTY_ASSERT_SET_2))
6378c025 3103 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3104 }
3105 }
3106
4a37fb66 3107 bnx2x_release_alr(bp);
a2fbb9ea 3108
5c862848 3109 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3110
3111 val = ~deasserted;
3fcaf2e5
EG
3112 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3113 val, reg_addr);
5c862848 3114 REG_WR(bp, reg_addr, val);
a2fbb9ea 3115
a2fbb9ea 3116 if (~bp->attn_state & deasserted)
3fcaf2e5 3117 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3118
3119 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3120 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3121
3fcaf2e5
EG
3122 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3123 aeu_mask = REG_RD(bp, reg_addr);
3124
3125 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3126 aeu_mask, deasserted);
3127 aeu_mask |= (deasserted & 0xff);
3128 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3129
3fcaf2e5
EG
3130 REG_WR(bp, reg_addr, aeu_mask);
3131 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3132
3133 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3134 bp->attn_state &= ~deasserted;
3135 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3136}
3137
3138static void bnx2x_attn_int(struct bnx2x *bp)
3139{
3140 /* read local copy of bits */
68d59484
EG
3141 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3142 attn_bits);
3143 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3144 attn_bits_ack);
a2fbb9ea
ET
3145 u32 attn_state = bp->attn_state;
3146
3147 /* look for changed bits */
3148 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3149 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3150
3151 DP(NETIF_MSG_HW,
3152 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3153 attn_bits, attn_ack, asserted, deasserted);
3154
3155 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3156 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3157
3158 /* handle bits that were raised */
3159 if (asserted)
3160 bnx2x_attn_int_asserted(bp, asserted);
3161
3162 if (deasserted)
3163 bnx2x_attn_int_deasserted(bp, deasserted);
3164}
3165
3166static void bnx2x_sp_task(struct work_struct *work)
3167{
1cf167f2 3168 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3169 u16 status;
3170
34f80b04 3171
a2fbb9ea
ET
3172 /* Return here if interrupt is disabled */
3173 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3174 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3175 return;
3176 }
3177
3178 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3179/* if (status == 0) */
3180/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3181
3196a88a 3182 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3183
877e9aa4
ET
3184 /* HW attentions */
3185 if (status & 0x1)
a2fbb9ea 3186 bnx2x_attn_int(bp);
a2fbb9ea 3187
68d59484 3188 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3189 IGU_INT_NOP, 1);
3190 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3191 IGU_INT_NOP, 1);
3192 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3193 IGU_INT_NOP, 1);
3194 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3195 IGU_INT_NOP, 1);
3196 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3197 IGU_INT_ENABLE, 1);
877e9aa4 3198
a2fbb9ea
ET
3199}
3200
3201static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3202{
3203 struct net_device *dev = dev_instance;
3204 struct bnx2x *bp = netdev_priv(dev);
3205
3206 /* Return here if interrupt is disabled */
3207 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3208 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3209 return IRQ_HANDLED;
3210 }
3211
8d9c5f34 3212 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3213
3214#ifdef BNX2X_STOP_ON_ERROR
3215 if (unlikely(bp->panic))
3216 return IRQ_HANDLED;
3217#endif
3218
1cf167f2 3219 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3220
3221 return IRQ_HANDLED;
3222}
3223
3224/* end of slow path */
3225
3226/* Statistics */
3227
3228/****************************************************************************
3229* Macros
3230****************************************************************************/
3231
a2fbb9ea
ET
3232/* sum[hi:lo] += add[hi:lo] */
3233#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3234 do { \
3235 s_lo += a_lo; \
f5ba6772 3236 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3237 } while (0)
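/* e.g. s = {0, 0xffffffff}, a = {0, 1}: s_lo wraps to 0, the
 * (s_lo < a_lo) test detects the carry and s_hi becomes 1.
 */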
3238
3239/* difference = minuend - subtrahend */
3240#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3241 do { \
bb2a0f7a
YG
3242 if (m_lo < s_lo) { \
3243 /* underflow */ \
a2fbb9ea 3244 d_hi = m_hi - s_hi; \
bb2a0f7a 3245 if (d_hi > 0) { \
6378c025 3246 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3247 d_hi--; \
3248 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3249 } else { \
6378c025 3250 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3251 d_hi = 0; \
3252 d_lo = 0; \
3253 } \
bb2a0f7a
YG
3254 } else { \
3255 /* m_lo >= s_lo */ \
a2fbb9ea 3256 if (m_hi < s_hi) { \
bb2a0f7a
YG
3257 d_hi = 0; \
3258 d_lo = 0; \
3259 } else { \
6378c025 3260 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3261 d_hi = m_hi - s_hi; \
3262 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3263 } \
3264 } \
3265 } while (0)
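/* Worked example: m = {hi 2, lo 5} minus s = {hi 1, lo 10}.  Here
 * m_lo < s_lo, so we borrow: d_hi = 2 - 1 - 1 = 0 and
 * d_lo = 5 + (UINT_MAX - 10) + 1 = 0xfffffffb, which equals
 * (2 * 2^32 + 5) - (1 * 2^32 + 10) = 2^32 - 5.
 */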
3266
bb2a0f7a 3267#define UPDATE_STAT64(s, t) \
a2fbb9ea 3268 do { \
bb2a0f7a
YG
3269 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3270 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3271 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3272 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3273 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3274 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3275 } while (0)
3276
bb2a0f7a 3277#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3278 do { \
bb2a0f7a
YG
3279 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3280 diff.lo, new->s##_lo, old->s##_lo); \
3281 ADD_64(estats->t##_hi, diff.hi, \
3282 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3283 } while (0)
3284
3285/* sum[hi:lo] += add */
3286#define ADD_EXTEND_64(s_hi, s_lo, a) \
3287 do { \
3288 s_lo += a; \
3289 s_hi += (s_lo < a) ? 1 : 0; \
3290 } while (0)
3291
bb2a0f7a 3292#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3293 do { \
bb2a0f7a
YG
3294 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3295 pstats->mac_stx[1].s##_lo, \
3296 new->s); \
a2fbb9ea
ET
3297 } while (0)
3298
bb2a0f7a 3299#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3300 do { \
4781bfad
EG
3301 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3302 old_tclient->s = tclient->s; \
de832a55
EG
3303 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3304 } while (0)
3305
3306#define UPDATE_EXTEND_USTAT(s, t) \
3307 do { \
3308 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3309 old_uclient->s = uclient->s; \
3310 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3311 } while (0)
3312
3313#define UPDATE_EXTEND_XSTAT(s, t) \
3314 do { \
4781bfad
EG
3315 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3316 old_xclient->s = xclient->s; \
de832a55
EG
3317 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3318 } while (0)
3319
3320/* minuend -= subtrahend */
3321#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3322 do { \
3323 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3324 } while (0)
3325
3326/* minuend[hi:lo] -= subtrahend */
3327#define SUB_EXTEND_64(m_hi, m_lo, s) \
3328 do { \
3329 SUB_64(m_hi, 0, m_lo, s); \
3330 } while (0)
3331
3332#define SUB_EXTEND_USTAT(s, t) \
3333 do { \
3334 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3335 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3336 } while (0)
3337
3338/*
3339 * General service functions
3340 */
3341
3342static inline long bnx2x_hilo(u32 *hiref)
3343{
3344 u32 lo = *(hiref + 1);
3345#if (BITS_PER_LONG == 64)
3346 u32 hi = *hiref;
3347
3348 return HILO_U64(hi, lo);
3349#else
3350 return lo;
3351#endif
3352}
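/* Note that on 32-bit builds only the low dword is returned, so a
 * counter above 2^32 appears truncated in the reported statistics.
 */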
3353
3354/*
3355 * Init service functions
3356 */
3357
bb2a0f7a
YG
3358static void bnx2x_storm_stats_post(struct bnx2x *bp)
3359{
3360 if (!bp->stats_pending) {
3361 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3362 int i, rc;
bb2a0f7a
YG
3363
3364 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3365 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3366 for_each_queue(bp, i)
3367 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3368
3369 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3370 ((u32 *)&ramrod_data)[1],
3371 ((u32 *)&ramrod_data)[0], 0);
3372 if (rc == 0) {
 3373 /* the stats ramrod has its own slot on the spq */
3374 bp->spq_left++;
3375 bp->stats_pending = 1;
3376 }
3377 }
3378}
3379
bb2a0f7a
YG
3380static void bnx2x_hw_stats_post(struct bnx2x *bp)
3381{
3382 struct dmae_command *dmae = &bp->stats_dmae;
3383 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3384
3385 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3386 if (CHIP_REV_IS_SLOW(bp))
3387 return;
bb2a0f7a
YG
3388
3389 /* loader */
3390 if (bp->executer_idx) {
3391 int loader_idx = PMF_DMAE_C(bp);
3392
3393 memset(dmae, 0, sizeof(struct dmae_command));
3394
3395 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3396 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3397 DMAE_CMD_DST_RESET |
3398#ifdef __BIG_ENDIAN
3399 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3400#else
3401 DMAE_CMD_ENDIANITY_DW_SWAP |
3402#endif
3403 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3404 DMAE_CMD_PORT_0) |
3405 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3406 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3407 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3408 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3409 sizeof(struct dmae_command) *
3410 (loader_idx + 1)) >> 2;
3411 dmae->dst_addr_hi = 0;
3412 dmae->len = sizeof(struct dmae_command) >> 2;
3413 if (CHIP_IS_E1(bp))
3414 dmae->len--;
3415 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3416 dmae->comp_addr_hi = 0;
3417 dmae->comp_val = 1;
3418
3419 *stats_comp = 0;
3420 bnx2x_post_dmae(bp, dmae, loader_idx);
3421
3422 } else if (bp->func_stx) {
3423 *stats_comp = 0;
3424 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3425 }
3426}
3427
3428static int bnx2x_stats_comp(struct bnx2x *bp)
3429{
3430 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3431 int cnt = 10;
3432
3433 might_sleep();
3434 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3435 if (!cnt) {
3436 BNX2X_ERR("timeout waiting for stats finished\n");
3437 break;
3438 }
3439 cnt--;
12469401 3440 msleep(1);
bb2a0f7a
YG
3441 }
3442 return 1;
3443}
3444
3445/*
3446 * Statistics service functions
3447 */
3448
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

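/*
 * Build the DMAE program run on every statistics cycle of the PMF:
 * push the host port/function stats out to shared memory for the
 * management FW, then pull the active MAC's counters (BMAC or EMAC)
 * and the NIG counters into host memory.  Only the final command
 * writes the completion word.
 */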
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

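/*
 * Start a statistics cycle: program the DMAE (port level on the PMF,
 * function level otherwise) and post both the hardware transfers and
 * the storm statistics ramrod.
 */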
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

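/*
 * Fold the DMAed MAC counters into the cumulative port stats; the BMAC
 * and EMAC blocks expose different counter layouts, hence the two
 * helpers.  Both also derive the pause-frame totals.
 */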
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

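/*
 * Merge the results of the last hardware DMAE cycle: MAC counters via
 * the helpers above, then the NIG discard/truncate and egress packet
 * counters, extending the 32-bit hardware values into 64-bit totals.
 */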
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

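/*
 * Merge the per-client statistics reported by the t/u/x storms into the
 * per-queue, per-function and global counters.  Each storm tags its
 * block with a counter that must match the last posted query; on any
 * mismatch the whole update is rejected and retried on the next tick.
 */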
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

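/* Map the accumulated driver statistics onto struct net_device_stats */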
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_rx_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

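/*
 * Periodic statistics update: runs once the DMAE completion word shows
 * the hardware transfer is done, merges hardware, storm and driver
 * stats, optionally dumps debug counters, and posts the next cycle.
 * Four consecutive stale storm updates are treated as fatal.
 */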
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxoverflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

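/*
 * Statistics state machine: indexed by [current state][event], each
 * entry gives the action to run and the next state.  The events are
 * PMF change, link up, timer update and stop.
 */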
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

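/*
 * One-time statistics setup on nic load: fetch the shared-memory stats
 * addresses from the MCP (if present), snapshot the NIG baseline
 * counters, zero all host-side statistics and start in the DISABLED
 * state.
 */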
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

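/*
 * Periodic driver timer: exchanges the heartbeat pulse with the
 * management FW (the driver and MCP sequence numbers may differ by at
 * most one) and triggers a statistics update while the device is up.
 */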
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

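/*
 * Per-queue status block setup: clear the USTORM/CSTORM sections of a
 * fastpath status block, point the chip at its DMA address and disable
 * host coalescing on all indices until bnx2x_update_coalesce() runs.
 */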
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

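/*
 * Set up the default status block, which carries the attention bits and
 * the slow-path indices of all four storms, and load the attention
 * group masks from the AEU registers.
 */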
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

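/*
 * Program the status-block coalescing timeouts for the Rx and Tx
 * completion indices of every queue; a zero timeout (ticks/12 == 0)
 * disables coalescing on that index instead.
 */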
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

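/*
 * Build the Rx rings: pre-allocate the TPA (LRO) skb pool when TPA is
 * enabled, chain the "next page" elements of the SGE, BD and CQE rings,
 * fill the rings with buffers, and publish the initial producers to the
 * chip.  A queue that fails these allocations falls back to non-TPA.
 */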
a2fbb9ea
ET
4891static void bnx2x_init_rx_rings(struct bnx2x *bp)
4892{
7a9b2557 4893 int func = BP_FUNC(bp);
32626230
EG
4894 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4895 ETH_MAX_AGGREGATION_QUEUES_E1H;
4896 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4897 int i, j;
a2fbb9ea 4898
87942b46 4899 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4900 DP(NETIF_MSG_IFUP,
4901 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4902
7a9b2557 4903 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4904
555f6c78 4905 for_each_rx_queue(bp, j) {
32626230 4906 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4907
32626230 4908 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4909 fp->tpa_pool[i].skb =
4910 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4911 if (!fp->tpa_pool[i].skb) {
4912 BNX2X_ERR("Failed to allocate TPA "
4913 "skb pool for queue[%d] - "
4914 "disabling TPA on this "
4915 "queue!\n", j);
4916 bnx2x_free_tpa_pool(bp, fp, i);
4917 fp->disable_tpa = 1;
4918 break;
4919 }
4920 pci_unmap_addr_set((struct sw_rx_bd *)
4921 &bp->fp->tpa_pool[i],
4922 mapping, 0);
4923 fp->tpa_state[i] = BNX2X_TPA_STOP;
4924 }
4925 }
4926 }
4927
555f6c78 4928 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4929 struct bnx2x_fastpath *fp = &bp->fp[j];
4930
4931 fp->rx_bd_cons = 0;
4932 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4933 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4934
ca00392c
EG
4935 /* Mark queue as Rx */
4936 fp->is_rx_queue = 1;
4937
7a9b2557
VZ
4938 /* "next page" elements initialization */
4939 /* SGE ring */
4940 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4941 struct eth_rx_sge *sge;
4942
4943 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4944 sge->addr_hi =
4945 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4946 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4947 sge->addr_lo =
4948 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4949 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4950 }
4951
4952 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4953
7a9b2557 4954 /* RX BD ring */
a2fbb9ea
ET
4955 for (i = 1; i <= NUM_RX_RINGS; i++) {
4956 struct eth_rx_bd *rx_bd;
4957
4958 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4959 rx_bd->addr_hi =
4960 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4961 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4962 rx_bd->addr_lo =
4963 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4964 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4965 }
4966
34f80b04 4967 /* CQ ring */
a2fbb9ea
ET
4968 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4969 struct eth_rx_cqe_next_page *nextpg;
4970
4971 nextpg = (struct eth_rx_cqe_next_page *)
4972 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4973 nextpg->addr_hi =
4974 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4975 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4976 nextpg->addr_lo =
4977 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4978 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4979 }
4980
7a9b2557
VZ
4981 /* Allocate SGEs and initialize the ring elements */
4982 for (i = 0, ring_prod = 0;
4983 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4984
7a9b2557
VZ
4985 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4986 BNX2X_ERR("was only able to allocate "
4987 "%d rx sges\n", i);
4988 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4989 /* Cleanup already allocated elements */
4990 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4991 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4992 fp->disable_tpa = 1;
4993 ring_prod = 0;
4994 break;
4995 }
4996 ring_prod = NEXT_SGE_IDX(ring_prod);
4997 }
4998 fp->rx_sge_prod = ring_prod;
4999
5000 /* Allocate BDs and initialize BD ring */
66e855f3 5001 fp->rx_comp_cons = 0;
7a9b2557 5002 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5003 for (i = 0; i < bp->rx_ring_size; i++) {
5004 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5005 BNX2X_ERR("was only able to allocate "
de832a55
EG
5006 "%d rx skbs on queue[%d]\n", i, j);
5007 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5008 break;
5009 }
5010 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5011 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5012 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5013 }
5014
7a9b2557
VZ
5015 fp->rx_bd_prod = ring_prod;
5016 /* must not have more available CQEs than BDs */
5017 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5018 cqe_ring_prod);
a2fbb9ea
ET
5019 fp->rx_pkt = fp->rx_calls = 0;
5020
7a9b2557
VZ
5021 /* Warning!
5022 * this will generate an interrupt (to the TSTORM);
5023 * it must only be done after the chip is initialized
5024 */
5025 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5026 fp->rx_sge_prod);
a2fbb9ea
ET
5027 if (j != 0)
5028 continue;
5029
5030 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5031 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5032 U64_LO(fp->rx_comp_mapping));
5033 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5034 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5035 U64_HI(fp->rx_comp_mapping));
5036 }
5037}
5038
5039static void bnx2x_init_tx_ring(struct bnx2x *bp)
5040{
5041 int i, j;
5042
555f6c78 5043 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
5044 struct bnx2x_fastpath *fp = &bp->fp[j];
5045
5046 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5047 struct eth_tx_next_bd *tx_next_bd =
5048 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5049
ca00392c 5050 tx_next_bd->addr_hi =
a2fbb9ea 5051 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5052 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5053 tx_next_bd->addr_lo =
a2fbb9ea 5054 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5055 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5056 }
5057
ca00392c
EG
5058 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5059 fp->tx_db.data.zero_fill1 = 0;
5060 fp->tx_db.data.prod = 0;
5061
a2fbb9ea
ET
5062 fp->tx_pkt_prod = 0;
5063 fp->tx_pkt_cons = 0;
5064 fp->tx_bd_prod = 0;
5065 fp->tx_bd_cons = 0;
5066 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5067 fp->tx_pkt = 0;
5068 }
6fe49bb9
EG
5069
5070 /* clean tx statistics */
5071 for_each_rx_queue(bp, i)
5072 bnx2x_fp(bp, i, tx_pkt) = 0;
a2fbb9ea
ET
5073}
5074
5075static void bnx2x_init_sp_ring(struct bnx2x *bp)
5076{
34f80b04 5077 int func = BP_FUNC(bp);
a2fbb9ea
ET
5078
5079 spin_lock_init(&bp->spq_lock);
5080
5081 bp->spq_left = MAX_SPQ_PENDING;
5082 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5083 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5084 bp->spq_prod_bd = bp->spq;
5085 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5086
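/* The slow-path queue is a single BCM_PAGE_SIZE page holding
 * MAX_SP_DESC_CNT BDs: spq_prod_bd advances from bp->spq towards
 * spq_last_bd (presumably wrapping back to the base when it reaches the
 * end), while spq_prod_idx is the producer value the XSTORM is told
 * about via the register writes below.
 */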
34f80b04 5087 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5088 U64_LO(bp->spq_mapping));
34f80b04
EG
5089 REG_WR(bp,
5090 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5091 U64_HI(bp->spq_mapping));
5092
34f80b04 5093 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5094 bp->spq_prod_idx);
5095}
5096
5097static void bnx2x_init_context(struct bnx2x *bp)
5098{
5099 int i;
5100
ca00392c 5101 for_each_rx_queue(bp, i) {
a2fbb9ea
ET
5102 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5103 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5104 u8 cl_id = fp->cl_id;
a2fbb9ea 5105
34f80b04
EG
5106 context->ustorm_st_context.common.sb_index_numbers =
5107 BNX2X_RX_SB_INDEX_NUM;
0626b899 5108 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5109 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5110 context->ustorm_st_context.common.flags =
de832a55
EG
5111 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5112 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5113 context->ustorm_st_context.common.statistics_counter_id =
5114 cl_id;
8d9c5f34 5115 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5116 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5117 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5118 bp->rx_buf_size;
34f80b04 5119 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5120 U64_HI(fp->rx_desc_mapping);
34f80b04 5121 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5122 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5123 if (!fp->disable_tpa) {
5124 context->ustorm_st_context.common.flags |=
ca00392c 5125 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5126 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
5127 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5128 (u32)0xffff);
7a9b2557
VZ
5129 context->ustorm_st_context.common.sge_page_base_hi =
5130 U64_HI(fp->rx_sge_mapping);
5131 context->ustorm_st_context.common.sge_page_base_lo =
5132 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5133
5134 context->ustorm_st_context.common.max_sges_for_packet =
5135 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5136 context->ustorm_st_context.common.max_sges_for_packet =
5137 ((context->ustorm_st_context.common.
5138 max_sges_for_packet + PAGES_PER_SGE - 1) &
5139 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5140 }
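/* The two max_sges_for_packet assignments above compute
 * ceil(pages_for_mtu / PAGES_PER_SGE), i.e. how many SGE entries an
 * MTU-sized packet can consume. A sketch assuming 4K SGE pages and
 * PAGES_PER_SGE = 2: a 9000-byte MTU aligns up to 3 pages, rounds up
 * to 4, and yields max_sges_for_packet = 2.
 */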
5141
8d9c5f34
EG
5142 context->ustorm_ag_context.cdu_usage =
5143 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5144 CDU_REGION_NUMBER_UCM_AG,
5145 ETH_CONNECTION_TYPE);
5146
ca00392c
EG
5147 context->xstorm_ag_context.cdu_reserved =
5148 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5149 CDU_REGION_NUMBER_XCM_AG,
5150 ETH_CONNECTION_TYPE);
5151 }
5152
5153 for_each_tx_queue(bp, i) {
5154 struct bnx2x_fastpath *fp = &bp->fp[i];
5155 struct eth_context *context =
5156 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5157
5158 context->cstorm_st_context.sb_index_number =
5159 C_SB_ETH_TX_CQ_INDEX;
5160 context->cstorm_st_context.status_block_id = fp->sb_id;
5161
8d9c5f34
EG
5162 context->xstorm_st_context.tx_bd_page_base_hi =
5163 U64_HI(fp->tx_desc_mapping);
5164 context->xstorm_st_context.tx_bd_page_base_lo =
5165 U64_LO(fp->tx_desc_mapping);
ca00392c 5166 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5167 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5168 }
5169}
5170
5171static void bnx2x_init_ind_table(struct bnx2x *bp)
5172{
26c8fa4d 5173 int func = BP_FUNC(bp);
a2fbb9ea
ET
5174 int i;
5175
555f6c78 5176 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5177 return;
5178
555f6c78
EG
5179 DP(NETIF_MSG_IFUP,
5180 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5181 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5182 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5183 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 5184 bp->fp->cl_id + (i % bp->num_rx_queues));
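/* Each of the TSTORM_INDIRECTION_TABLE_SIZE hash buckets is thus steered
 * round-robin to one of the Rx clients: bucket i goes to client
 * (leading cl_id + i % num_rx_queues), spreading RSS flows evenly
 * across the Rx queues.
 */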
a2fbb9ea
ET
5185}
5186
49d66772
ET
5187static void bnx2x_set_client_config(struct bnx2x *bp)
5188{
49d66772 5189 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5190 int port = BP_PORT(bp);
5191 int i;
49d66772 5192
e7799c5f 5193 tstorm_client.mtu = bp->dev->mtu;
49d66772 5194 tstorm_client.config_flags =
de832a55
EG
5195 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5196 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5197#ifdef BCM_VLAN
0c6671b0 5198 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5199 tstorm_client.config_flags |=
8d9c5f34 5200 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5201 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5202 }
5203#endif
49d66772
ET
5204
5205 for_each_queue(bp, i) {
de832a55
EG
5206 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5207
49d66772 5208 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5209 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5210 ((u32 *)&tstorm_client)[0]);
5211 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5212 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5213 ((u32 *)&tstorm_client)[1]);
5214 }
5215
34f80b04
EG
5216 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5217 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5218}
5219
a2fbb9ea
ET
5220static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5221{
a2fbb9ea 5222 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
5223 int mode = bp->rx_mode;
5224 int mask = (1 << BP_L_ID(bp));
5225 int func = BP_FUNC(bp);
581ce43d 5226 int port = BP_PORT(bp);
a2fbb9ea 5227 int i;
581ce43d
EG
5228 /* All but management unicast packets should pass to the host as well */
5229 u32 llh_mask =
5230 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5231 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5232 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5233 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5234
3196a88a 5235 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5236
5237 switch (mode) {
5238 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5239 tstorm_mac_filter.ucast_drop_all = mask;
5240 tstorm_mac_filter.mcast_drop_all = mask;
5241 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5242 break;
356e2385 5243
a2fbb9ea 5244 case BNX2X_RX_MODE_NORMAL:
34f80b04 5245 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5246 break;
356e2385 5247
a2fbb9ea 5248 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5249 tstorm_mac_filter.mcast_accept_all = mask;
5250 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5251 break;
356e2385 5252
a2fbb9ea 5253 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5254 tstorm_mac_filter.ucast_accept_all = mask;
5255 tstorm_mac_filter.mcast_accept_all = mask;
5256 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5257 /* pass management unicast packets as well */
5258 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5259 break;
356e2385 5260
a2fbb9ea 5261 default:
34f80b04
EG
5262 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5263 break;
a2fbb9ea
ET
5264 }
5265
581ce43d
EG
5266 REG_WR(bp,
5267 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5268 llh_mask);
5269
a2fbb9ea
ET
5270 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5271 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5272 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5273 ((u32 *)&tstorm_mac_filter)[i]);
5274
34f80b04 5275/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5276 ((u32 *)&tstorm_mac_filter)[i]); */
5277 }
a2fbb9ea 5278
49d66772
ET
5279 if (mode != BNX2X_RX_MODE_NONE)
5280 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5281}
5282
471de716
EG
5283static void bnx2x_init_internal_common(struct bnx2x *bp)
5284{
5285 int i;
5286
5287 /* Zero this manually as its initialization is
5288 currently missing in the initTool */
5289 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5290 REG_WR(bp, BAR_USTRORM_INTMEM +
5291 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5292}
5293
5294static void bnx2x_init_internal_port(struct bnx2x *bp)
5295{
5296 int port = BP_PORT(bp);
5297
ca00392c
EG
5298 REG_WR(bp,
5299 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5300 REG_WR(bp,
5301 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5302 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5303 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5304}
5305
5306static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5307{
a2fbb9ea
ET
5308 struct tstorm_eth_function_common_config tstorm_config = {0};
5309 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5310 int port = BP_PORT(bp);
5311 int func = BP_FUNC(bp);
de832a55
EG
5312 int i, j;
5313 u32 offset;
471de716 5314 u16 max_agg_size;
a2fbb9ea
ET
5315
5316 if (is_multi(bp)) {
555f6c78 5317 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5318 tstorm_config.rss_result_mask = MULTI_MASK;
5319 }
ca00392c
EG
5320
5321 /* Enable TPA if needed */
5322 if (bp->flags & TPA_ENABLE_FLAG)
5323 tstorm_config.config_flags |=
5324 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5325
8d9c5f34
EG
5326 if (IS_E1HMF(bp))
5327 tstorm_config.config_flags |=
5328 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5329
34f80b04
EG
5330 tstorm_config.leading_client_id = BP_L_ID(bp);
5331
a2fbb9ea 5332 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5333 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5334 (*(u32 *)&tstorm_config));
5335
c14423fe 5336 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
5337 bnx2x_set_storm_rx_mode(bp);
5338
de832a55
EG
5339 for_each_queue(bp, i) {
5340 u8 cl_id = bp->fp[i].cl_id;
5341
5342 /* reset xstorm per client statistics */
5343 offset = BAR_XSTRORM_INTMEM +
5344 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5345 for (j = 0;
5346 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5347 REG_WR(bp, offset + j*4, 0);
5348
5349 /* reset tstorm per client statistics */
5350 offset = BAR_TSTRORM_INTMEM +
5351 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5352 for (j = 0;
5353 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5354 REG_WR(bp, offset + j*4, 0);
5355
5356 /* reset ustorm per client statistics */
5357 offset = BAR_USTRORM_INTMEM +
5358 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5359 for (j = 0;
5360 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5361 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5362 }
5363
5364 /* Init statistics related context */
34f80b04 5365 stats_flags.collect_eth = 1;
a2fbb9ea 5366
66e855f3 5367 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5368 ((u32 *)&stats_flags)[0]);
66e855f3 5369 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5370 ((u32 *)&stats_flags)[1]);
5371
66e855f3 5372 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5373 ((u32 *)&stats_flags)[0]);
66e855f3 5374 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5375 ((u32 *)&stats_flags)[1]);
5376
de832a55
EG
5377 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5378 ((u32 *)&stats_flags)[0]);
5379 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5380 ((u32 *)&stats_flags)[1]);
5381
66e855f3 5382 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5383 ((u32 *)&stats_flags)[0]);
66e855f3 5384 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5385 ((u32 *)&stats_flags)[1]);
5386
66e855f3
YG
5387 REG_WR(bp, BAR_XSTRORM_INTMEM +
5388 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5389 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5390 REG_WR(bp, BAR_XSTRORM_INTMEM +
5391 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5392 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5393
5394 REG_WR(bp, BAR_TSTRORM_INTMEM +
5395 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5396 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5397 REG_WR(bp, BAR_TSTRORM_INTMEM +
5398 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5399 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5400
de832a55
EG
5401 REG_WR(bp, BAR_USTRORM_INTMEM +
5402 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5403 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5404 REG_WR(bp, BAR_USTRORM_INTMEM +
5405 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5406 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5407
34f80b04
EG
5408 if (CHIP_IS_E1H(bp)) {
5409 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5410 IS_E1HMF(bp));
5411 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5412 IS_E1HMF(bp));
5413 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5414 IS_E1HMF(bp));
5415 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5416 IS_E1HMF(bp));
5417
7a9b2557
VZ
5418 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5419 bp->e1hov);
34f80b04
EG
5420 }
5421
4f40f2cb
EG
5422 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5423 max_agg_size =
5424 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5425 SGE_PAGE_SIZE * PAGES_PER_SGE),
5426 (u32)0xffff);
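/* Worked example, assuming 4K SGE pages and PAGES_PER_SGE = 2: the inner
 * min() picks 8 frags (MAX_SKB_FRAGS is larger on such configs), giving
 * 8 * 4096 * 2 = 65536, which the outer min() then caps at the u16
 * limit -- so the effective aggregation size is 0xffff = 65535 bytes.
 */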
555f6c78 5427 for_each_rx_queue(bp, i) {
7a9b2557 5428 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5429
5430 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5431 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5432 U64_LO(fp->rx_comp_mapping));
5433 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5434 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5435 U64_HI(fp->rx_comp_mapping));
5436
ca00392c
EG
5437 /* Next page */
5438 REG_WR(bp, BAR_USTRORM_INTMEM +
5439 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5440 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5441 REG_WR(bp, BAR_USTRORM_INTMEM +
5442 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5443 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5444
7a9b2557 5445 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5446 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5447 max_agg_size);
5448 }
8a1c38d1 5449
1c06328c
EG
5450 /* dropless flow control */
5451 if (CHIP_IS_E1H(bp)) {
5452 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5453
5454 rx_pause.bd_thr_low = 250;
5455 rx_pause.cqe_thr_low = 250;
5456 rx_pause.cos = 1;
5457 rx_pause.sge_thr_low = 0;
5458 rx_pause.bd_thr_high = 350;
5459 rx_pause.cqe_thr_high = 350;
5460 rx_pause.sge_thr_high = 0;
5461
5462 for_each_rx_queue(bp, i) {
5463 struct bnx2x_fastpath *fp = &bp->fp[i];
5464
5465 if (!fp->disable_tpa) {
5466 rx_pause.sge_thr_low = 150;
5467 rx_pause.sge_thr_high = 250;
5468 }
5469
5470
5471 offset = BAR_USTRORM_INTMEM +
5472 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5473 fp->cl_id);
5474 for (j = 0;
5475 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5476 j++)
5477 REG_WR(bp, offset + j*4,
5478 ((u32 *)&rx_pause)[j]);
5479 }
5480 }
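/* The thresholds above are in ring-entry units. Reading the naming (an
 * interpretation, not spelled out here): when the number of free
 * BDs/CQEs on a queue falls below *_thr_low the chip starts generating
 * pause, and stops once availability climbs back above *_thr_high; the
 * SGE watermarks only matter when TPA is active, hence the non-zero
 * values only for queues with !disable_tpa.
 */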
5481
8a1c38d1
EG
5482 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5483
5484 /* Init rate shaping and fairness contexts */
5485 if (IS_E1HMF(bp)) {
5486 int vn;
5487
5488 /* During init there is no active link;
5489 until link is up, set the link rate to 10Gbps */
5490 bp->link_vars.line_speed = SPEED_10000;
5491 bnx2x_init_port_minmax(bp);
5492
5493 bnx2x_calc_vn_weight_sum(bp);
5494
5495 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5496 bnx2x_init_vn_minmax(bp, 2*vn + port);
5497
5498 /* Enable rate shaping and fairness */
5499 bp->cmng.flags.cmng_enables =
5500 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5501 if (bp->vn_weight_sum)
5502 bp->cmng.flags.cmng_enables |=
5503 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5504 else
5505 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5506 " fairness will be disabled\n");
5507 } else {
5508 /* rate shaping and fairness are disabled */
5509 DP(NETIF_MSG_IFUP,
5510 "single function mode, minmax will be disabled\n");
5511 }
5512
5513
5514 /* Store it to internal memory */
5515 if (bp->port.pmf)
5516 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5517 REG_WR(bp, BAR_XSTRORM_INTMEM +
5518 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5519 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5520}
5521
471de716
EG
5522static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5523{
5524 switch (load_code) {
5525 case FW_MSG_CODE_DRV_LOAD_COMMON:
5526 bnx2x_init_internal_common(bp);
5527 /* no break */
5528
5529 case FW_MSG_CODE_DRV_LOAD_PORT:
5530 bnx2x_init_internal_port(bp);
5531 /* no break */
5532
5533 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5534 bnx2x_init_internal_func(bp);
5535 break;
5536
5537 default:
5538 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5539 break;
5540 }
5541}
5542
5543static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5544{
5545 int i;
5546
5547 for_each_queue(bp, i) {
5548 struct bnx2x_fastpath *fp = &bp->fp[i];
5549
34f80b04 5550 fp->bp = bp;
a2fbb9ea 5551 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5552 fp->index = i;
34f80b04
EG
5553 fp->cl_id = BP_L_ID(bp) + i;
5554 fp->sb_id = fp->cl_id;
ca00392c
EG
5555 /* Suitable Rx and Tx SBs are served by the same client */
5556 if (i >= bp->num_rx_queues)
5557 fp->cl_id -= bp->num_rx_queues;
34f80b04 5558 DP(NETIF_MSG_IFUP,
f5372251
EG
5559 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5560 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5561 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5562 fp->sb_id);
5c862848 5563 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5564 }
5565
16119785
EG
5566 /* ensure status block indices were read */
5567 rmb();
5568
5569
5c862848
EG
5570 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5571 DEF_SB_ID);
5572 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5573 bnx2x_update_coalesce(bp);
5574 bnx2x_init_rx_rings(bp);
5575 bnx2x_init_tx_ring(bp);
5576 bnx2x_init_sp_ring(bp);
5577 bnx2x_init_context(bp);
471de716 5578 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5579 bnx2x_init_ind_table(bp);
0ef00459
EG
5580 bnx2x_stats_init(bp);
5581
5582 /* At this point, we are ready for interrupts */
5583 atomic_set(&bp->intr_sem, 0);
5584
5585 /* flush all before enabling interrupts */
5586 mb();
5587 mmiowb();
5588
615f8fd9 5589 bnx2x_int_enable(bp);
eb8da205
EG
5590
5591 /* Check for SPIO5 */
5592 bnx2x_attn_int_deasserted0(bp,
5593 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5594 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5595}
5596
5597/* end of nic init */
5598
5599/*
5600 * gzip service functions
5601 */
5602
5603static int bnx2x_gunzip_init(struct bnx2x *bp)
5604{
5605 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5606 &bp->gunzip_mapping);
5607 if (bp->gunzip_buf == NULL)
5608 goto gunzip_nomem1;
5609
5610 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5611 if (bp->strm == NULL)
5612 goto gunzip_nomem2;
5613
5614 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5615 GFP_KERNEL);
5616 if (bp->strm->workspace == NULL)
5617 goto gunzip_nomem3;
5618
5619 return 0;
5620
5621gunzip_nomem3:
5622 kfree(bp->strm);
5623 bp->strm = NULL;
5624
5625gunzip_nomem2:
5626 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5627 bp->gunzip_mapping);
5628 bp->gunzip_buf = NULL;
5629
5630gunzip_nomem1:
5631 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5632 " decompression\n", bp->dev->name);
a2fbb9ea
ET
5633 return -ENOMEM;
5634}
5635
5636static void bnx2x_gunzip_end(struct bnx2x *bp)
5637{
5638 kfree(bp->strm->workspace);
5639
5640 kfree(bp->strm);
5641 bp->strm = NULL;
5642
5643 if (bp->gunzip_buf) {
5644 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5645 bp->gunzip_mapping);
5646 bp->gunzip_buf = NULL;
5647 }
5648}
5649
94a78b79 5650static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5651{
5652 int n, rc;
5653
5654 /* check gzip header */
94a78b79
VZ
5655 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5656 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5657 return -EINVAL;
94a78b79 5658 }
a2fbb9ea
ET
5659
5660 n = 10;
5661
34f80b04 5662#define FNAME 0x8
a2fbb9ea
ET
5663
5664 if (zbuf[3] & FNAME)
5665 while ((zbuf[n++] != 0) && (n < len));
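/* Per RFC 1952 a gzip stream opens with a 10-byte fixed header (magic
 * 0x1f 0x8b, method, flags, mtime, xfl, os); when the FNAME flag
 * (bit 3) is set, a NUL-terminated original file name follows, which
 * the loop above skips. The remaining raw deflate data is then fed to
 * zlib in raw mode (the negative window size passed to
 * zlib_inflateInit2 below).
 */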
5666
94a78b79 5667 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5668 bp->strm->avail_in = len - n;
5669 bp->strm->next_out = bp->gunzip_buf;
5670 bp->strm->avail_out = FW_BUF_SIZE;
5671
5672 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5673 if (rc != Z_OK)
5674 return rc;
5675
5676 rc = zlib_inflate(bp->strm, Z_FINISH);
5677 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5678 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5679 bp->dev->name, bp->strm->msg);
5680
5681 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5682 if (bp->gunzip_outlen & 0x3)
5683 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5684 " gunzip_outlen (%d) not aligned\n",
5685 bp->dev->name, bp->gunzip_outlen);
5686 bp->gunzip_outlen >>= 2;
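/* bytes -> 32-bit words; the decompressed image is consumed as u32s
 * (which is presumably why a length that is not 4-byte aligned is
 * flagged as an error just above).
 */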
5687
5688 zlib_inflateEnd(bp->strm);
5689
5690 if (rc == Z_STREAM_END)
5691 return 0;
5692
5693 return rc;
5694}
5695
5696/* nic load/unload */
5697
5698/*
34f80b04 5699 * General service functions
a2fbb9ea
ET
5700 */
5701
5702/* send a NIG loopback debug packet */
5703static void bnx2x_lb_pckt(struct bnx2x *bp)
5704{
a2fbb9ea 5705 u32 wb_write[3];
a2fbb9ea
ET
5706
5707 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5708 wb_write[0] = 0x55555555;
5709 wb_write[1] = 0x55555555;
34f80b04 5710 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5711 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5712
5713 /* NON-IP protocol */
a2fbb9ea
ET
5714 wb_write[0] = 0x09000000;
5715 wb_write[1] = 0x55555555;
34f80b04 5716 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5717 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5718}
5719
5720/* some of the internal memories
5721 * are not directly readable from the driver;
5722 * to test them we send debug packets
5723 */
5724static int bnx2x_int_mem_test(struct bnx2x *bp)
5725{
5726 int factor;
5727 int count, i;
5728 u32 val = 0;
5729
ad8d3948 5730 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5731 factor = 120;
ad8d3948
EG
5732 else if (CHIP_REV_IS_EMUL(bp))
5733 factor = 200;
5734 else
a2fbb9ea 5735 factor = 1;
a2fbb9ea
ET
5736
5737 DP(NETIF_MSG_HW, "start part1\n");
5738
5739 /* Disable inputs of parser neighbor blocks */
5740 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5741 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5742 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5743 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5744
5745 /* Write 0 to parser credits for CFC search request */
5746 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5747
5748 /* send Ethernet packet */
5749 bnx2x_lb_pckt(bp);
5750
5751 /* TODO: should the NIG statistics be reset here? */
5752 /* Wait until NIG register shows 1 packet of size 0x10 */
5753 count = 1000 * factor;
5754 while (count) {
34f80b04 5755
a2fbb9ea
ET
5756 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5757 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5758 if (val == 0x10)
5759 break;
5760
5761 msleep(10);
5762 count--;
5763 }
5764 if (val != 0x10) {
5765 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5766 return -1;
5767 }
5768
5769 /* Wait until PRS register shows 1 packet */
5770 count = 1000 * factor;
5771 while (count) {
5772 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5773 if (val == 1)
5774 break;
5775
5776 msleep(10);
5777 count--;
5778 }
5779 if (val != 0x1) {
5780 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5781 return -2;
5782 }
5783
5784 /* Reset and init BRB, PRS */
34f80b04 5785 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5786 msleep(50);
34f80b04 5787 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5788 msleep(50);
94a78b79
VZ
5789 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5790 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5791
5792 DP(NETIF_MSG_HW, "part2\n");
5793
5794 /* Disable inputs of parser neighbor blocks */
5795 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5796 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5797 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5798 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5799
5800 /* Write 0 to parser credits for CFC search request */
5801 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5802
5803 /* send 10 Ethernet packets */
5804 for (i = 0; i < 10; i++)
5805 bnx2x_lb_pckt(bp);
5806
5807 /* Wait until NIG register shows 10 + 1
5808 packets of size 11*0x10 = 0xb0 */
5809 count = 1000 * factor;
5810 while (count) {
34f80b04 5811
a2fbb9ea
ET
5812 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5813 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5814 if (val == 0xb0)
5815 break;
5816
5817 msleep(10);
5818 count--;
5819 }
5820 if (val != 0xb0) {
5821 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5822 return -3;
5823 }
5824
5825 /* Wait until PRS register shows 2 packets */
5826 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5827 if (val != 2)
5828 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5829
5830 /* Write 1 to parser credits for CFC search request */
5831 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5832
5833 /* Wait until PRS register shows 3 packets */
5834 msleep(10 * factor);
5835 /* Wait until NIG register shows 1 packet of size 0x10 */
5836 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5837 if (val != 3)
5838 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5839
5840 /* clear NIG EOP FIFO */
5841 for (i = 0; i < 11; i++)
5842 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5843 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5844 if (val != 1) {
5845 BNX2X_ERR("clear of NIG failed\n");
5846 return -4;
5847 }
5848
5849 /* Reset and init BRB, PRS, NIG */
5850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5851 msleep(50);
5852 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5853 msleep(50);
94a78b79
VZ
5854 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5855 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5856#ifndef BCM_ISCSI
5857 /* set NIC mode */
5858 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5859#endif
5860
5861 /* Enable inputs of parser neighbor blocks */
5862 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5863 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5864 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5865 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5866
5867 DP(NETIF_MSG_HW, "done\n");
5868
5869 return 0; /* OK */
5870}
5871
5872static void enable_blocks_attention(struct bnx2x *bp)
5873{
5874 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5875 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5876 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5877 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5878 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5879 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5880 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5881 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5882 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5883/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5884/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5885 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5886 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5887 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5888/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5889/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5890 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5891 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5892 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5893 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5894/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5895/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5896 if (CHIP_REV_IS_FPGA(bp))
5897 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5898 else
5899 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5900 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5901 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5902 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5903/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5904/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5905 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5906 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5907/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5908 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
a2fbb9ea
ET
5909}
5910
34f80b04 5911
81f75bbf
EG
5912static void bnx2x_reset_common(struct bnx2x *bp)
5913{
5914 /* reset_common */
5915 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5916 0xd3ffff7f);
5917 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5918}
5919
fd4ef40d
EG
5920
5921static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5922{
5923 u32 val;
5924 u8 port;
5925 u8 is_required = 0;
5926
5927 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5928 SHARED_HW_CFG_FAN_FAILURE_MASK;
5929
5930 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5931 is_required = 1;
5932
5933 /*
5934 * The fan failure mechanism is usually related to the PHY type since
5935 * the power consumption of the board is affected by the PHY. Currently,
5936 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5937 */
5938 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5939 for (port = PORT_0; port < PORT_MAX; port++) {
5940 u32 phy_type =
5941 SHMEM_RD(bp, dev_info.port_hw_config[port].
5942 external_phy_config) &
5943 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5944 is_required |=
5945 ((phy_type ==
5946 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
5947 (phy_type ==
5948 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
5949 (phy_type ==
5950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5951 }
5952
5953 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5954
5955 if (is_required == 0)
5956 return;
5957
5958 /* Fan failure is indicated by SPIO 5 */
5959 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5960 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5961
5962 /* set to active low mode */
5963 val = REG_RD(bp, MISC_REG_SPIO_INT);
5964 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5965 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5966 REG_WR(bp, MISC_REG_SPIO_INT, val);
5967
5968 /* enable interrupt to signal the IGU */
5969 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5970 val |= (1 << MISC_REGISTERS_SPIO_5);
5971 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5972}
5973
34f80b04 5974static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5975{
a2fbb9ea 5976 u32 val, i;
a2fbb9ea 5977
34f80b04 5978 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5979
81f75bbf 5980 bnx2x_reset_common(bp);
34f80b04
EG
5981 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5982 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5983
94a78b79 5984 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
5985 if (CHIP_IS_E1H(bp))
5986 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5987
34f80b04
EG
5988 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5989 msleep(30);
5990 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5991
94a78b79 5992 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5993 if (CHIP_IS_E1(bp)) {
5994 /* enable HW interrupt from PXP on USDM overflow
5995 bit 16 on INT_MASK_0 */
5996 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5997 }
a2fbb9ea 5998
94a78b79 5999 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6000 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6001
6002#ifdef __BIG_ENDIAN
34f80b04
EG
6003 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6004 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6005 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6006 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6007 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6008 /* make sure this value is 0 */
6009 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6010
6011/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6012 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6013 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6014 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6015 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6016#endif
6017
34f80b04 6018 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 6019#ifdef BCM_ISCSI
34f80b04
EG
6020 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6021 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6022 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6023#endif
6024
34f80b04
EG
6025 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6026 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6027
34f80b04
EG
6028 /* let the HW do its magic ... */
6029 msleep(100);
6030 /* finish PXP init */
6031 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6032 if (val != 1) {
6033 BNX2X_ERR("PXP2 CFG failed\n");
6034 return -EBUSY;
6035 }
6036 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6037 if (val != 1) {
6038 BNX2X_ERR("PXP2 RD_INIT failed\n");
6039 return -EBUSY;
6040 }
a2fbb9ea 6041
34f80b04
EG
6042 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6043 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6044
94a78b79 6045 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6046
34f80b04
EG
6047 /* clean the DMAE memory */
6048 bp->dmae_ready = 1;
6049 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6050
94a78b79
VZ
6051 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6052 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6053 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6054 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6055
34f80b04
EG
6056 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6057 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6058 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6059 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6060
94a78b79 6061 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
6062 /* soft reset pulse */
6063 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6064 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
6065
6066#ifdef BCM_ISCSI
94a78b79 6067 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6068#endif
a2fbb9ea 6069
94a78b79 6070 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6071 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6072 if (!CHIP_REV_IS_SLOW(bp)) {
6073 /* enable hw interrupt from doorbell Q */
6074 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6075 }
a2fbb9ea 6076
94a78b79
VZ
6077 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6078 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6079 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
6080 /* set NIC mode */
6081 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
6082 if (CHIP_IS_E1H(bp))
6083 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6084
94a78b79
VZ
6085 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6086 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6087 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6088 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6089
ca00392c
EG
6090 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6091 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6092 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6093 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6094
94a78b79
VZ
6095 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6096 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6097 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6098 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6099
34f80b04
EG
6100 /* sync semi rtc */
6101 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6102 0x80000000);
6103 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6104 0x80000000);
a2fbb9ea 6105
94a78b79
VZ
6106 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6107 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6108 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6109
34f80b04
EG
6110 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6111 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6112 REG_WR(bp, i, 0xc0cac01a);
6113 /* TODO: replace with something meaningful */
6114 }
94a78b79 6115 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 6116 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6117
34f80b04
EG
6118 if (sizeof(union cdu_context) != 1024)
6119 /* we currently assume that a context is 1024 bytes */
6120 printk(KERN_ALERT PFX "please adjust the size of"
6121 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 6122
94a78b79 6123 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6124 val = (4 << 24) + (0 << 12) + 1024;
6125 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6126
94a78b79 6127 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6128 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6129 /* enable context validation interrupt from CFC */
6130 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6131
6132 /* set the thresholds to prevent CFC/CDU race */
6133 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6134
94a78b79
VZ
6135 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6136 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6137
94a78b79 6138 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6139 /* Reset PCIE errors for debug */
6140 REG_WR(bp, 0x2814, 0xffffffff);
6141 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6142
94a78b79 6143 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6144 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6145 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6146 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6147
94a78b79 6148 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6149 if (CHIP_IS_E1H(bp)) {
6150 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6151 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6152 }
6153
6154 if (CHIP_REV_IS_SLOW(bp))
6155 msleep(200);
6156
6157 /* finish CFC init */
6158 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6159 if (val != 1) {
6160 BNX2X_ERR("CFC LL_INIT failed\n");
6161 return -EBUSY;
6162 }
6163 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6164 if (val != 1) {
6165 BNX2X_ERR("CFC AC_INIT failed\n");
6166 return -EBUSY;
6167 }
6168 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6169 if (val != 1) {
6170 BNX2X_ERR("CFC CAM_INIT failed\n");
6171 return -EBUSY;
6172 }
6173 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6174
34f80b04
EG
6175 /* read the NIG statistics
6176 to see if this is the first bring-up since power-up */
6177 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6178 val = *bnx2x_sp(bp, wb_data[0]);
6179
6180 /* do internal memory self test */
6181 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6182 BNX2X_ERR("internal mem self test failed\n");
6183 return -EBUSY;
6184 }
6185
35b19ba5 6186 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6187 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6188 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6190 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6191 bp->port.need_hw_lock = 1;
6192 break;
6193
34f80b04
EG
6194 default:
6195 break;
6196 }
f1410647 6197
fd4ef40d
EG
6198 bnx2x_setup_fan_failure_detection(bp);
6199
34f80b04
EG
6200 /* clear PXP2 attentions */
6201 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6202
34f80b04 6203 enable_blocks_attention(bp);
a2fbb9ea 6204
6bbca910
YR
6205 if (!BP_NOMCP(bp)) {
6206 bnx2x_acquire_phy_lock(bp);
6207 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6208 bnx2x_release_phy_lock(bp);
6209 } else
6210 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6211
34f80b04
EG
6212 return 0;
6213}
a2fbb9ea 6214
34f80b04
EG
6215static int bnx2x_init_port(struct bnx2x *bp)
6216{
6217 int port = BP_PORT(bp);
94a78b79 6218 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6219 u32 low, high;
34f80b04 6220 u32 val;
a2fbb9ea 6221
34f80b04
EG
6222 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6223
6224 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6225
94a78b79 6226 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6227 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6228
6229 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6230 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6231 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
a2fbb9ea
ET
6232#ifdef BCM_ISCSI
6233 /* Port0 1
6234 * Port1 385 */
6235 i++;
6236 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6237 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6238 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6239 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6240
6241 /* Port0 2
6242 * Port1 386 */
6243 i++;
6244 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6245 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6246 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6247 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6248
6249 /* Port0 3
6250 * Port1 387 */
6251 i++;
6252 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6253 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6254 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6255 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6256#endif
94a78b79 6257 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6258
a2fbb9ea
ET
6259#ifdef BCM_ISCSI
6260 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6261 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6262
94a78b79 6263 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea 6264#endif
94a78b79 6265 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6266
94a78b79 6267 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6268 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6269 /* no pause for emulation and FPGA */
6270 low = 0;
6271 high = 513;
6272 } else {
6273 if (IS_E1HMF(bp))
6274 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6275 else if (bp->dev->mtu > 4096) {
6276 if (bp->flags & ONE_PORT_FLAG)
6277 low = 160;
6278 else {
6279 val = bp->dev->mtu;
6280 /* (24*1024 + val*4)/256 */
6281 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6282 }
6283 } else
6284 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6285 high = low + 56; /* 14*1024/256 */
6286 }
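/* The BRB thresholds are in 256-byte blocks. Worked example for the
 * jumbo branch with mtu = 9000 on a two-port board:
 * low = 96 + 9000/64 + 1 = 237 blocks, matching the commented formula
 * (24*1024 + 9000*4)/256 = 236.6 rounded up; high = low + 56 then adds
 * 14KB (14*1024/256 blocks) of hysteresis.
 */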
6287 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6288 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6289
6290
94a78b79 6291 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6292
94a78b79 6293 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6294 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6295 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6296 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6297
94a78b79
VZ
6298 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6299 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6300 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6301 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6302
94a78b79 6303 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6304 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6305
94a78b79 6306 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6307
6308 /* configure PBF to work without PAUSE (mtu 9000) */
34f80b04 6309 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6310
6311 /* update threshold */
34f80b04 6312 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6313 /* update init credit */
34f80b04 6314 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6315
6316 /* probe changes */
34f80b04 6317 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6318 msleep(5);
34f80b04 6319 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
6320
6321#ifdef BCM_ISCSI
6322 /* tell the searcher where the T2 table is */
6323 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6324
6325 wb_write[0] = U64_LO(bp->t2_mapping);
6326 wb_write[1] = U64_HI(bp->t2_mapping);
6327 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6328 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6329 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6330 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6331
6332 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
a2fbb9ea 6333#endif
94a78b79 6334 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6335 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6336
6337 if (CHIP_IS_E1(bp)) {
6338 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6339 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6340 }
94a78b79 6341 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6342
94a78b79 6343 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6344 /* init aeu_mask_attn_func_0/1:
6345 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6346 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6347 * bits 4-7 are used for "per vn group attention" */
6348 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6349 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6350
94a78b79 6351 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6352 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6353 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6354 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6355 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6356
94a78b79 6357 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6358
6359 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6360
6361 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6362 /* 0x2 disable e1hov, 0x1 enable */
6363 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6364 (IS_E1HMF(bp) ? 0x1 : 0x2));
6365
1c06328c
EG
6366 {
6367 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6368 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6369 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6370 }
34f80b04
EG
6371 }
6372
94a78b79 6373 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6374 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6375
35b19ba5 6376 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6377 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6378 {
6379 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6380
6381 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6382 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6383
6384 /* The GPIO should be swapped if the swap register is
6385 set and active */
6386 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6387 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6388
6389 /* Select function upon port-swap configuration */
6390 if (port == 0) {
6391 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6392 aeu_gpio_mask = (swap_val && swap_override) ?
6393 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6394 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6395 } else {
6396 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6397 aeu_gpio_mask = (swap_val && swap_override) ?
6398 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6399 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6400 }
6401 val = REG_RD(bp, offset);
6402 /* add GPIO3 to group */
6403 val |= aeu_gpio_mask;
6404 REG_WR(bp, offset, val);
6405 }
6406 break;
6407
35b19ba5 6408 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6409 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6410 /* add SPIO 5 to group 0 */
4d295db0
EG
6411 {
6412 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6413 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6414 val = REG_RD(bp, reg_addr);
f1410647 6415 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6416 REG_WR(bp, reg_addr, val);
6417 }
f1410647
ET
6418 break;
6419
6420 default:
6421 break;
6422 }
6423
c18487ee 6424 bnx2x__link_reset(bp);
a2fbb9ea 6425
34f80b04
EG
6426 return 0;
6427}
6428
6429#define ILT_PER_FUNC (768/2)
6430#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6431/* the phys address is shifted right 12 bits and a
6432 1=valid bit is added at the 53rd bit;
6433 since this is a wide register (TM)
6434 we split it into two 32-bit writes
6435 */
6436#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6437#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6438#define PXP_ONE_ILT(x) (((x) << 10) | x)
6439#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
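/* Worked example with an arbitrary illustrative address: for
 * addr = 0x0012345678901000,
 * ONCHIP_ADDR1(addr) = (addr >> 12) & 0xffffffff = 0x45678901 and
 * ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) = 0x00100123 --
 * the valid bit lands at bit 20 of the high word, i.e. bit 52 (the
 * "53rd bit") of the page-shifted address.
 */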
6440
6441#define CNIC_ILT_LINES 0
6442
6443static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6444{
6445 int reg;
6446
6447 if (CHIP_IS_E1H(bp))
6448 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6449 else /* E1 */
6450 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6451
6452 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6453}
6454
6455static int bnx2x_init_func(struct bnx2x *bp)
6456{
6457 int port = BP_PORT(bp);
6458 int func = BP_FUNC(bp);
8badd27a 6459 u32 addr, val;
34f80b04
EG
6460 int i;
6461
6462 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6463
8badd27a
EG
6464 /* set MSI reconfigure capability */
6465 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6466 val = REG_RD(bp, addr);
6467 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6468 REG_WR(bp, addr, val);
6469
34f80b04
EG
6470 i = FUNC_ILT_BASE(func);
6471
6472 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6473 if (CHIP_IS_E1H(bp)) {
6474 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6475 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6476 } else /* E1 */
6477 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6478 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6479
6480
6481 if (CHIP_IS_E1H(bp)) {
6482 for (i = 0; i < 9; i++)
6483 bnx2x_init_block(bp,
94a78b79 6484 cm_blocks[i], FUNC0_STAGE + func);
34f80b04
EG
6485
6486 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6487 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6488 }
6489
6490 /* HC init per function */
6491 if (CHIP_IS_E1H(bp)) {
6492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6493
6494 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6495 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6496 }
94a78b79 6497 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6498
c14423fe 6499 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6500 REG_WR(bp, 0x2114, 0xffffffff);
6501 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6502
34f80b04
EG
6503 return 0;
6504}
6505
6506static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6507{
6508 int i, rc = 0;
a2fbb9ea 6509
34f80b04
EG
6510 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6511 BP_FUNC(bp), load_code);
a2fbb9ea 6512
34f80b04
EG
6513 bp->dmae_ready = 0;
6514 mutex_init(&bp->dmae_mutex);
54016b26
EG
6515 rc = bnx2x_gunzip_init(bp);
6516 if (rc)
6517 return rc;
a2fbb9ea 6518
34f80b04
EG
6519 switch (load_code) {
6520 case FW_MSG_CODE_DRV_LOAD_COMMON:
6521 rc = bnx2x_init_common(bp);
6522 if (rc)
6523 goto init_hw_err;
6524 /* no break */
6525
6526 case FW_MSG_CODE_DRV_LOAD_PORT:
6527 bp->dmae_ready = 1;
6528 rc = bnx2x_init_port(bp);
6529 if (rc)
6530 goto init_hw_err;
6531 /* no break */
6532
6533 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6534 bp->dmae_ready = 1;
6535 rc = bnx2x_init_func(bp);
6536 if (rc)
6537 goto init_hw_err;
6538 break;
6539
6540 default:
6541 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6542 break;
6543 }
6544
6545 if (!BP_NOMCP(bp)) {
6546 int func = BP_FUNC(bp);
a2fbb9ea
ET
6547
6548 bp->fw_drv_pulse_wr_seq =
34f80b04 6549 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6550 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6551 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6552 }
a2fbb9ea 6553
34f80b04
EG
6554 /* this needs to be done before gunzip end */
6555 bnx2x_zero_def_sb(bp);
6556 for_each_queue(bp, i)
6557 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6558
6559init_hw_err:
6560 bnx2x_gunzip_end(bp);
6561
6562 return rc;
a2fbb9ea
ET
6563}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
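
/* The two free macros above mirror the allocation scheme used below:
 * BNX2X_PCI_FREE releases coherent DMA memory from pci_alloc_consistent()
 * and BNX2X_FREE releases host memory from vmalloc(). Both tolerate NULL
 * and clear the pointer afterwards, so freeing a partially built state
 * is harmless.
 */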

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
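
/* Any allocation failure above jumps to alloc_mem_err, which simply
 * calls bnx2x_free_mem() on whatever was built so far; since the free
 * macros skip NULL pointers, no bookkeeping of how far allocation got
 * is needed.
 */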

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
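
/* Tx cleanup walks the ring by packet (sw_cons up to sw_prod), letting
 * bnx2x_free_tx_pkt() advance the BD consumer across multi-BD packets;
 * Rx cleanup unmaps and frees every posted skb and, when TPA is active,
 * returns the aggregation pool as well.
 */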

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
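
/* Resulting msix_table layout: entry 0 carries the slowpath (default
 * status block) vector and entries 1..BNX2X_NUM_QUEUES(bp) carry one
 * fastpath each -- hence the "offset = 1" used here and in the
 * request/free paths. As an illustrative example (not from a specific
 * board), with 4 queues and BP_L_ID(bp) == 0 the IGU entries come out
 * as sp:0, fp0:1, fp1:2, fp2:3, fp3:4.
 */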

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
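
/* A short note on the flags choice above: MSI vectors are never shared,
 * so the line is requested with flags == 0, while legacy INTx may be
 * shared with other devices and therefore needs IRQF_SHARED.
 */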

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies; /* prevent tx timeout */
}
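
/* bp->intr_sem gates the whole fast path: bnx2x_netif_start() only
 * re-enables NAPI, interrupts and the Tx queues when its
 * atomic_dec_and_test() brings the semaphore back to zero, so nested
 * stop/start pairs unwind correctly.
 */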

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
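
/* The two MAC helpers above differ mainly in CAM layout: E1 indexes the
 * CAM by port (unicasts 0-31/32-63, multicasts 64-127/128-191) and
 * programs an explicit broadcast entry, while E1H indexes by function
 * and carries the outer-VLAN (e1hov) tag used in multi-function mode.
 */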

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
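
/* Ramrod completions normally arrive via the interrupt path and are
 * recorded by bnx2x_sp_event(); the poll argument covers the cases
 * where interrupts are not serviced yet, in which case the relevant
 * Rx completion queue is polled directly through bnx2x_rx_int().
 */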

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must not be more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  " defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}

static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}
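
/* A worked example of the queue-count selection above (illustrative
 * numbers, assuming the chip limit BNX2X_MAX_QUEUES(bp) allows them):
 * with multi_mode=1, int_mode unset, 8 online CPUs, num_rx_queues=4 and
 * num_tx_queues=0, this resolves to rx=4 (explicit) and tx=8 clamped
 * down to 4 so that Tx never exceeds Rx; should pci_enable_msix() then
 * fail, both collapse to a single queue.
 */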

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
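
/* The error labels above unwind in reverse order of setup: load_error3
 * undoes firmware/HW state and frees the fastpath SKBs and SGEs,
 * load_error2 releases the IRQs, and load_error1 tears down NAPI and
 * the memory obtained by bnx2x_alloc_mem().
 */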

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
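
/* Unlike the HALT stage, the PORT_DEL ramrod completes on the default
 * status block, so the loop above watches dsb_sp_prod for movement
 * rather than a fastpath state change; a timeout is tolerated since
 * the chip is about to be reset anyway.
 */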

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
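
/* When no MCP is present, the driver keeps its own load_count[]
 * bookkeeping ([0] global, [1]/[2] per port) to decide whether this
 * unload should reset just the function, the whole port, or the common
 * blocks -- mirroring the decision the MCP firmware would normally make.
 */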

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
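
/* The sequence above evicts a boot-time UNDI driver: it detects the
 * UNDI doorbell CID offset (0x7), issues unload requests for both ports
 * if needed, quiesces BRB input traffic, resets the device while
 * preserving the NIG port-swap strap values, and finally restores this
 * function's fw_seq.
 */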

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
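
/* chip_id packing, as assembled above: bits 16-31 chip num, 12-15 rev,
 * 4-11 metal, 0-3 bond_id. As an illustrative (made-up) example, chip
 * num 0x164e with rev 0, metal 0 and bond_id 0 packs to
 * id = 0x164e0000.
 */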

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
8468
34f80b04 8469static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8470{
34f80b04
EG
8471 int port = BP_PORT(bp);
8472 u32 val, val2;
589abe3a 8473 u32 config;
c2c8b03e 8474 u16 i;
01cd4528 8475 u32 ext_phy_type;
a2fbb9ea 8476
c18487ee 8477 bp->link_params.bp = bp;
34f80b04 8478 bp->link_params.port = port;
c18487ee 8479
c18487ee 8480 bp->link_params.lane_config =
a2fbb9ea 8481 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8482 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8483 SHMEM_RD(bp,
8484 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8485 /* BCM8727_NOC => BCM8727 no over current */
8486 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8487 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8488 bp->link_params.ext_phy_config &=
8489 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8490 bp->link_params.ext_phy_config |=
8491 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8492 bp->link_params.feature_config_flags |=
8493 FEATURE_CONFIG_BCM8727_NOC;
8494 }
8495
c18487ee 8496 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8497 SHMEM_RD(bp,
8498 dev_info.port_hw_config[port].speed_capability_mask);
8499
34f80b04 8500 bp->port.link_config =
a2fbb9ea
ET
8501 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8502
c2c8b03e
EG
8503 /* Get the 4 lanes xgxs config rx and tx */
8504 for (i = 0; i < 2; i++) {
8505 val = SHMEM_RD(bp,
8506 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8507 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8508 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8509
8510 val = SHMEM_RD(bp,
8511 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8512 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8513 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8514 }
8515
3ce2c3f9
EG
8516 /* If the device is capable of WoL, set the default state according
8517 * to the HW
8518 */
4d295db0 8519 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8520 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8521 (config & PORT_FEATURE_WOL_ENABLED));
8522
c2c8b03e
EG
8523 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8524 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8525 bp->link_params.lane_config,
8526 bp->link_params.ext_phy_config,
34f80b04 8527 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8528
4d295db0
EG
8529 bp->link_params.switch_cfg |= (bp->port.link_config &
8530 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8531 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8532
8533 bnx2x_link_settings_requested(bp);
8534
01cd4528
EG
8535 /*
8536 * If connected directly, work with the internal PHY, otherwise, work
8537 * with the external PHY
8538 */
8539 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8540 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8541 bp->mdio.prtad = bp->link_params.phy_addr;
8542
8543 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8544 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8545 bp->mdio.prtad =
8546 (bp->link_params.ext_phy_config &
8547 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8548 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8549
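	/* The MAC address lives in shmem as two 32-bit words: mac_upper
	 * holds bytes 0-1 in its low 16 bits and mac_lower holds bytes
	 * 2-5.  With illustrative values mac_upper = 0x00001122 and
	 * mac_lower = 0x33445566, the address becomes 11:22:33:44:55:66.
	 */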
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

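/*
 * Gather per-function HW info: the common and port sections plus, on
 * E1H, the multi-function (MF) configuration.  MF mode is detected from
 * the outer-VLAN (E1HOV) tag of function 0: any value other than the
 * all-ones default means the board was provisioned for MF, and each
 * function must then carry a valid tag of its own.
 */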
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
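
	/* In E1H multi-function mode the per-VN max bandwidth field caps
	 * the reported speed.  The field is in units of 100 Mbps; as an
	 * illustrative decode, a raw value of 25 yields 2500, so a 10G
	 * link reports 2.5G when the VN is capped at a quarter rate.
	 */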
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
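
	/* the counts above are in 32-bit registers: convert to bytes and
	 * account for the header that bnx2x_get_regs() prepends */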
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32
bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

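/*
 * NVRAM access is arbitrated between the two ports through the
 * MCPR_NVM_SW_ARB register: a port raises its REQ_SET bit, polls until
 * the matching ARB bit reports ownership, and drops it via REQ_CLR when
 * done.  The per-port bit math below (REQ_SET1 << port) presumably maps
 * port 1 onto the SET2/ARB2 positions; that naming is an assumption,
 * the shifts themselves are what the code relies on.
 */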
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

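/*
 * Single-dword NVRAM read: clear the sticky DONE bit, program the
 * address, kick a DOIT command and poll DONE.  The 5 us poll step and
 * the NVRAM_TIMEOUT_COUNT bound are empirical driver values.
 */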
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

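	/* the burst is framed by command flags: FIRST on the initial
	 * dword, LAST on the final one and neither in between */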
	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

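	/* writes must not cross an NVRAM page: LAST is raised at the end
	 * of the buffer or at a page boundary and FIRST again at each
	 * page start, so a long write becomes a series of page-sized
	 * bursts */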
	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

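	/* eeprom->magic doubles as a command channel for PHY firmware
	 * upgrades: magics 0x504859xx ('PHY' plus one byte) are PMF-only
	 * PHY operations ('PHYP' prepares, 'PHYR' re-inits, and the
	 * completion code is 0x53985943, labeled 'PHYC' below as coded);
	 * any other magic is a plain NVRAM write */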
	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
				(bp->link_params.ext_phy_config &
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

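	/* start from AUTO, fold in the requested rx/tx pause bits, then
	 * resolve: no bits set means NONE, while autoneg on an
	 * autoneg-capable port falls back to AUTO so the link code can
	 * negotiate pause */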
	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

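/*
 * The register self-test writes a pattern (all zeros, then all ones)
 * into each table entry's per-port mirror (offset0 + port*offset1),
 * reads it back and compares under the writable-bits mask, restoring
 * the original value in between; it is meant to run on a device loaded
 * in diagnostic mode.
 */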
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

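	/* the probe frame is addressed to ourselves: own MAC as
	 * destination, zeroed source and a payload of (i & 0xff) per
	 * byte so the receive side can verify every position */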
	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3
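/* feeding ether_crc_le() a block that ends with its own little-endian
 * CRC32 yields this constant (the standard CRC-32 residue), so each
 * region below can be validated without knowing its internal layout */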
10117
10118static int bnx2x_test_nvram(struct bnx2x *bp)
10119{
10120 static const struct {
10121 int offset;
10122 int size;
10123 } nvram_tbl[] = {
10124 { 0, 0x14 }, /* bootstrap */
10125 { 0x14, 0xec }, /* dir */
10126 { 0x100, 0x350 }, /* manuf_info */
10127 { 0x450, 0xf0 }, /* feature_info */
10128 { 0x640, 0x64 }, /* upgrade_key_info */
10129 { 0x6a4, 0x64 },
10130 { 0x708, 0x70 }, /* manuf_key_info */
10131 { 0x778, 0x70 },
10132 { 0, 0 }
10133 };
4781bfad 10134 __be32 buf[0x350 / 4];
f3c87cdd
YG
10135 u8 *data = (u8 *)buf;
10136 int i, rc;
10137 u32 magic, csum;
10138
10139 rc = bnx2x_nvram_read(bp, 0, data, 4);
10140 if (rc) {
f5372251 10141 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10142 goto test_nvram_exit;
10143 }
10144
10145 magic = be32_to_cpu(buf[0]);
10146 if (magic != 0x669955aa) {
10147 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10148 rc = -ENODEV;
10149 goto test_nvram_exit;
10150 }
10151
10152 for (i = 0; nvram_tbl[i].size; i++) {
10153
10154 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10155 nvram_tbl[i].size);
10156 if (rc) {
10157 DP(NETIF_MSG_PROBE,
f5372251 10158 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10159 goto test_nvram_exit;
10160 }
10161
10162 csum = ether_crc_le(nvram_tbl[i].size, data);
10163 if (csum != CRC32_RESIDUAL) {
10164 DP(NETIF_MSG_PROBE,
10165 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10166 rc = -ENODEV;
10167 goto test_nvram_exit;
10168 }
10169 }
10170
10171test_nvram_exit:
10172 return rc;
10173}
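
/* Editor's sketch (standalone, not driver code): why the constant
 * CRC32_RESIDUAL works as a pass/fail test above.  ether_crc_le() is a
 * reflected CRC-32 (poly 0xedb88320, init ~0) that omits the final
 * complement, so running it over a block whose last 4 bytes hold the
 * block's own CRC-32 (stored little endian) always leaves the register
 * at 0xdebb20e3, whatever the data.  Verifiable in userspace:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t demo_crc32_le_reg(const uint8_t *p, size_t len)
{
	uint32_t reg = 0xffffffff;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		reg ^= p[i];
		for (bit = 0; bit < 8; bit++)
			reg = (reg >> 1) ^ ((reg & 1) ? 0xedb88320 : 0);
	}
	return reg;	/* no final XOR, matching ether_crc_le() */
}

int main(void)
{
	uint8_t blk[13] = "123456789";		/* 9 data + 4 CRC bytes */
	uint32_t crc = ~demo_crc32_le_reg(blk, 9);	/* standard CRC-32 */
	int i;

	for (i = 0; i < 4; i++)			/* append CRC little endian */
		blk[9 + i] = (uint8_t)(crc >> (8 * i));

	assert(crc == 0xcbf43926);			/* well-known vector */
	assert(demo_crc32_le_reg(blk, 13) == 0xdebb20e3);	/* residual */
	return 0;
}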
10174
10175static int bnx2x_test_intr(struct bnx2x *bp)
10176{
10177 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10178 int i, rc;
10179
10180 if (!netif_running(bp->dev))
10181 return -ENODEV;
10182
8d9c5f34 10183 config->hdr.length = 0;
af246401
EG
10184 if (CHIP_IS_E1(bp))
10185 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10186 else
10187 config->hdr.offset = BP_FUNC(bp);
0626b899 10188 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10189 config->hdr.reserved1 = 0;
10190
10191 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10192 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10193 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10194 if (rc == 0) {
10195 bp->set_mac_pending++;
10196 for (i = 0; i < 10; i++) {
10197 if (!bp->set_mac_pending)
10198 break;
10199 msleep_interruptible(10);
10200 }
10201 if (i == 10)
10202 rc = -ENODEV;
10203 }
10204
10205 return rc;
10206}
10207
a2fbb9ea
ET
10208static void bnx2x_self_test(struct net_device *dev,
10209 struct ethtool_test *etest, u64 *buf)
10210{
10211 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10212
10213 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10214
f3c87cdd 10215 if (!netif_running(dev))
a2fbb9ea 10216 return;
a2fbb9ea 10217
33471629 10218 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10219 if (IS_E1HMF(bp))
10220 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10221
10222 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10223 int port = BP_PORT(bp);
10224 u32 val;
f3c87cdd
YG
10225 u8 link_up;
10226
279abdf5
EG
10227 /* save current value of input enable for TX port IF */
10228 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10229 /* disable input for TX port IF */
10230 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10231
f3c87cdd
YG
10232 link_up = bp->link_vars.link_up;
10233 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10234 bnx2x_nic_load(bp, LOAD_DIAG);
10235 /* wait until link state is restored */
10236 bnx2x_wait_for_link(bp, link_up);
10237
10238 if (bnx2x_test_registers(bp) != 0) {
10239 buf[0] = 1;
10240 etest->flags |= ETH_TEST_FL_FAILED;
10241 }
10242 if (bnx2x_test_memory(bp) != 0) {
10243 buf[1] = 1;
10244 etest->flags |= ETH_TEST_FL_FAILED;
10245 }
10246 buf[2] = bnx2x_test_loopback(bp, link_up);
10247 if (buf[2] != 0)
10248 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10249
f3c87cdd 10250 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10251
10252 /* restore input for TX port IF */
10253 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10254
f3c87cdd
YG
10255 bnx2x_nic_load(bp, LOAD_NORMAL);
10256 /* wait until link state is restored */
10257 bnx2x_wait_for_link(bp, link_up);
10258 }
10259 if (bnx2x_test_nvram(bp) != 0) {
10260 buf[3] = 1;
a2fbb9ea
ET
10261 etest->flags |= ETH_TEST_FL_FAILED;
10262 }
f3c87cdd
YG
10263 if (bnx2x_test_intr(bp) != 0) {
10264 buf[4] = 1;
10265 etest->flags |= ETH_TEST_FL_FAILED;
10266 }
10267 if (bp->port.pmf)
10268 if (bnx2x_link_test(bp) != 0) {
10269 buf[5] = 1;
10270 etest->flags |= ETH_TEST_FL_FAILED;
10271 }
f3c87cdd
YG
10272
10273#ifdef BNX2X_EXTRA_DEBUG
10274 bnx2x_panic_dump(bp);
10275#endif
a2fbb9ea
ET
10276}
10277
de832a55
EG
10278static const struct {
10279 long offset;
10280 int size;
10281 u8 string[ETH_GSTRING_LEN];
10282} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10283/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10284 { Q_STATS_OFFSET32(error_bytes_received_hi),
10285 8, "[%d]: rx_error_bytes" },
10286 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10287 8, "[%d]: rx_ucast_packets" },
10288 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10289 8, "[%d]: rx_mcast_packets" },
10290 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10291 8, "[%d]: rx_bcast_packets" },
10292 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10293 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10294 4, "[%d]: rx_phy_ip_err_discards"},
10295 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10296 4, "[%d]: rx_skb_alloc_discard" },
10297 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10298
10299/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10300 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10301 8, "[%d]: tx_packets" }
10302};
10303
bb2a0f7a
YG
10304static const struct {
10305 long offset;
10306 int size;
10307 u32 flags;
66e855f3
YG
10308#define STATS_FLAGS_PORT 1
10309#define STATS_FLAGS_FUNC 2
de832a55 10310#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10311 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10312} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10313/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10314 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10315 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10316 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10317 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10318 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10319 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10320 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10321 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10322 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10323 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10324 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10325 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10326 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10327 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10328 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10329 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10330 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10331/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10332 8, STATS_FLAGS_PORT, "rx_fragments" },
10333 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10334 8, STATS_FLAGS_PORT, "rx_jabbers" },
10335 { STATS_OFFSET32(no_buff_discard_hi),
10336 8, STATS_FLAGS_BOTH, "rx_discards" },
10337 { STATS_OFFSET32(mac_filter_discard),
10338 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10339 { STATS_OFFSET32(xxoverflow_discard),
10340 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10341 { STATS_OFFSET32(brb_drop_hi),
10342 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10343 { STATS_OFFSET32(brb_truncate_hi),
10344 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10345 { STATS_OFFSET32(pause_frames_received_hi),
10346 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10347 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10348 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10349 { STATS_OFFSET32(nig_timer_max),
10350 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10351/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10352 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10353 { STATS_OFFSET32(rx_skb_alloc_failed),
10354 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10355 { STATS_OFFSET32(hw_csum_err),
10356 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10357
10358 { STATS_OFFSET32(total_bytes_transmitted_hi),
10359 8, STATS_FLAGS_BOTH, "tx_bytes" },
10360 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10361 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10362 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10363 8, STATS_FLAGS_BOTH, "tx_packets" },
10364 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10365 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10366 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10367 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10368 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10369 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10370 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10371 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10372/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10373 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10374 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10375 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10376 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10377 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10378 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10379 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10380 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10381 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10382 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10383 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10384 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10385 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10386 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10387 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10388 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10389 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10390 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10391 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10392/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10393 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10394 { STATS_OFFSET32(pause_frames_sent_hi),
10395 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10396};
10397
de832a55
EG
10398#define IS_PORT_STAT(i) \
10399 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10400#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10401#define IS_E1HMF_MODE_STAT(bp) \
10402 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10403
a2fbb9ea
ET
10404static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10405{
bb2a0f7a 10406 struct bnx2x *bp = netdev_priv(dev);
de832a55 10407 int i, j, k;
bb2a0f7a 10408
a2fbb9ea
ET
10409 switch (stringset) {
10410 case ETH_SS_STATS:
de832a55
EG
10411 if (is_multi(bp)) {
10412 k = 0;
ca00392c 10413 for_each_rx_queue(bp, i) {
de832a55
EG
10414 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10415 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10416 bnx2x_q_stats_arr[j].string, i);
10417 k += BNX2X_NUM_Q_STATS;
10418 }
10419 if (IS_E1HMF_MODE_STAT(bp))
10420 break;
10421 for (j = 0; j < BNX2X_NUM_STATS; j++)
10422 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10423 bnx2x_stats_arr[j].string);
10424 } else {
10425 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10426 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10427 continue;
10428 strcpy(buf + j*ETH_GSTRING_LEN,
10429 bnx2x_stats_arr[i].string);
10430 j++;
10431 }
bb2a0f7a 10432 }
a2fbb9ea
ET
10433 break;
10434
10435 case ETH_SS_TEST:
10436 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10437 break;
10438 }
10439}
10440
10441static int bnx2x_get_stats_count(struct net_device *dev)
10442{
bb2a0f7a 10443 struct bnx2x *bp = netdev_priv(dev);
de832a55 10444 int i, num_stats;
bb2a0f7a 10445
de832a55 10446 if (is_multi(bp)) {
ca00392c 10447 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
de832a55
EG
10448 if (!IS_E1HMF_MODE_STAT(bp))
10449 num_stats += BNX2X_NUM_STATS;
10450 } else {
10451 if (IS_E1HMF_MODE_STAT(bp)) {
10452 num_stats = 0;
10453 for (i = 0; i < BNX2X_NUM_STATS; i++)
10454 if (IS_FUNC_STAT(i))
10455 num_stats++;
10456 } else
10457 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 10458 }
de832a55 10459
bb2a0f7a 10460 return num_stats;
a2fbb9ea
ET
10461}
10462
10463static void bnx2x_get_ethtool_stats(struct net_device *dev,
10464 struct ethtool_stats *stats, u64 *buf)
10465{
10466 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10467 u32 *hw_stats, *offset;
10468 int i, j, k;
bb2a0f7a 10469
de832a55
EG
10470 if (is_multi(bp)) {
10471 k = 0;
ca00392c 10472 for_each_rx_queue(bp, i) {
de832a55
EG
10473 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10474 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10475 if (bnx2x_q_stats_arr[j].size == 0) {
10476 /* skip this counter */
10477 buf[k + j] = 0;
10478 continue;
10479 }
10480 offset = (hw_stats +
10481 bnx2x_q_stats_arr[j].offset);
10482 if (bnx2x_q_stats_arr[j].size == 4) {
10483 /* 4-byte counter */
10484 buf[k + j] = (u64) *offset;
10485 continue;
10486 }
10487 /* 8-byte counter */
10488 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10489 }
10490 k += BNX2X_NUM_Q_STATS;
10491 }
10492 if (IS_E1HMF_MODE_STAT(bp))
10493 return;
10494 hw_stats = (u32 *)&bp->eth_stats;
10495 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10496 if (bnx2x_stats_arr[j].size == 0) {
10497 /* skip this counter */
10498 buf[k + j] = 0;
10499 continue;
10500 }
10501 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10502 if (bnx2x_stats_arr[j].size == 4) {
10503 /* 4-byte counter */
10504 buf[k + j] = (u64) *offset;
10505 continue;
10506 }
10507 /* 8-byte counter */
10508 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10509 }
de832a55
EG
10510 } else {
10511 hw_stats = (u32 *)&bp->eth_stats;
10512 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10513 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10514 continue;
10515 if (bnx2x_stats_arr[i].size == 0) {
10516 /* skip this counter */
10517 buf[j] = 0;
10518 j++;
10519 continue;
10520 }
10521 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10522 if (bnx2x_stats_arr[i].size == 4) {
10523 /* 4-byte counter */
10524 buf[j] = (u64) *offset;
10525 j++;
10526 continue;
10527 }
10528 /* 8-byte counter */
10529 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10530 j++;
a2fbb9ea 10531 }
a2fbb9ea
ET
10532 }
10533}
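
/* Editor's sketch (not driver code): the statistics blocks read above
 * store each 64-bit counter as two adjacent 32-bit words, high word
 * first, which HILO_U64(*offset, *(offset + 1)) reassembles.  A minimal
 * equivalent, with an invented demo_ name:
 */
static inline u64 demo_hilo_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}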
10534
10535static int bnx2x_phys_id(struct net_device *dev, u32 data)
10536{
10537 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10538 int port = BP_PORT(bp);
a2fbb9ea
ET
10539 int i;
10540
34f80b04
EG
10541 if (!netif_running(dev))
10542 return 0;
10543
10544 if (!bp->port.pmf)
10545 return 0;
10546
a2fbb9ea
ET
10547 if (data == 0)
10548 data = 2;
10549
10550 for (i = 0; i < (data * 2); i++) {
c18487ee 10551 if ((i % 2) == 0)
34f80b04 10552 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10553 bp->link_params.hw_led_mode,
10554 bp->link_params.chip_id);
10555 else
34f80b04 10556 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10557 bp->link_params.hw_led_mode,
10558 bp->link_params.chip_id);
10559
a2fbb9ea
ET
10560 msleep_interruptible(500);
10561 if (signal_pending(current))
10562 break;
10563 }
10564
c18487ee 10565 if (bp->link_vars.link_up)
34f80b04 10566 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10567 bp->link_vars.line_speed,
10568 bp->link_params.hw_led_mode,
10569 bp->link_params.chip_id);
a2fbb9ea
ET
10570
10571 return 0;
10572}
10573
10574static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10575 .get_settings = bnx2x_get_settings,
10576 .set_settings = bnx2x_set_settings,
10577 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10578 .get_regs_len = bnx2x_get_regs_len,
10579 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10580 .get_wol = bnx2x_get_wol,
10581 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10582 .get_msglevel = bnx2x_get_msglevel,
10583 .set_msglevel = bnx2x_set_msglevel,
10584 .nway_reset = bnx2x_nway_reset,
01e53298 10585 .get_link = bnx2x_get_link,
7a9b2557
VZ
10586 .get_eeprom_len = bnx2x_get_eeprom_len,
10587 .get_eeprom = bnx2x_get_eeprom,
10588 .set_eeprom = bnx2x_set_eeprom,
10589 .get_coalesce = bnx2x_get_coalesce,
10590 .set_coalesce = bnx2x_set_coalesce,
10591 .get_ringparam = bnx2x_get_ringparam,
10592 .set_ringparam = bnx2x_set_ringparam,
10593 .get_pauseparam = bnx2x_get_pauseparam,
10594 .set_pauseparam = bnx2x_set_pauseparam,
10595 .get_rx_csum = bnx2x_get_rx_csum,
10596 .set_rx_csum = bnx2x_set_rx_csum,
10597 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10598 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10599 .set_flags = bnx2x_set_flags,
10600 .get_flags = ethtool_op_get_flags,
10601 .get_sg = ethtool_op_get_sg,
10602 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10603 .get_tso = ethtool_op_get_tso,
10604 .set_tso = bnx2x_set_tso,
10605 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
10606 .self_test = bnx2x_self_test,
10607 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
10608 .phys_id = bnx2x_phys_id,
10609 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10610 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10611};
10612
10613/* end of ethtool_ops */
10614
10615/****************************************************************************
10616* General service functions
10617****************************************************************************/
10618
10619static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10620{
10621 u16 pmcsr;
10622
10623 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10624
10625 switch (state) {
10626 case PCI_D0:
34f80b04 10627 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10628 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10629 PCI_PM_CTRL_PME_STATUS));
10630
10631 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10632 /* delay required during transition out of D3hot */
a2fbb9ea 10633 msleep(20);
34f80b04 10634 break;
a2fbb9ea 10635
34f80b04
EG
10636 case PCI_D3hot:
10637 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10638 pmcsr |= 3;
a2fbb9ea 10639
34f80b04
EG
10640 if (bp->wol)
10641 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10642
34f80b04
EG
10643 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10644 pmcsr);
a2fbb9ea 10645
34f80b04
EG
10646 /* No more memory access after this point until
10647 * device is brought back to D0.
10648 */
10649 break;
10650
10651 default:
10652 return -EINVAL;
10653 }
10654 return 0;
a2fbb9ea
ET
10655}
10656
237907c1
EG
10657static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10658{
10659 u16 rx_cons_sb;
10660
10661 /* Tell compiler that status block fields can change */
10662 barrier();
10663 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10664 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10665 rx_cons_sb++;
10666 return (fp->rx_comp_cons != rx_cons_sb);
10667}
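
/* Editor's sketch (illustrative only) of the masking idiom above: the
 * RCQ is laid out in pages whose last element is a "next page" link
 * rather than a real completion, so an index that lands on that slot is
 * bumped past it before being compared with the consumer.  The page
 * geometry below is invented for the example.
 */
#define DEMO_RCQ_DESC_PER_PAGE	128	/* hypothetical */
#define DEMO_MAX_RCQ_DESC_CNT	(DEMO_RCQ_DESC_PER_PAGE - 1)

static inline u16 demo_skip_next_page_slot(u16 idx)
{
	if ((idx & DEMO_MAX_RCQ_DESC_CNT) == DEMO_MAX_RCQ_DESC_CNT)
		idx++;		/* step over the link element */
	return idx;
}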
10668
34f80b04
EG
10669/*
10670 * net_device service functions
10671 */
10672
a2fbb9ea
ET
10673static int bnx2x_poll(struct napi_struct *napi, int budget)
10674{
10675 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10676 napi);
10677 struct bnx2x *bp = fp->bp;
10678 int work_done = 0;
10679
10680#ifdef BNX2X_STOP_ON_ERROR
10681 if (unlikely(bp->panic))
34f80b04 10682 goto poll_panic;
a2fbb9ea
ET
10683#endif
10684
a2fbb9ea
ET
10685 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10686 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10687
10688 bnx2x_update_fpsb_idx(fp);
10689
8534f32c 10690 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10691 work_done = bnx2x_rx_int(fp, budget);
356e2385 10692
8534f32c
EG
10693 /* must not complete if we consumed full budget */
10694 if (work_done >= budget)
10695 goto poll_again;
10696 }
a2fbb9ea 10697
ca00392c 10698 	/* bnx2x_has_rx_work() reads the status block, so the status block
8534f32c 10699 	 * indices must actually have been read (in bnx2x_update_fpsb_idx)
ca00392c 10700 	 * before this check runs; otherwise we could write a stale "newer"
8534f32c 10701 	 * status block value to the IGU. Without this rmb, a DMA arriving
ca00392c 10702 	 * right after bnx2x_has_rx_work could let the memory read in
8534f32c
EG
 10703 	 * bnx2x_update_fpsb_idx be postponed to right before bnx2x_ack_sb.
 10704 	 * In that case there would never be another interrupt until the
 10705 	 * next update of the status block, even though there is still
 10706 	 * unhandled work.
 10707 	 */
10708 rmb();
a2fbb9ea 10709
ca00392c 10710 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 10711#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10712poll_panic:
a2fbb9ea 10713#endif
288379f0 10714 napi_complete(napi);
a2fbb9ea 10715
0626b899 10716 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10717 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10718 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
10719 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10720 }
356e2385 10721
8534f32c 10722poll_again:
a2fbb9ea
ET
10723 return work_done;
10724}
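
/* Editor's sketch (generic callbacks, not driver API) of the poll idiom
 * used above: process up to the budget, then re-evaluate the "more
 * work?" condition only after a read barrier, so a status block DMA'd
 * in between is either seen by the re-check or handled after the
 * re-enabled interrupt fires with up-to-date indices.
 */
static int demo_poll(struct napi_struct *napi, int budget,
		     int (*do_rx)(void *ctx, int budget),
		     bool (*has_work)(void *ctx), void *ctx)
{
	int work_done = do_rx(ctx, budget);

	if (work_done >= budget)
		return work_done;	/* stay scheduled, poll again */

	rmb();				/* settle DMA'd indices first */
	if (!has_work(ctx)) {
		napi_complete(napi);
		/* re-enable interrupts here (bnx2x_ack_sb in the driver) */
	}
	return work_done;
}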
10725
755735eb
EG
10726
10727/* we split the first BD into headers and data BDs
33471629 10728 * to ease the pain of our fellow microcode engineers
755735eb
EG
10729 * we use one mapping for both BDs
10730 * So far this has only been observed to happen
10731 * in Other Operating Systems(TM)
10732 */
10733static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10734 struct bnx2x_fastpath *fp,
ca00392c
EG
10735 struct sw_tx_bd *tx_buf,
10736 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
10737 u16 bd_prod, int nbd)
10738{
ca00392c 10739 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
10740 struct eth_tx_bd *d_tx_bd;
10741 dma_addr_t mapping;
10742 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10743
10744 /* first fix first BD */
10745 h_tx_bd->nbd = cpu_to_le16(nbd);
10746 h_tx_bd->nbytes = cpu_to_le16(hlen);
10747
10748 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10749 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10750 h_tx_bd->addr_lo, h_tx_bd->nbd);
10751
10752 /* now get a new data BD
10753 * (after the pbd) and fill it */
10754 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 10755 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
10756
10757 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10758 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10759
10760 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10761 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10762 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
10763
10764 /* this marks the BD as one that has no individual mapping */
10765 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10766
755735eb
EG
10767 DP(NETIF_MSG_TX_QUEUED,
10768 "TSO split data size is %d (%x:%x)\n",
10769 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10770
ca00392c
EG
10771 /* update tx_bd */
10772 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
10773
10774 return bd_prod;
10775}
10776
10777static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10778{
10779 if (fix > 0)
10780 csum = (u16) ~csum_fold(csum_sub(csum,
10781 csum_partial(t_header - fix, fix, 0)));
10782
10783 else if (fix < 0)
10784 csum = (u16) ~csum_fold(csum_add(csum,
10785 csum_partial(t_header, -fix, 0)));
10786
10787 return swab16(csum);
10788}
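
/* Editor's sketch (userspace, illustrative) of the property that makes
 * the fixup above legal: one's-complement checksums over adjacent byte
 * ranges add up under carry folding, so a checksum the HW computed from
 * the wrong starting offset can be corrected by adding/subtracting the
 * checksum of the mis-covered span instead of recomputing.  All demo_
 * names are invented.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t demo_fold(uint32_t sum)
{
	while (sum >> 16)		/* wrap carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint32_t demo_csum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	for (; len > 1; p += 2, len -= 2)
		sum += ((uint32_t)p[0] << 8) | p[1];
	if (len)
		sum += (uint32_t)p[0] << 8;
	return demo_fold(sum);
}

int main(void)
{
	uint8_t pkt[32];
	size_t i, fix = 8;	/* pretend the HW started 8 bytes early */

	for (i = 0; i < sizeof(pkt); i++)
		pkt[i] = (uint8_t)(i * 37 + 5);

	assert(demo_fold(demo_csum(pkt, fix) +
			 demo_csum(pkt + fix, sizeof(pkt) - fix)) ==
	       demo_csum(pkt, sizeof(pkt)));
	return 0;
}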
10789
10790static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10791{
10792 u32 rc;
10793
10794 if (skb->ip_summed != CHECKSUM_PARTIAL)
10795 rc = XMIT_PLAIN;
10796
10797 else {
4781bfad 10798 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
10799 rc = XMIT_CSUM_V6;
10800 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10801 rc |= XMIT_CSUM_TCP;
10802
10803 } else {
10804 rc = XMIT_CSUM_V4;
10805 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10806 rc |= XMIT_CSUM_TCP;
10807 }
10808 }
10809
10810 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10811 rc |= XMIT_GSO_V4;
10812
10813 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10814 rc |= XMIT_GSO_V6;
10815
10816 return rc;
10817}
10818
632da4d6 10819#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10820/* check if a packet requires linearization (because it is too fragmented);
 10821   no need to check fragmentation if page size > 8K (there will be no
 10822   violation of FW restrictions) */
755735eb
EG
10823static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10824 u32 xmit_type)
10825{
10826 int to_copy = 0;
10827 int hlen = 0;
10828 int first_bd_sz = 0;
10829
10830 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10831 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10832
10833 if (xmit_type & XMIT_GSO) {
10834 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10835 /* Check if LSO packet needs to be copied:
10836 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10837 int wnd_size = MAX_FETCH_BD - 3;
33471629 10838 /* Number of windows to check */
755735eb
EG
10839 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10840 int wnd_idx = 0;
10841 int frag_idx = 0;
10842 u32 wnd_sum = 0;
10843
10844 /* Headers length */
10845 hlen = (int)(skb_transport_header(skb) - skb->data) +
10846 tcp_hdrlen(skb);
10847
10848 /* Amount of data (w/o headers) on linear part of SKB*/
10849 first_bd_sz = skb_headlen(skb) - hlen;
10850
10851 wnd_sum = first_bd_sz;
10852
10853 /* Calculate the first sum - it's special */
10854 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10855 wnd_sum +=
10856 skb_shinfo(skb)->frags[frag_idx].size;
10857
10858 /* If there was data on linear skb data - check it */
10859 if (first_bd_sz > 0) {
10860 if (unlikely(wnd_sum < lso_mss)) {
10861 to_copy = 1;
10862 goto exit_lbl;
10863 }
10864
10865 wnd_sum -= first_bd_sz;
10866 }
10867
10868 /* Others are easier: run through the frag list and
10869 check all windows */
10870 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10871 wnd_sum +=
10872 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10873
10874 if (unlikely(wnd_sum < lso_mss)) {
10875 to_copy = 1;
10876 break;
10877 }
10878 wnd_sum -=
10879 skb_shinfo(skb)->frags[wnd_idx].size;
10880 }
755735eb
EG
10881 } else {
 10882 			/* a non-LSO packet that is too fragmented
 10883 			   must always be linearized */
10884 to_copy = 1;
10885 }
10886 }
10887
10888exit_lbl:
10889 if (unlikely(to_copy))
10890 DP(NETIF_MSG_TX_QUEUED,
10891 "Linearization IS REQUIRED for %s packet. "
10892 "num_frags %d hlen %d first_bd_sz %d\n",
10893 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10894 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10895
10896 return to_copy;
10897}
632da4d6 10898#endif
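
/* Editor's sketch (simplified model, invented names): the sliding-window
 * rule enforced above, without the special handling of headers and the
 * linear part.  Every run of wnd_size consecutive fragments must carry
 * at least mss bytes, otherwise the packet must be linearized.
 */
static int demo_needs_linearize(const unsigned int *frag_sz, int nfrags,
				int wnd_size, unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	if (nfrags < wnd_size)		/* few enough BDs, always fine */
		return 0;

	for (i = 0; i < wnd_size; i++)	/* first window */
		wnd_sum += frag_sz[i];

	for (i = 0; ; i++) {
		if (wnd_sum < mss)
			return 1;	/* this window is too "thin" */
		if (i + wnd_size >= nfrags)
			return 0;	/* all windows checked */
		/* slide: drop the oldest fragment, add the next one */
		wnd_sum += frag_sz[i + wnd_size] - frag_sz[i];
	}
}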
755735eb
EG
10899
10900/* called with netif_tx_lock
a2fbb9ea 10901 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10902 * netif_wake_queue()
a2fbb9ea
ET
10903 */
10904static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10905{
10906 struct bnx2x *bp = netdev_priv(dev);
ca00392c 10907 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 10908 struct netdev_queue *txq;
a2fbb9ea 10909 struct sw_tx_bd *tx_buf;
ca00392c
EG
10910 struct eth_tx_start_bd *tx_start_bd;
10911 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
10912 struct eth_tx_parse_bd *pbd = NULL;
10913 u16 pkt_prod, bd_prod;
755735eb 10914 int nbd, fp_index;
a2fbb9ea 10915 dma_addr_t mapping;
755735eb 10916 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
10917 int i;
10918 u8 hlen = 0;
ca00392c 10919 __le16 pkt_size = 0;
a2fbb9ea
ET
10920
10921#ifdef BNX2X_STOP_ON_ERROR
10922 if (unlikely(bp->panic))
10923 return NETDEV_TX_BUSY;
10924#endif
10925
555f6c78
EG
10926 fp_index = skb_get_queue_mapping(skb);
10927 txq = netdev_get_tx_queue(dev, fp_index);
10928
ca00392c
EG
10929 fp = &bp->fp[fp_index + bp->num_rx_queues];
10930 fp_stat = &bp->fp[fp_index];
755735eb 10931
231fd58a 10932 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 10933 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 10934 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10935 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10936 return NETDEV_TX_BUSY;
10937 }
10938
755735eb
EG
10939 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10940 " gso type %x xmit_type %x\n",
10941 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10942 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10943
632da4d6 10944#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10945 /* First, check if we need to linearize the skb (due to FW
10946 restrictions). No need to check fragmentation if page size > 8K
 10947 	   (there will be no violation of FW restrictions) */
755735eb
EG
10948 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10949 /* Statistics of linearization */
10950 bp->lin_cnt++;
10951 if (skb_linearize(skb) != 0) {
10952 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10953 "silently dropping this SKB\n");
10954 dev_kfree_skb_any(skb);
da5a662a 10955 return NETDEV_TX_OK;
755735eb
EG
10956 }
10957 }
632da4d6 10958#endif
755735eb 10959
a2fbb9ea 10960 /*
755735eb 10961 Please read carefully. First we use one BD which we mark as start,
ca00392c 10962 then we have a parsing info BD (used for TSO or xsum),
755735eb 10963 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10964 (don't forget to mark the last one as last,
10965 and to unmap only AFTER you write to the BD ...)
755735eb 10966 	And above all, all PBD sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10967 */
10968
10969 pkt_prod = fp->tx_pkt_prod++;
755735eb 10970 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10971
755735eb 10972 /* get a tx_buf and first BD */
a2fbb9ea 10973 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 10974 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 10975
ca00392c
EG
10976 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10977 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10978 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 10979 /* header nbd */
ca00392c 10980 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10981
755735eb
EG
10982 /* remember the first BD of the packet */
10983 tx_buf->first_bd = fp->tx_bd_prod;
10984 tx_buf->skb = skb;
ca00392c 10985 tx_buf->flags = 0;
a2fbb9ea
ET
10986
10987 DP(NETIF_MSG_TX_QUEUED,
10988 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 10989 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 10990
0c6671b0
EG
10991#ifdef BCM_VLAN
10992 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10993 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
10994 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10995 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 10996 } else
0c6671b0 10997#endif
ca00392c 10998 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10999
ca00392c
EG
11000 /* turn on parsing and get a BD */
11001 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11002 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11003
ca00392c 11004 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11005
11006 if (xmit_type & XMIT_CSUM) {
ca00392c 11007 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11008
11009 /* for now NS flag is not used in Linux */
4781bfad
EG
11010 pbd->global_data =
11011 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11012 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11013
755735eb
EG
11014 pbd->ip_hlen = (skb_transport_header(skb) -
11015 skb_network_header(skb)) / 2;
11016
11017 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11018
755735eb 11019 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11020 hlen = hlen*2;
a2fbb9ea 11021
ca00392c 11022 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11023
11024 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11025 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11026 ETH_TX_BD_FLAGS_IP_CSUM;
11027 else
ca00392c
EG
11028 tx_start_bd->bd_flags.as_bitfield |=
11029 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11030
11031 if (xmit_type & XMIT_CSUM_TCP) {
11032 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11033
11034 } else {
11035 s8 fix = SKB_CS_OFF(skb); /* signed! */
11036
ca00392c 11037 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11038
755735eb 11039 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11040 "hlen %d fix %d csum before fix %x\n",
11041 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11042
11043 /* HW bug: fixup the CSUM */
11044 pbd->tcp_pseudo_csum =
11045 bnx2x_csum_fix(skb_transport_header(skb),
11046 SKB_CS(skb), fix);
11047
11048 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11049 pbd->tcp_pseudo_csum);
11050 }
a2fbb9ea
ET
11051 }
11052
11053 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11054 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11055
ca00392c
EG
11056 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11057 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11058 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11059 tx_start_bd->nbd = cpu_to_le16(nbd);
11060 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11061 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11062
11063 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11064 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11065 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11066 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11067 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11068
755735eb 11069 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11070
11071 DP(NETIF_MSG_TX_QUEUED,
11072 "TSO packet len %d hlen %d total len %d tso size %d\n",
11073 skb->len, hlen, skb_headlen(skb),
11074 skb_shinfo(skb)->gso_size);
11075
ca00392c 11076 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11077
755735eb 11078 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11079 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11080 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11081
11082 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11083 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11084 pbd->tcp_flags = pbd_tcp_flags(skb);
11085
11086 if (xmit_type & XMIT_GSO_V4) {
11087 pbd->ip_id = swab16(ip_hdr(skb)->id);
11088 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11089 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11090 ip_hdr(skb)->daddr,
11091 0, IPPROTO_TCP, 0));
755735eb
EG
11092
11093 } else
11094 pbd->tcp_pseudo_csum =
11095 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11096 &ipv6_hdr(skb)->daddr,
11097 0, IPPROTO_TCP, 0));
11098
a2fbb9ea
ET
11099 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11100 }
ca00392c 11101 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11102
755735eb
EG
11103 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11104 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11105
755735eb 11106 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11107 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11108 if (total_pkt_bd == NULL)
11109 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11110
755735eb
EG
11111 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11112 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11113
ca00392c
EG
11114 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11115 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11116 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11117 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11118
755735eb 11119 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11120 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11121 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11122 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11123 }
11124
ca00392c 11125 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11126
a2fbb9ea
ET
11127 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11128
755735eb 11129 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11130 * if the packet contains or ends with it
11131 */
11132 if (TX_BD_POFF(bd_prod) < nbd)
11133 nbd++;
11134
ca00392c
EG
11135 if (total_pkt_bd != NULL)
11136 total_pkt_bd->total_pkt_bytes = pkt_size;
11137
a2fbb9ea
ET
11138 if (pbd)
11139 DP(NETIF_MSG_TX_QUEUED,
11140 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11141 " tcp_flags %x xsum %x seq %u hlen %u\n",
11142 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11143 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11144 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11145
755735eb 11146 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11147
58f4c4cf
EG
11148 /*
11149 * Make sure that the BD data is updated before updating the producer
11150 * since FW might read the BD right after the producer is updated.
11151 * This is only applicable for weak-ordered memory model archs such
 11153 	 * as IA-64. The following barrier is also mandatory since the FW
 11154 	 * assumes packets must have BDs.
11154 */
11155 wmb();
11156
ca00392c
EG
11157 fp->tx_db.data.prod += nbd;
11158 barrier();
11159 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
11160
11161 mmiowb();
11162
755735eb 11163 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11164
11165 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11166 netif_tx_stop_queue(txq);
58f4c4cf
EG
11167 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11168 if we put Tx into XOFF state. */
11169 smp_mb();
ca00392c 11170 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11171 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11172 netif_tx_wake_queue(txq);
a2fbb9ea 11173 }
ca00392c 11174 fp_stat->tx_pkt++;
a2fbb9ea
ET
11175
11176 return NETDEV_TX_OK;
11177}
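
/* Editor's note: the BD chain built by bnx2x_start_xmit() above,
 * schematically (TSO case with a split header), reconstructed from the
 * code and its comments:
 *
 *	start BD   - flagged START, headers only (hlen bytes)
 *	parse BD   - TSO/checksum parsing info; sizes in 16-bit words
 *	data BD    - remainder of the linear data, reusing the start BD's
 *		     DMA mapping (bnx2x_tx_split)
 *	data BD(s) - one per page fragment; the first of these also
 *		     carries total_pkt_bytes (total_pkt_bd)
 *
 * nbd counts all of the above, plus one more when the packet contains
 * or ends with a "next page" BD (the TX_BD_POFF() check).
 */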
11178
bb2a0f7a 11179/* called with rtnl_lock */
a2fbb9ea
ET
11180static int bnx2x_open(struct net_device *dev)
11181{
11182 struct bnx2x *bp = netdev_priv(dev);
11183
6eccabb3
EG
11184 netif_carrier_off(dev);
11185
a2fbb9ea
ET
11186 bnx2x_set_power_state(bp, PCI_D0);
11187
bb2a0f7a 11188 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11189}
11190
bb2a0f7a 11191/* called with rtnl_lock */
a2fbb9ea
ET
11192static int bnx2x_close(struct net_device *dev)
11193{
a2fbb9ea
ET
11194 struct bnx2x *bp = netdev_priv(dev);
11195
11196 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11197 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11198 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11199 if (!CHIP_REV_IS_SLOW(bp))
11200 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11201
11202 return 0;
11203}
11204
f5372251 11205/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11206static void bnx2x_set_rx_mode(struct net_device *dev)
11207{
11208 struct bnx2x *bp = netdev_priv(dev);
11209 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11210 int port = BP_PORT(bp);
11211
11212 if (bp->state != BNX2X_STATE_OPEN) {
11213 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11214 return;
11215 }
11216
11217 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11218
11219 if (dev->flags & IFF_PROMISC)
11220 rx_mode = BNX2X_RX_MODE_PROMISC;
11221
11222 else if ((dev->flags & IFF_ALLMULTI) ||
11223 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11224 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11225
11226 else { /* some multicasts */
11227 if (CHIP_IS_E1(bp)) {
11228 int i, old, offset;
11229 struct dev_mc_list *mclist;
11230 struct mac_configuration_cmd *config =
11231 bnx2x_sp(bp, mcast_config);
11232
11233 for (i = 0, mclist = dev->mc_list;
11234 mclist && (i < dev->mc_count);
11235 i++, mclist = mclist->next) {
11236
11237 config->config_table[i].
11238 cam_entry.msb_mac_addr =
11239 swab16(*(u16 *)&mclist->dmi_addr[0]);
11240 config->config_table[i].
11241 cam_entry.middle_mac_addr =
11242 swab16(*(u16 *)&mclist->dmi_addr[2]);
11243 config->config_table[i].
11244 cam_entry.lsb_mac_addr =
11245 swab16(*(u16 *)&mclist->dmi_addr[4]);
11246 config->config_table[i].cam_entry.flags =
11247 cpu_to_le16(port);
11248 config->config_table[i].
11249 target_table_entry.flags = 0;
ca00392c
EG
11250 config->config_table[i].target_table_entry.
11251 clients_bit_vector =
11252 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11253 config->config_table[i].
11254 target_table_entry.vlan_id = 0;
11255
11256 DP(NETIF_MSG_IFUP,
11257 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11258 config->config_table[i].
11259 cam_entry.msb_mac_addr,
11260 config->config_table[i].
11261 cam_entry.middle_mac_addr,
11262 config->config_table[i].
11263 cam_entry.lsb_mac_addr);
11264 }
8d9c5f34 11265 old = config->hdr.length;
34f80b04
EG
11266 if (old > i) {
11267 for (; i < old; i++) {
11268 if (CAM_IS_INVALID(config->
11269 config_table[i])) {
af246401 11270 /* already invalidated */
34f80b04
EG
11271 break;
11272 }
11273 /* invalidate */
11274 CAM_INVALIDATE(config->
11275 config_table[i]);
11276 }
11277 }
11278
11279 if (CHIP_REV_IS_SLOW(bp))
11280 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11281 else
11282 offset = BNX2X_MAX_MULTICAST*(1 + port);
11283
8d9c5f34 11284 config->hdr.length = i;
34f80b04 11285 config->hdr.offset = offset;
8d9c5f34 11286 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11287 config->hdr.reserved1 = 0;
11288
11289 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11290 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11291 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11292 0);
11293 } else { /* E1H */
11294 /* Accept one or more multicasts */
11295 struct dev_mc_list *mclist;
11296 u32 mc_filter[MC_HASH_SIZE];
11297 u32 crc, bit, regidx;
11298 int i;
11299
11300 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11301
11302 for (i = 0, mclist = dev->mc_list;
11303 mclist && (i < dev->mc_count);
11304 i++, mclist = mclist->next) {
11305
7c510e4b
JB
11306 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11307 mclist->dmi_addr);
34f80b04
EG
11308
11309 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11310 bit = (crc >> 24) & 0xff;
11311 regidx = bit >> 5;
11312 bit &= 0x1f;
11313 mc_filter[regidx] |= (1 << bit);
11314 }
11315
11316 for (i = 0; i < MC_HASH_SIZE; i++)
11317 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11318 mc_filter[i]);
11319 }
11320 }
11321
11322 bp->rx_mode = rx_mode;
11323 bnx2x_set_storm_rx_mode(bp);
11324}
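
/* Editor's sketch (illustrative) of the E1H multicast hash above: the
 * top byte of the CRC32c of the MAC selects one of 256 filter bits,
 * spread across MC_HASH_SIZE 32-bit registers (8, given regidx =
 * bit >> 5).  The demo_ name is invented; the bit layout is copied from
 * the code.
 */
static void demo_mc_hash_set(u32 *mc_filter, u32 crc)
{
	u32 bit = (crc >> 24) & 0xff;

	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}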
11325
11326/* called with rtnl_lock */
a2fbb9ea
ET
11327static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11328{
11329 struct sockaddr *addr = p;
11330 struct bnx2x *bp = netdev_priv(dev);
11331
34f80b04 11332 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11333 return -EINVAL;
11334
11335 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11336 if (netif_running(dev)) {
11337 if (CHIP_IS_E1(bp))
3101c2bc 11338 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 11339 else
3101c2bc 11340 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 11341 }
a2fbb9ea
ET
11342
11343 return 0;
11344}
11345
c18487ee 11346/* called with rtnl_lock */
01cd4528
EG
11347static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11348 int devad, u16 addr)
a2fbb9ea 11349{
01cd4528
EG
11350 struct bnx2x *bp = netdev_priv(netdev);
11351 u16 value;
11352 int rc;
11353 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11354
01cd4528
EG
11355 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11356 prtad, devad, addr);
a2fbb9ea 11357
01cd4528
EG
11358 if (prtad != bp->mdio.prtad) {
11359 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11360 prtad, bp->mdio.prtad);
11361 return -EINVAL;
11362 }
11363
11364 /* The HW expects different devad if CL22 is used */
11365 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11366
01cd4528
EG
11367 bnx2x_acquire_phy_lock(bp);
11368 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11369 devad, addr, &value);
11370 bnx2x_release_phy_lock(bp);
11371 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11372
01cd4528
EG
11373 if (!rc)
11374 rc = value;
11375 return rc;
11376}
a2fbb9ea 11377
01cd4528
EG
11378/* called with rtnl_lock */
11379static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11380 u16 addr, u16 value)
11381{
11382 struct bnx2x *bp = netdev_priv(netdev);
11383 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11384 int rc;
11385
11386 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11387 " value 0x%x\n", prtad, devad, addr, value);
11388
11389 if (prtad != bp->mdio.prtad) {
11390 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11391 prtad, bp->mdio.prtad);
11392 return -EINVAL;
a2fbb9ea
ET
11393 }
11394
01cd4528
EG
11395 /* The HW expects different devad if CL22 is used */
11396 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11397
01cd4528
EG
11398 bnx2x_acquire_phy_lock(bp);
11399 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11400 devad, addr, value);
11401 bnx2x_release_phy_lock(bp);
11402 return rc;
11403}
c18487ee 11404
01cd4528
EG
11405/* called with rtnl_lock */
11406static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11407{
11408 struct bnx2x *bp = netdev_priv(dev);
11409 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11410
01cd4528
EG
11411 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11412 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11413
01cd4528
EG
11414 if (!netif_running(dev))
11415 return -EAGAIN;
11416
11417 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11418}
11419
34f80b04 11420/* called with rtnl_lock */
a2fbb9ea
ET
11421static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11422{
11423 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11424 int rc = 0;
a2fbb9ea
ET
11425
11426 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11427 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11428 return -EINVAL;
11429
11430 /* This does not race with packet allocation
c14423fe 11431 * because the actual alloc size is
a2fbb9ea
ET
11432 * only updated as part of load
11433 */
11434 dev->mtu = new_mtu;
11435
11436 if (netif_running(dev)) {
34f80b04
EG
11437 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11438 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11439 }
34f80b04
EG
11440
11441 return rc;
a2fbb9ea
ET
11442}
11443
11444static void bnx2x_tx_timeout(struct net_device *dev)
11445{
11446 struct bnx2x *bp = netdev_priv(dev);
11447
11448#ifdef BNX2X_STOP_ON_ERROR
11449 if (!bp->panic)
11450 bnx2x_panic();
11451#endif
11452 /* This allows the netif to be shutdown gracefully before resetting */
11453 schedule_work(&bp->reset_task);
11454}
11455
11456#ifdef BCM_VLAN
34f80b04 11457/* called with rtnl_lock */
a2fbb9ea
ET
11458static void bnx2x_vlan_rx_register(struct net_device *dev,
11459 struct vlan_group *vlgrp)
11460{
11461 struct bnx2x *bp = netdev_priv(dev);
11462
11463 bp->vlgrp = vlgrp;
0c6671b0
EG
11464
11465 /* Set flags according to the required capabilities */
11466 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11467
11468 if (dev->features & NETIF_F_HW_VLAN_TX)
11469 bp->flags |= HW_VLAN_TX_FLAG;
11470
11471 if (dev->features & NETIF_F_HW_VLAN_RX)
11472 bp->flags |= HW_VLAN_RX_FLAG;
11473
a2fbb9ea 11474 if (netif_running(dev))
49d66772 11475 bnx2x_set_client_config(bp);
a2fbb9ea 11476}
34f80b04 11477
a2fbb9ea
ET
11478#endif
11479
11480#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11481static void poll_bnx2x(struct net_device *dev)
11482{
11483 struct bnx2x *bp = netdev_priv(dev);
11484
11485 disable_irq(bp->pdev->irq);
11486 bnx2x_interrupt(bp->pdev->irq, dev);
11487 enable_irq(bp->pdev->irq);
11488}
11489#endif
11490
c64213cd
SH
11491static const struct net_device_ops bnx2x_netdev_ops = {
11492 .ndo_open = bnx2x_open,
11493 .ndo_stop = bnx2x_close,
11494 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11495 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
11496 .ndo_set_mac_address = bnx2x_change_mac_addr,
11497 .ndo_validate_addr = eth_validate_addr,
11498 .ndo_do_ioctl = bnx2x_ioctl,
11499 .ndo_change_mtu = bnx2x_change_mtu,
11500 .ndo_tx_timeout = bnx2x_tx_timeout,
11501#ifdef BCM_VLAN
11502 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11503#endif
11504#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11505 .ndo_poll_controller = poll_bnx2x,
11506#endif
11507};
11508
34f80b04
EG
11509static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11510 struct net_device *dev)
a2fbb9ea
ET
11511{
11512 struct bnx2x *bp;
11513 int rc;
11514
11515 SET_NETDEV_DEV(dev, &pdev->dev);
11516 bp = netdev_priv(dev);
11517
34f80b04
EG
11518 bp->dev = dev;
11519 bp->pdev = pdev;
a2fbb9ea 11520 bp->flags = 0;
34f80b04 11521 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
11522
11523 rc = pci_enable_device(pdev);
11524 if (rc) {
11525 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11526 goto err_out;
11527 }
11528
11529 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11530 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11531 " aborting\n");
11532 rc = -ENODEV;
11533 goto err_out_disable;
11534 }
11535
11536 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11537 printk(KERN_ERR PFX "Cannot find second PCI device"
11538 " base address, aborting\n");
11539 rc = -ENODEV;
11540 goto err_out_disable;
11541 }
11542
34f80b04
EG
11543 if (atomic_read(&pdev->enable_cnt) == 1) {
11544 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11545 if (rc) {
11546 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11547 " aborting\n");
11548 goto err_out_disable;
11549 }
a2fbb9ea 11550
34f80b04
EG
11551 pci_set_master(pdev);
11552 pci_save_state(pdev);
11553 }
a2fbb9ea
ET
11554
11555 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11556 if (bp->pm_cap == 0) {
11557 printk(KERN_ERR PFX "Cannot find power management"
11558 " capability, aborting\n");
11559 rc = -EIO;
11560 goto err_out_release;
11561 }
11562
11563 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11564 if (bp->pcie_cap == 0) {
11565 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11566 " aborting\n");
11567 rc = -EIO;
11568 goto err_out_release;
11569 }
11570
6a35528a 11571 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11572 bp->flags |= USING_DAC_FLAG;
6a35528a 11573 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11574 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11575 " failed, aborting\n");
11576 rc = -EIO;
11577 goto err_out_release;
11578 }
11579
284901a9 11580 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea
ET
11581 printk(KERN_ERR PFX "System does not support DMA,"
11582 " aborting\n");
11583 rc = -EIO;
11584 goto err_out_release;
11585 }
11586
34f80b04
EG
11587 dev->mem_start = pci_resource_start(pdev, 0);
11588 dev->base_addr = dev->mem_start;
11589 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11590
11591 dev->irq = pdev->irq;
11592
275f165f 11593 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11594 if (!bp->regview) {
11595 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11596 rc = -ENOMEM;
11597 goto err_out_release;
11598 }
11599
34f80b04
EG
11600 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11601 min_t(u64, BNX2X_DB_SIZE,
11602 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11603 if (!bp->doorbells) {
11604 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11605 rc = -ENOMEM;
11606 goto err_out_unmap;
11607 }
11608
11609 bnx2x_set_power_state(bp, PCI_D0);
11610
34f80b04
EG
11611 /* clean indirect addresses */
11612 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11613 PCICFG_VENDOR_ID_OFFSET);
11614 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11615 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11616 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11617 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11618
34f80b04 11619 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11620
c64213cd 11621 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11622 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11623 dev->features |= NETIF_F_SG;
11624 dev->features |= NETIF_F_HW_CSUM;
11625 if (bp->flags & USING_DAC_FLAG)
11626 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11627 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11628 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11629#ifdef BCM_VLAN
11630 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11631 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11632
11633 dev->vlan_features |= NETIF_F_SG;
11634 dev->vlan_features |= NETIF_F_HW_CSUM;
11635 if (bp->flags & USING_DAC_FLAG)
11636 dev->vlan_features |= NETIF_F_HIGHDMA;
11637 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11638 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11639#endif
a2fbb9ea 11640
01cd4528
EG
11641 /* get_port_hwinfo() will set prtad and mmds properly */
11642 bp->mdio.prtad = MDIO_PRTAD_NONE;
11643 bp->mdio.mmds = 0;
11644 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11645 bp->mdio.dev = dev;
11646 bp->mdio.mdio_read = bnx2x_mdio_read;
11647 bp->mdio.mdio_write = bnx2x_mdio_write;
11648
a2fbb9ea
ET
11649 return 0;
11650
11651err_out_unmap:
11652 if (bp->regview) {
11653 iounmap(bp->regview);
11654 bp->regview = NULL;
11655 }
a2fbb9ea
ET
11656 if (bp->doorbells) {
11657 iounmap(bp->doorbells);
11658 bp->doorbells = NULL;
11659 }
11660
11661err_out_release:
34f80b04
EG
11662 if (atomic_read(&pdev->enable_cnt) == 1)
11663 pci_release_regions(pdev);
a2fbb9ea
ET
11664
11665err_out_disable:
11666 pci_disable_device(pdev);
11667 pci_set_drvdata(pdev, NULL);
11668
11669err_out:
11670 return rc;
11671}
11672
37f9ce62
EG
11673static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11674 int *width, int *speed)
25047950
ET
11675{
11676 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11677
37f9ce62 11678 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 11679
37f9ce62
EG
11680 /* return value of 1=2.5GHz 2=5GHz */
11681 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 11682}
37f9ce62 11683
94a78b79
VZ
11684static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11685{
37f9ce62 11686 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
11687 struct bnx2x_fw_file_hdr *fw_hdr;
11688 struct bnx2x_fw_file_section *sections;
94a78b79 11689 u32 offset, len, num_ops;
37f9ce62 11690 u16 *ops_offsets;
94a78b79 11691 int i;
37f9ce62 11692 const u8 *fw_ver;
94a78b79
VZ
11693
11694 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11695 return -EINVAL;
11696
11697 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11698 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11699
11700 /* Make sure none of the offsets and sizes make us read beyond
11701 * the end of the firmware data */
11702 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11703 offset = be32_to_cpu(sections[i].offset);
11704 len = be32_to_cpu(sections[i].len);
11705 if (offset + len > firmware->size) {
37f9ce62
EG
11706 printk(KERN_ERR PFX "Section %d length is out of "
11707 "bounds\n", i);
94a78b79
VZ
11708 return -EINVAL;
11709 }
11710 }
11711
11712 /* Likewise for the init_ops offsets */
11713 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11714 ops_offsets = (u16 *)(firmware->data + offset);
11715 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11716
11717 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11718 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
37f9ce62
EG
11719 printk(KERN_ERR PFX "Section offset %d is out of "
11720 "bounds\n", i);
94a78b79
VZ
11721 return -EINVAL;
11722 }
11723 }
11724
11725 /* Check FW version */
11726 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11727 fw_ver = firmware->data + offset;
11728 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11729 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11730 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11731 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11732 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11733 " Should be %d.%d.%d.%d\n",
11734 fw_ver[0], fw_ver[1], fw_ver[2],
11735 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11736 BCM_5710_FW_MINOR_VERSION,
11737 BCM_5710_FW_REVISION_VERSION,
11738 BCM_5710_FW_ENGINEERING_VERSION);
11739 return -EINVAL;
11740 }
11741
11742 return 0;
11743}
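
/* Editor's note: a hedged sketch, not a claimed driver fix.  With 32-bit
 * offset/len values, the `offset + len > firmware->size` test above can
 * wrap around on a malformed header; comparing against the remaining
 * space instead is immune to that:
 */
static inline int demo_section_in_bounds(u32 offset, u32 len, size_t fw_size)
{
	return offset <= fw_size && len <= fw_size - offset;
}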
11744
11745static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11746{
11747 u32 i;
11748 const __be32 *source = (const __be32*)_source;
11749 u32 *target = (u32*)_target;
11750
11751 for (i = 0; i < n/4; i++)
11752 target[i] = be32_to_cpu(source[i]);
11753}
11754
11755/*
11756 Ops array is stored in the following format:
11757 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11758 */
11759static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11760{
11761 u32 i, j, tmp;
11762 const __be32 *source = (const __be32*)_source;
11763 struct raw_op *target = (struct raw_op*)_target;
11764
 11765 	for (i = 0, j = 0; i < n/8; i++, j += 2) {
11766 tmp = be32_to_cpu(source[j]);
11767 target[i].op = (tmp >> 24) & 0xff;
11768 target[i].offset = tmp & 0xffffff;
11769 target[i].raw_data = be32_to_cpu(source[j+1]);
11770 }
11771}
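
/* Editor's example (invented values) of one record in the format decoded
 * above: the 8 raw bytes {0x02,0x12,0x34,0x56, 0xde,0xad,0xbe,0xef}
 * unpack, big endian, to op = 0x02, offset = 0x123456 (24 bits) and
 * raw_data = 0xdeadbeef.  A standalone decoder for a single record:
 */
#include <assert.h>
#include <stdint.h>

struct demo_raw_op {		/* mirrors the fields filled in above */
	uint8_t  op;
	uint32_t offset;	/* only the low 24 bits are meaningful */
	uint32_t raw_data;
};

static struct demo_raw_op demo_unpack_op(const uint8_t *p)
{
	struct demo_raw_op r;
	uint32_t tmp = ((uint32_t)p[0] << 24) | (p[1] << 16) |
		       (p[2] << 8) | p[3];

	r.op = (tmp >> 24) & 0xff;
	r.offset = tmp & 0xffffff;
	r.raw_data = ((uint32_t)p[4] << 24) | (p[5] << 16) |
		     (p[6] << 8) | p[7];
	return r;
}

int main(void)
{
	const uint8_t rec[8] = { 0x02, 0x12, 0x34, 0x56,
				 0xde, 0xad, 0xbe, 0xef };
	struct demo_raw_op r = demo_unpack_op(rec);

	assert(r.op == 0x02 && r.offset == 0x123456 &&
	       r.raw_data == 0xdeadbeef);
	return 0;
}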
11772static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11773{
11774 u32 i;
11775 u16 *target = (u16*)_target;
11776 const __be16 *source = (const __be16*)_source;
11777
11778 for (i = 0; i < n/2; i++)
11779 target[i] = be16_to_cpu(source[i]);
11780}
11781
11782#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11783 do { \
11784 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11785 bp->arr = kmalloc(len, GFP_KERNEL); \
11786 if (!bp->arr) { \
11787 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11788 goto lbl; \
11789 } \
11790 func(bp->firmware->data + \
11791 be32_to_cpu(fw_hdr->arr.offset), \
11792 (u8*)bp->arr, len); \
11793 } while (0)
11794
11795
11796static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11797{
11798 char fw_file_name[40] = {0};
11799 int rc, offset;
11800 struct bnx2x_fw_file_hdr *fw_hdr;
11801
11802 /* Create a FW file name */
11803 if (CHIP_IS_E1(bp))
11804 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11805 else
11806 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11807
11808 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11809 BCM_5710_FW_MAJOR_VERSION,
11810 BCM_5710_FW_MINOR_VERSION,
11811 BCM_5710_FW_REVISION_VERSION,
11812 BCM_5710_FW_ENGINEERING_VERSION);
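
	/* e.g. "bnx2x-e1h-4.8.53.0.fw" on an E1H chip; the digits shown are
	 * illustrative - the real ones come from the BCM_5710_FW_* macros */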
11813
11814 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11815
11816 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11817 if (rc) {
11818 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11819 goto request_firmware_exit;
11820 }
11821
11822 rc = bnx2x_check_firmware(bp);
11823 if (rc) {
11824 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11825 goto request_firmware_exit;
11826 }
11827
11828 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11829
11830 /* Initialize the pointers to the init arrays */
11831 /* Blob */
11832 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11833
11834 /* Opcodes */
11835 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11836
11837 /* Offsets */
11838 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11839
11840 /* STORMs firmware */
11841 bp->tsem_int_table_data = bp->firmware->data +
11842 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11843 bp->tsem_pram_data = bp->firmware->data +
11844 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11845 bp->usem_int_table_data = bp->firmware->data +
11846 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11847 bp->usem_pram_data = bp->firmware->data +
11848 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11849 bp->xsem_int_table_data = bp->firmware->data +
11850 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11851 bp->xsem_pram_data = bp->firmware->data +
11852 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11853 bp->csem_int_table_data = bp->firmware->data +
11854 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11855 bp->csem_pram_data = bp->firmware->data +
11856 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11857
11858 return 0;
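/* Error unwind: the labels below free resources in reverse order of
 * allocation, so each entry point releases only what was successfully
 * set up before the failing step */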
11859init_offsets_alloc_err:
11860 kfree(bp->init_ops);
11861init_ops_alloc_err:
11862 kfree(bp->init_data);
11863request_firmware_exit:
11864 release_firmware(bp->firmware);
11865
11866 return rc;
11867}
11868
11869
25047950 11870
a2fbb9ea
ET
11871static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11872 const struct pci_device_id *ent)
11873{
a2fbb9ea
ET
11874 struct net_device *dev = NULL;
11875 struct bnx2x *bp;
37f9ce62 11876 int pcie_width, pcie_speed;
25047950 11877 int rc;
a2fbb9ea 11878
a2fbb9ea 11879 /* dev zeroed in init_etherdev */
555f6c78 11880 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
11881 if (!dev) {
11882 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11883 return -ENOMEM;
34f80b04 11884 }
a2fbb9ea 11885
a2fbb9ea
ET
11886 bp = netdev_priv(dev);
11887 bp->msglevel = debug;
11888
df4770de
EG
11889 pci_set_drvdata(pdev, dev);
11890
34f80b04 11891 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
11892 if (rc < 0) {
11893 free_netdev(dev);
11894 return rc;
11895 }
11896
34f80b04 11897 rc = bnx2x_init_bp(bp);
693fc0d1
EG
11898 if (rc)
11899 goto init_one_exit;
11900
94a78b79
VZ
11901 /* Set init arrays */
11902 rc = bnx2x_init_firmware(bp, &pdev->dev);
11903 if (rc) {
11904 printk(KERN_ERR PFX "Error loading firmware\n");
11905 goto init_one_exit;
11906 }
11907
693fc0d1 11908 rc = register_netdev(dev);
34f80b04 11909 if (rc) {
693fc0d1 11910 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
11911 goto init_one_exit;
11912 }
11913
37f9ce62 11914 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
25047950 11915 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11916 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11917 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
37f9ce62 11918 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
25047950 11919 dev->base_addr, bp->pdev->irq);
e174961c 11920 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 11921
a2fbb9ea 11922 return 0;
34f80b04
EG
11923
11924init_one_exit:
11925 if (bp->regview)
11926 iounmap(bp->regview);
11927
11928 if (bp->doorbells)
11929 iounmap(bp->doorbells);
11930
11931 free_netdev(dev);
11932
11933 if (atomic_read(&pdev->enable_cnt) == 1)
11934 pci_release_regions(pdev);
11935
11936 pci_disable_device(pdev);
11937 pci_set_drvdata(pdev, NULL);
11938
11939 return rc;
a2fbb9ea
ET
11940}
11941
11942static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11943{
11944 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11945 struct bnx2x *bp;
11946
11947 if (!dev) {
228241eb
ET
11948 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11949 return;
11950 }
228241eb 11951 bp = netdev_priv(dev);
a2fbb9ea 11952
a2fbb9ea
ET
11953 unregister_netdev(dev);
11954
94a78b79
VZ
11955 kfree(bp->init_ops_offsets);
11956 kfree(bp->init_ops);
11957 kfree(bp->init_data);
11958 release_firmware(bp->firmware);
11959
a2fbb9ea
ET
11960 if (bp->regview)
11961 iounmap(bp->regview);
11962
11963 if (bp->doorbells)
11964 iounmap(bp->doorbells);
11965
11966 free_netdev(dev);
34f80b04
EG
11967
11968 if (atomic_read(&pdev->enable_cnt) == 1)
11969 pci_release_regions(pdev);
11970
a2fbb9ea
ET
11971 pci_disable_device(pdev);
11972 pci_set_drvdata(pdev, NULL);
11973}
11974
11975static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11976{
11977 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11978 struct bnx2x *bp;
11979
34f80b04
EG
11980 if (!dev) {
11981 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11982 return -ENODEV;
11983 }
11984 bp = netdev_priv(dev);
a2fbb9ea 11985
34f80b04 11986 rtnl_lock();
a2fbb9ea 11987
34f80b04 11988 pci_save_state(pdev);
228241eb 11989
34f80b04
EG
11990 if (!netif_running(dev)) {
11991 rtnl_unlock();
11992 return 0;
11993 }
a2fbb9ea
ET
11994
11995 netif_device_detach(dev);
a2fbb9ea 11996
da5a662a 11997 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11998
a2fbb9ea 11999 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 12000
34f80b04
EG
12001 rtnl_unlock();
12002
a2fbb9ea
ET
12003 return 0;
12004}
12005
12006static int bnx2x_resume(struct pci_dev *pdev)
12007{
12008 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 12009 struct bnx2x *bp;
a2fbb9ea
ET
12010 int rc;
12011
228241eb
ET
12012 if (!dev) {
12013 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12014 return -ENODEV;
12015 }
228241eb 12016 bp = netdev_priv(dev);
a2fbb9ea 12017
34f80b04
EG
12018 rtnl_lock();
12019
228241eb 12020 pci_restore_state(pdev);
34f80b04
EG
12021
12022 if (!netif_running(dev)) {
12023 rtnl_unlock();
12024 return 0;
12025 }
12026
a2fbb9ea
ET
12027 bnx2x_set_power_state(bp, PCI_D0);
12028 netif_device_attach(dev);
12029
da5a662a 12030 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 12031
34f80b04
EG
12032 rtnl_unlock();
12033
12034 return rc;
a2fbb9ea
ET
12035}
12036
f8ef6e44
YG
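/* Minimal teardown used on a PCI (EEH) error: the device may already be
 * inaccessible, so only driver-side resources (IRQs, SKBs, queues, memory)
 * are released and no full chip shutdown is attempted */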
12037static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12038{
12039 int i;
12040
12041 bp->state = BNX2X_STATE_ERROR;
12042
12043 bp->rx_mode = BNX2X_RX_MODE_NONE;
12044
12045 bnx2x_netif_stop(bp, 0);
12046
12047 del_timer_sync(&bp->timer);
12048 bp->stats_state = STATS_STATE_DISABLED;
12049 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12050
12051 /* Release IRQs */
12052 bnx2x_free_irq(bp);
12053
12054 if (CHIP_IS_E1(bp)) {
12055 struct mac_configuration_cmd *config =
12056 bnx2x_sp(bp, mcast_config);
12057
8d9c5f34 12058 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
12059 CAM_INVALIDATE(config->config_table[i]);
12060 }
12061
12062 /* Free SKBs, SGEs, TPA pool and driver internals */
12063 bnx2x_free_skbs(bp);
555f6c78 12064 for_each_rx_queue(bp, i)
f8ef6e44 12065 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 12066 for_each_rx_queue(bp, i)
7cde1c8b 12067 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
12068 bnx2x_free_mem(bp);
12069
12070 bp->state = BNX2X_STATE_CLOSED;
12071
12072 netif_carrier_off(bp->dev);
12073
12074 return 0;
12075}
12076
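/* Re-read the MCP shared-memory state after a slot reset so that the
 * driver/firmware handshake (shmem base, validity map, fw_seq) is valid
 * again before the NIC is reloaded */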
12077static void bnx2x_eeh_recover(struct bnx2x *bp)
12078{
12079 u32 val;
12080
12081 mutex_init(&bp->port.phy_mutex);
12082
12083 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12084 bp->link_params.shmem_base = bp->common.shmem_base;
12085 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
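	/* A shmem base of zero or outside the 0xA0000-0xC0000 window means
	 * the MCP is not running, so fall back to NO_MCP operation */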
12086
12087 if (!bp->common.shmem_base ||
12088 (bp->common.shmem_base < 0xA0000) ||
12089 (bp->common.shmem_base >= 0xC0000)) {
12090 BNX2X_DEV_INFO("MCP not active\n");
12091 bp->flags |= NO_MCP_FLAG;
12092 return;
12093 }
12094
12095 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12096 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12097 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12098 BNX2X_ERR("BAD MCP validity signature\n");
12099
12100 if (!BP_NOMCP(bp)) {
12101 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12102 & DRV_MSG_SEQ_NUMBER_MASK);
12103 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12104 }
12105}
12106
493adb1f
WX
12107/**
12108 * bnx2x_io_error_detected - called when PCI error is detected
12109 * @pdev: Pointer to PCI device
12110 * @state: The current pci connection state
12111 *
12112 * This function is called after a PCI bus error affecting
12113 * this device has been detected.
12114 */
12115static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12116 pci_channel_state_t state)
12117{
12118 struct net_device *dev = pci_get_drvdata(pdev);
12119 struct bnx2x *bp = netdev_priv(dev);
12120
12121 rtnl_lock();
12122
12123 netif_device_detach(dev);
12124
07ce50e4
DN
12125 if (state == pci_channel_io_perm_failure) {
12126 rtnl_unlock();
12127 return PCI_ERS_RESULT_DISCONNECT;
12128 }
12129
493adb1f 12130 if (netif_running(dev))
f8ef6e44 12131 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
12132
12133 pci_disable_device(pdev);
12134
12135 rtnl_unlock();
12136
12137 /* Request a slot reset */
12138 return PCI_ERS_RESULT_NEED_RESET;
12139}
12140
12141/**
12142 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12143 * @pdev: Pointer to PCI device
12144 *
12145 * Restart the card from scratch, as if from a cold-boot.
12146 */
12147static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12148{
12149 struct net_device *dev = pci_get_drvdata(pdev);
12150 struct bnx2x *bp = netdev_priv(dev);
12151
12152 rtnl_lock();
12153
12154 if (pci_enable_device(pdev)) {
12155 dev_err(&pdev->dev,
12156 "Cannot re-enable PCI device after reset\n");
12157 rtnl_unlock();
12158 return PCI_ERS_RESULT_DISCONNECT;
12159 }
12160
12161 pci_set_master(pdev);
12162 pci_restore_state(pdev);
12163
12164 if (netif_running(dev))
12165 bnx2x_set_power_state(bp, PCI_D0);
12166
12167 rtnl_unlock();
12168
12169 return PCI_ERS_RESULT_RECOVERED;
12170}
12171
12172/**
12173 * bnx2x_io_resume - called when traffic can start flowing again
12174 * @pdev: Pointer to PCI device
12175 *
12176 * This callback is called when the error recovery driver tells us that
12177 * its OK to resume normal operation.
12178 */
12179static void bnx2x_io_resume(struct pci_dev *pdev)
12180{
12181 struct net_device *dev = pci_get_drvdata(pdev);
12182 struct bnx2x *bp = netdev_priv(dev);
12183
12184 rtnl_lock();
12185
f8ef6e44
YG
12186 bnx2x_eeh_recover(bp);
12187
493adb1f 12188 if (netif_running(dev))
f8ef6e44 12189 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
12190
12191 netif_device_attach(dev);
12192
12193 rtnl_unlock();
12194}
12195
12196static struct pci_error_handlers bnx2x_err_handler = {
12197 .error_detected = bnx2x_io_error_detected,
356e2385
EG
12198 .slot_reset = bnx2x_io_slot_reset,
12199 .resume = bnx2x_io_resume,
493adb1f
WX
12200};
12201
a2fbb9ea 12202static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
12203 .name = DRV_MODULE_NAME,
12204 .id_table = bnx2x_pci_tbl,
12205 .probe = bnx2x_init_one,
12206 .remove = __devexit_p(bnx2x_remove_one),
12207 .suspend = bnx2x_suspend,
12208 .resume = bnx2x_resume,
12209 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
12210};
12211
12212static int __init bnx2x_init(void)
12213{
dd21ca6d
SG
12214 int ret;
12215
938cf541
EG
12216 printk(KERN_INFO "%s", version);
12217
1cf167f2
EG
12218 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12219 if (bnx2x_wq == NULL) {
12220 printk(KERN_ERR PFX "Cannot create workqueue\n");
12221 return -ENOMEM;
12222 }
12223
dd21ca6d
SG
12224 ret = pci_register_driver(&bnx2x_pci_driver);
12225 if (ret) {
12226 printk(KERN_ERR PFX "Cannot register driver\n");
12227 destroy_workqueue(bnx2x_wq);
12228 }
12229 return ret;
a2fbb9ea
ET
12230}
12231
12232static void __exit bnx2x_cleanup(void)
12233{
12234 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
12235
12236 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
12237}
12238
12239module_init(bnx2x_init);
12240module_exit(bnx2x_cleanup);
12241
94a78b79 12242