bnx2x: PBA Table Page Alignment Workaround
drivers/net/bnx2x_main.c

/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
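
/* Both helpers above drive the same GRC window: the target GRC address
 * is latched into PCICFG_GRC_ADDRESS, the data moves through
 * PCICFG_GRC_DATA, and the window is parked back at
 * PCICFG_VENDOR_ID_OFFSET so a stray config cycle cannot reach chip
 * internals.  A minimal (hypothetical) round-trip check built on the
 * pair might look like:
 *
 *	bnx2x_reg_wr_ind(bp, addr, val);
 *	WARN_ON(bnx2x_reg_rd_ind(bp, addr) != val);
 */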

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
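
/* The DMAE command memory is laid out as an array of struct
 * dmae_command slots rooted at DMAE_REG_CMD_MEM; writing 1 to
 * dmae_reg_go_c[idx] kicks channel idx.  Roughly:
 *
 *	slot base = DMAE_REG_CMD_MEM + idx * sizeof(struct dmae_command)
 *	dword i of the command lands at slot base + i*4
 */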

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
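
/* Completion is detected by polling the write-back dword (*wb_comp)
 * that the DMAE engine stamps with DMAE_COMP_VAL.  With cnt = 200 the
 * polling budget works out to roughly 200 * 5us = 1ms on real silicon
 * and 200 * 100ms = 20s on slow emulation/FPGA parts before "dmae
 * timeout!" is reported.
 */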

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
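
/* Each STORM keeps its assert list in internal memory as 16-byte
 * entries (row0..row3 above); the scan stops at the first entry whose
 * opcode dword reads COMMON_ASM_INVALID_ASSERT_OPCODE, and the return
 * value is the total number of valid asserts found across all four
 * STORMs.
 */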

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
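
/* The MCP trace appears to live in a cyclic text buffer inside the MCP
 * scratchpad: "mark" (read from scratch offset 0xf104, given as a
 * 0x08000000-based address) is the current write pointer, so the dump
 * prints the older half from mark to the end of the buffer first and
 * then wraps from 0xF108 back up to mark.
 */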

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
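
/* The return value is a change mask: bit 0 is set when the CSTORM
 * status-block index moved, bit 1 when the USTORM index moved, so a
 * result of 0 means there is no new work on this fastpath status
 * block.
 */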

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
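
/* Counting NUM_TX_RINGS extra "used" entries makes the next-page BDs
 * (one per ring page, carrying no packet data) look permanently
 * consumed, so the value returned here never overstates the number of
 * BDs a caller may actually fill.
 */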

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
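
/* SGE mask bookkeeping in one picture: a set bit means "producer owns
 * this SGE", a cleared bit means "the FW consumed it".  The two bits
 * per page that map onto the next-page element start cleared (via
 * bnx2x_clear_sge_mask_next_elems()) because the FW will never report
 * them, and bnx2x_update_sge_prod() advances rx_sge_prod only over
 * fully cleared 64-bit mask words, refilling each one with
 * RX_SGE_MASK_ELEM_ONE_MASK as it goes.
 */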

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
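
/* The three producers (BD, CQE and SGE) are published to TSTORM
 * internal memory as a single struct tstorm_eth_rx_producers, written
 * dword by dword; the mmiowb() at the call site keeps these posted
 * writes ordered against later ones.
 */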

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
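
/* In INT#A mode the ack read returns one bit per status block: bit 0
 * belongs to the default (slowpath) status block, and fastpath SB
 * sb_id asserts bit (1 + sb_id), hence the "0x2 << bp->fp[0].sb_id"
 * mask for NAPI work above and the schedule_work() path for bit 0x1.
 */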

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
a2fbb9ea 1792
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

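/* Note (inferred from the read-modify-write above): MISC_REG_GPIO packs
 * per-pin FLOAT/SET/CLR control groups, selected by shifting the pin
 * mask by the matching *_POS constant.  Driving a pin low, for example,
 * means clearing its FLOAT bit and setting its CLR bit; masking the
 * initial read with MISC_REGISTERS_GPIO_FLOAT keeps the float state of
 * every other pin unchanged.
 */
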
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

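/* The switch above mirrors the usual IEEE 802.3 pause advertisement
 * encoding: NONE advertises neither bit, BOTH advertises Pause and
 * Asym_Pause, ASYMMETRIC advertises Asym_Pause only, and the default
 * arm clears both bits like the NONE case.
 */
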
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		bp->link_params.mtu = bp->dev->mtu;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will
     be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}

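/* Worked example (illustrative values, not from the code): with two
 * visible VNs whose min BW fields are 3 and 7, the *100 scaling above
 * yields vn_min_rates of 300 and 700, so wsum = 1000.  If both fields
 * were 0, each would be bumped to DEF_MIN_RATE but all_zero stays set
 * and the function returns 0, disabling fairness.
 */
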
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}

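/* Worked example (illustrative): at port_rate = 10000 Mbps,
 * r_param = 10000/8 = 1250 bytes/usec, t_fair = T_FAIR_COEF / 10000
 * (the "1000usec for 10G" noted above), and rs_threshold =
 * RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4, i.e. the 1.25 safety
 * factor applied to the byte count one rate-shaping period can pass.
 */
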
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			      ((double)m_rs_vn.
			       protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

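/* Worked example (illustrative): with wsum = 1000 and
 * vn_min_rate = 300, the credit delta above comes out as
 * 300 * (T_FAIR_COEF / 8000) bytes per fairness period, unless that
 * falls below twice fair_threshold (QM_ARB_BYTES, set in
 * bnx2x_init_port_minmax), in which case the floor wins.
 */
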
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded into it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

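/* Note: the SPQ is a simple producer ring - spq_prod_bd wraps back to
 * bp->spq when it reaches spq_last_bd, and the new producer index is
 * made visible to the chip through the XSTORM_SPQ_PROD_OFFSET write
 * above.  spq_left is the only occupancy check, so callers rely on
 * completions (seen on the fastpath ring) to replenish it.
 */
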
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

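/* Note: the split MCP access lock at GRCBASE_MCP + 0x9c behaves as a
 * set-and-test register - writing bit 31 requests the lock and the
 * read-back shows whether it was granted; writing 0 releases it.  The
 * acquire loop above retries every 5ms for up to 1000 iterations,
 * i.e. roughly 5 seconds.
 */
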
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage.  Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

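/* Worked example for the borrow path above: m = 0x00000001:00000000,
 * s = 0x00000000:00000001 -> m_lo < s_lo and d_hi = 1 > 0, so d_hi
 * becomes 0 and d_lo = 0 + (0xffffffff - 1) + 1 = 0xffffffff, the
 * correct 64-bit difference.  Note the clamp-to-zero arms: a negative
 * difference is reported as 0 rather than being allowed to wrap.
 */
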
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

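/* Note on the mac_stx[] pair used by UPDATE_STAT64: mac_stx[0] holds
 * the last raw MAC counter snapshot and mac_stx[1] the running 64-bit
 * accumulation.  The macro diffs the new snapshot against [0], stores
 * the snapshot back into [0], and adds the delta into [1] - which is
 * why bnx2x_link_attn() clears mac_stx[0] when the BMAC comes up, so
 * the next snapshot becomes the new baseline.
 */
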
/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

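/* Note: the statistics ramrod is the one SPQ entry that does not
 * consume a slot - bnx2x_sp_post() decremented spq_left, so the
 * spq_left++ above gives it back.  stats_pending then stays set until
 * the CStorm completion is observed in bnx2x_sp_task() (status & 0x2).
 */
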
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

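/* Note (a reading of the code above, not a spec quote): the "loader"
 * command copies the queued commands from the host dmae[] array into
 * DMAE command memory (slot loader_idx + 1) and lets the per-command
 * completion GO writes chain the sequence, so a single post runs the
 * whole batch; only the final queued command writes DMAE_COMP_VAL into
 * stats_comp, which bnx2x_stats_comp() polls for.
 */
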
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		msleep(1);
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

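/* Note: the commands queued above fall into three groups - host copies
 * of port/function stats pushed out to MCP shmem, MAC hardware counters
 * pulled from BMAC or EMAC GRC space, and NIG counters - all chained
 * through the loader in bnx2x_hw_stats_post(); only the last NIG
 * command completes to stats_comp.
 */
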
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

3510static int bnx2x_hw_stats_update(struct bnx2x *bp)
3511{
3512 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3513 struct nig_stats *old = &(bp->port.old_nig_stats);
3514 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3515 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3516 struct regpair diff;
3517
3518 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3519 bnx2x_bmac_stats_update(bp);
3520
3521 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3522 bnx2x_emac_stats_update(bp);
3523
3524 else { /* unreached */
3525 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3526 return -1;
3527 }
a2fbb9ea 3528
bb2a0f7a
YG
3529 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3530 new->brb_discard - old->brb_discard);
66e855f3
YG
3531 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3532 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3533
bb2a0f7a
YG
3534 UPDATE_STAT64_NIG(egress_mac_pkt0,
3535 etherstatspkts1024octetsto1522octets);
3536 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3537
bb2a0f7a 3538 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3539
bb2a0f7a
YG
3540 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3541 sizeof(struct mac_stx));
3542 estats->brb_drop_hi = pstats->brb_drop_hi;
3543 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3544
bb2a0f7a 3545 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3546
bb2a0f7a 3547 return 0;
a2fbb9ea
ET
3548}
3549
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   "  tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   "  xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
				total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
				total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
				total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}

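/* Sketch: bnx2x_hilo() (defined earlier in this file) collapses a split
 * 64-bit counter -- stored as adjacent _hi/_lo u32 words, hi first -- into
 * a value usable for struct net_device_stats.  Conceptually, assuming that
 * _hi/_lo layout:
 */
#if 0	/* illustration only */
static inline long bnx2x_hilo_sketch(u32 *hiref)
{
	u32 lo = *(hiref + 1);		/* _lo immediately follows _hi */
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return (((u64)hi) << 32) + lo;	/* full 64-bit value */
#else
	return lo;			/* 32-bit hosts keep the low word */
#endif
}
#endif
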
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats were not updated for 3"
					  " consecutive ticks\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxoverflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

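/* Note: the statistics engine is a 2x4 state machine -- two states
 * (DISABLED/ENABLED) by four events (PMF, LINK_UP, UPDATE, STOP) -- and
 * every caller funnels through bnx2x_stats_handle().  For example, the
 * periodic timer below posts STATS_EVENT_UPDATE, which is a no-op while
 * DISABLED and runs bnx2x_stats_update() while ENABLED.  A hypothetical
 * link-watch path would look like the sketch below; bnx2x_stats_handle()
 * and the event enum are the driver's own, the wrapper is illustrative.
 */
#if 0	/* illustration only */
static void example_link_report(struct bnx2x *bp, int link_up)
{
	if (link_up)
		/* ENABLED: restart collection; DISABLED: start it */
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		/* flush and freeze the counters */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
}
#endif
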
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

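/* Worked example for the heartbeat check above: it accepts exactly two
 * cases.  With both sequence numbers confined to the same width by
 * DRV_PULSE_SEQ_MASK/MCP_PULSE_SEQ_MASK, suppose the driver has just
 * written drv_pulse = 0x005:
 *
 *   mcp_pulse == 0x005  ->  OK (MCP already echoed this pulse)
 *   mcp_pulse == 0x004  ->  OK (MCP one behind, echo still pending)
 *   anything else       ->  "someone lost a heartbeat" error above
 *
 * The "(mcp_pulse + 1) & MASK" form also covers wrap-around, e.g.
 * drv_pulse == 0x000 matching mcp_pulse == MASK.
 */
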
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_def_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

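/* Note: rx_ticks/tx_ticks are kept in microseconds, and the division by 12
 * above suggests the host-coalescing timeout field counts in 12-usec units;
 * e.g. rx_ticks = 48 usec programs a field value of 4.  A ticks value of 0
 * disables coalescing for that index via the HC_DISABLE write instead.  The
 * 12-usec granularity is inferred from the arithmetic here, not documented
 * in this file.
 */
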
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_use_size = bp->dev->mtu;
	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_use_size, bp->rx_buf_size,
		   bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

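/* Note on the ring layout above: each rx ring is a chain of pages, and the
 * trailing slots of page i hold a "next page" pointer to page
 * (i % NUM_..._RINGS), so the hardware wraps from the last page back to the
 * first.  That is why the loops index RX_SGE_CNT * i - 2 and
 * RX_DESC_CNT * i - 2 (the BD/SGE rings appear to reserve two 8-byte
 * trailing slots for the pointer) versus RCQ_DESC_CNT * i - 1 (the
 * completion ring reserves one CQE-sized slot).  The exact slot accounting
 * is inferred from the indices used here, not spelled out in this file.
 */
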
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size = 64;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_use_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}

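/* Note: the loop above spreads RSS hash buckets round-robin over the active
 * queues -- table slot i maps to queue (i % num_queues).  For example, with
 * a (hypothetical) table size of 128 and 4 queues the table would read
 * 0,1,2,3,0,1,2,3,... so each queue owns a quarter of the hash space; the
 * real table size is TSTORM_INDIRECTION_TABLE_SIZE from the firmware
 * headers.
 */
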
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

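/* Note: rx filtering here is per function -- mask = (1 << BP_L_ID(bp))
 * selects only this function's bit in each accept/drop field, so the four
 * modes reduce to:
 *
 *   NONE:     drop all ucast/mcast/bcast (interface down, no rx at all)
 *   NORMAL:   accept all bcast; ucast/mcast are MAC-filtered elsewhere
 *   ALLMULTI: additionally accept all mcast
 *   PROMISC:  accept everything
 *
 * Fields left zero-initialized simply request no blanket accept/drop for
 * this function in that category.
 */
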
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size */
	max_agg_size = min((u32)(bp->rx_buf_use_size +
				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
			   (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

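/* Note: the fall-throughs in the switch above are deliberate -- the MCP
 * load response forms a hierarchy.  The first driver instance on the chip
 * gets LOAD_COMMON and must run common + port + function init; the first
 * instance on a port gets LOAD_PORT and runs port + function init; every
 * later instance gets LOAD_FUNCTION and initializes only its own function.
 */
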
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

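/* Note: passing -MAX_WBITS to zlib_inflateInit2() selects raw DEFLATE (no
 * zlib header/trailer), which is why the function skips the gzip wrapper by
 * hand: 10 fixed header bytes, plus the NUL-terminated original file name
 * when the FNAME flag bit (0x8) is set in zbuf[3].  Only the FNAME optional
 * field is handled; the firmware images are evidently assumed not to carry
 * the other gzip extras (FEXTRA, FCOMMENT, FHCRC).
 */
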
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver, so to test them we send debug packets through the loopback.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics first? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

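/* Sketch: the test above repeats the same poll-with-timeout idiom several
 * times (read, compare, msleep(10), bounded by count = 1000 * factor).
 * Factored out, the pattern looks roughly like the helper below; the name
 * and the read-callback indirection are illustrative, not part of the
 * driver.
 */
#if 0	/* illustration only */
static int example_poll_for_val(struct bnx2x *bp, u32 expect, int factor,
				u32 (*read_val)(struct bnx2x *bp))
{
	int count = 1000 * factor;	/* FPGA/emulation scale the budget */
	u32 val = 0;

	while (count--) {
		val = read_val(bp);
		if (val == expect)
			return 0;
		msleep(10);
	}
	BNX2X_ERR("poll timeout  val = 0x%x\n", val);
	return -EAGAIN;
}
#endif
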
5049static void enable_blocks_attention(struct bnx2x *bp)
5050{
5051 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5052 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5053 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5054 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5055 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5056 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5057 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5058 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5059 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5060/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5061/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5062	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5063	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5064	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5065/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5066/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5067	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5068	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5069	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5070	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5071/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5072/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5073	if (CHIP_REV_IS_FPGA(bp))
5074		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5075	else
5076		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5077	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5078	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5079	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5080/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5081/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5082	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5083	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5084/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5085	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
5086}
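/*
 * Editorial note (not part of the original driver): the PBF mask written
 * above, 0x18, is simply bits 3 and 4 set -- those two attention sources
 * stay masked while every other block is fully unmasked with 0.  Spelled
 * out:
 */
enum { EXAMPLE_PBF_MASKED_BITS = (1 << 3) | (1 << 4) };	/* == 0x18 */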
5087
5088
5089static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5090{
a2fbb9ea 5091 u32 val, i;
a2fbb9ea 5092
34f80b04 5093 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5094
5095 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5096 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5097
5098 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5099 if (CHIP_IS_E1H(bp))
5100 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5101
5102 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5103 msleep(30);
5104 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5105
5106 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5107 if (CHIP_IS_E1(bp)) {
5108 /* enable HW interrupt from PXP on USDM overflow
5109 bit 16 on INT_MASK_0 */
5110 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5111 }
a2fbb9ea 5112
5113 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5114 bnx2x_init_pxp(bp);
5115
5116#ifdef __BIG_ENDIAN
5117 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5118 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5119 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5120 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5121 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5122 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5123
5124/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5125 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5126 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5127 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5128 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5129#endif
5130
5131#ifndef BCM_ISCSI
5132 /* set NIC mode */
5133 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5134#endif
5135
34f80b04 5136 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5137#ifdef BCM_ISCSI
5138 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5139 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5140 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5141#endif
5142
5143 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5144 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5145
5146 	/* let the HW do its magic ... */
5147 msleep(100);
5148 /* finish PXP init */
5149 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5150 if (val != 1) {
5151 BNX2X_ERR("PXP2 CFG failed\n");
5152 return -EBUSY;
5153 }
5154 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5155 if (val != 1) {
5156 BNX2X_ERR("PXP2 RD_INIT failed\n");
5157 return -EBUSY;
5158 }
a2fbb9ea 5159
5160 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5161 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5162
34f80b04 5163 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5164
5165 /* clean the DMAE memory */
5166 bp->dmae_ready = 1;
5167 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5168
5169 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5170 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5171 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5172 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5173
5174 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5175 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5176 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5177 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5178
5179 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5180 /* soft reset pulse */
5181 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5182 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5183
5184#ifdef BCM_ISCSI
34f80b04 5185 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5186#endif
a2fbb9ea 5187
5188 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5189 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5190 if (!CHIP_REV_IS_SLOW(bp)) {
5191 /* enable hw interrupt from doorbell Q */
5192 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5193 }
a2fbb9ea 5194
5195 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5196 if (CHIP_REV_IS_SLOW(bp)) {
5197 /* fix for emulation and FPGA for no pause */
5198 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5199 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5200 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5201 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5202 }
a2fbb9ea 5203
5204 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5205 if (CHIP_IS_E1H(bp))
5206 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5207
5208 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5209 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5210 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5211 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5212
5213 if (CHIP_IS_E1H(bp)) {
5214 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5215 STORM_INTMEM_SIZE_E1H/2);
5216 bnx2x_init_fill(bp,
5217 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5218 0, STORM_INTMEM_SIZE_E1H/2);
5219 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5220 STORM_INTMEM_SIZE_E1H/2);
5221 bnx2x_init_fill(bp,
5222 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5223 0, STORM_INTMEM_SIZE_E1H/2);
5224 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5225 STORM_INTMEM_SIZE_E1H/2);
5226 bnx2x_init_fill(bp,
5227 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5228 0, STORM_INTMEM_SIZE_E1H/2);
5229 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5230 STORM_INTMEM_SIZE_E1H/2);
5231 bnx2x_init_fill(bp,
5232 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5233 0, STORM_INTMEM_SIZE_E1H/2);
5234 } else { /* E1 */
5235 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5236 STORM_INTMEM_SIZE_E1);
5237 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5238 STORM_INTMEM_SIZE_E1);
5239 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5240 STORM_INTMEM_SIZE_E1);
5241 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5242 STORM_INTMEM_SIZE_E1);
34f80b04 5243 }
a2fbb9ea 5244
5245 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5246 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5247 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5248 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5249
5250 /* sync semi rtc */
5251 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5252 0x80000000);
5253 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5254 0x80000000);
a2fbb9ea 5255
5256 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5257 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5258 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5259
5260 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5261 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5262 REG_WR(bp, i, 0xc0cac01a);
5263 /* TODO: replace with something meaningful */
5264 }
5265 if (CHIP_IS_E1H(bp))
5266 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5267 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5268
5269 if (sizeof(union cdu_context) != 1024)
5270 /* we currently assume that a context is 1024 bytes */
5271 printk(KERN_ALERT PFX "please adjust the size of"
5272 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5273
5274 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5275 val = (4 << 24) + (0 << 12) + 1024;
5276 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5277 if (CHIP_IS_E1(bp)) {
5278 		/* !!! fix pxp client credit until excel update */
5279 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5280 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5281 }
a2fbb9ea 5282
5283 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5284 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5285
5286 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5287 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5288
5289 /* PXPCS COMMON comes here */
5290 /* Reset PCIE errors for debug */
5291 REG_WR(bp, 0x2814, 0xffffffff);
5292 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5293
5294 /* EMAC0 COMMON comes here */
5295 /* EMAC1 COMMON comes here */
5296 /* DBU COMMON comes here */
5297 /* DBG COMMON comes here */
5298
5299 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5300 if (CHIP_IS_E1H(bp)) {
5301 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5302 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5303 }
5304
5305 if (CHIP_REV_IS_SLOW(bp))
5306 msleep(200);
5307
5308 /* finish CFC init */
5309 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5310 if (val != 1) {
5311 BNX2X_ERR("CFC LL_INIT failed\n");
5312 return -EBUSY;
5313 }
5314 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5315 if (val != 1) {
5316 BNX2X_ERR("CFC AC_INIT failed\n");
5317 return -EBUSY;
5318 }
5319 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5320 if (val != 1) {
5321 BNX2X_ERR("CFC CAM_INIT failed\n");
5322 return -EBUSY;
5323 }
5324 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5325
5326 /* read NIG statistic
5327 to see if this is our first up since powerup */
5328 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5329 val = *bnx2x_sp(bp, wb_data[0]);
5330
5331 /* do internal memory self test */
5332 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5333 BNX2X_ERR("internal mem self test failed\n");
5334 return -EBUSY;
5335 }
5336
5337 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5338 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5339 /* Fan failure is indicated by SPIO 5 */
5340 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5341 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5342
5343 /* set to active low mode */
5344 val = REG_RD(bp, MISC_REG_SPIO_INT);
5345 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5346 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5347 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5348
5349 /* enable interrupt to signal the IGU */
5350 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5351 val |= (1 << MISC_REGISTERS_SPIO_5);
5352 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5353 break;
f1410647 5354
5355 default:
5356 break;
5357 }
f1410647 5358
5359 /* clear PXP2 attentions */
5360 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5361
34f80b04 5362 enable_blocks_attention(bp);
a2fbb9ea 5363
5364 if (bp->flags & TPA_ENABLE_FLAG) {
5365 struct tstorm_eth_tpa_exist tmp = {0};
5366
5367 tmp.tpa_exist = 1;
5368
5369 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5370 ((u32 *)&tmp)[0]);
5371 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5372 ((u32 *)&tmp)[1]);
5373 }
5374
5375 return 0;
5376}
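/*
 * Editorial note (illustrative sketch, not part of the original driver):
 * the TPA block just above copies a small firmware structure into TSTORM
 * internal memory one 32-bit word at a time, because REG_WR() moves u32
 * values only.  The same idiom, generalized ("example_wr_struct" is a
 * hypothetical helper):
 */
static inline void example_wr_struct(struct bnx2x *bp, u32 addr,
				     const void *data, int size)
{
	const u32 *words = data;
	int i;

	/* size is assumed to be a multiple of 4 bytes */
	for (i = 0; i < size / 4; i++)
		REG_WR(bp, addr + i*4, words[i]);
}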
a2fbb9ea 5377
5378static int bnx2x_init_port(struct bnx2x *bp)
5379{
5380 int port = BP_PORT(bp);
5381 u32 val;
a2fbb9ea 5382
5383 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5384
5385 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5386
5387 /* Port PXP comes here */
5388 /* Port PXP2 comes here */
5389#ifdef BCM_ISCSI
5390 /* Port0 1
5391 * Port1 385 */
5392 i++;
5393 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5394 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5395 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5396 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5397
5398 /* Port0 2
5399 * Port1 386 */
5400 i++;
5401 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5402 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5403 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5404 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5405
5406 /* Port0 3
5407 * Port1 387 */
5408 i++;
5409 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5410 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5411 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5412 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5413#endif
34f80b04 5414 /* Port CMs come here */
5415
5416 /* Port QM comes here */
5417#ifdef BCM_ISCSI
5418 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5419 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5420
5421 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5422 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5423#endif
5424 /* Port DQ comes here */
5425 /* Port BRB1 comes here */
ad8d3948 5426 /* Port PRS comes here */
5427 /* Port TSDM comes here */
5428 /* Port CSDM comes here */
5429 /* Port USDM comes here */
5430 /* Port XSDM comes here */
5431 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5432 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5433 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5434 port ? USEM_PORT1_END : USEM_PORT0_END);
5435 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5436 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5437 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5438 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5439 /* Port UPB comes here */
5440 /* Port XPB comes here */
5441
5442 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5443 port ? PBF_PORT1_END : PBF_PORT0_END);
5444
5445 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5446 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5447
5448 /* update threshold */
34f80b04 5449 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5450 /* update init credit */
34f80b04 5451 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5452
5453 /* probe changes */
34f80b04 5454 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5455 msleep(5);
34f80b04 5456 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5457
5458#ifdef BCM_ISCSI
5459 /* tell the searcher where the T2 table is */
5460 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5461
5462 wb_write[0] = U64_LO(bp->t2_mapping);
5463 wb_write[1] = U64_HI(bp->t2_mapping);
5464 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5465 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5466 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5467 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5468
5469 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5470 /* Port SRCH comes here */
5471#endif
5472 /* Port CDU comes here */
5473 /* Port CFC comes here */
5474
5475 if (CHIP_IS_E1(bp)) {
5476 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5477 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5478 }
5479 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5480 port ? HC_PORT1_END : HC_PORT0_END);
5481
5482 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5483 MISC_AEU_PORT0_START,
5484 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5485 /* init aeu_mask_attn_func_0/1:
5486 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5487 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5488 * bits 4-7 are used for "per vn group attention" */
5489 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5490 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5491
5492 /* Port PXPCS comes here */
5493 /* Port EMAC0 comes here */
5494 /* Port EMAC1 comes here */
5495 /* Port DBU comes here */
5496 /* Port DBG comes here */
5497 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5498 port ? NIG_PORT1_END : NIG_PORT0_END);
5499
5500 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5501
5502 if (CHIP_IS_E1H(bp)) {
5503 u32 wsum;
5504 struct cmng_struct_per_port m_cmng_port;
5505 int vn;
5506
5507 /* 0x2 disable e1hov, 0x1 enable */
5508 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5509 (IS_E1HMF(bp) ? 0x1 : 0x2));
5510
5511 /* Init RATE SHAPING and FAIRNESS contexts.
5512 	   Initialize as if there is a 10G link. */
5513 wsum = bnx2x_calc_vn_wsum(bp);
5514 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5515 if (IS_E1HMF(bp))
5516 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5517 bnx2x_init_vn_minmax(bp, 2*vn + port,
5518 wsum, 10000, &m_cmng_port);
5519 }
5520
5521 /* Port MCP comes here */
5522 /* Port DMAE comes here */
5523
34f80b04 5524 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5525 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5526 /* add SPIO 5 to group 0 */
5527 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5528 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5529 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5530 break;
5531
5532 default:
5533 break;
5534 }
5535
c18487ee 5536 bnx2x__link_reset(bp);
a2fbb9ea 5537
5538 return 0;
5539}
5540
5541#define ILT_PER_FUNC (768/2)
5542#define FUNC_ILT_BASE(func)	((func) * ILT_PER_FUNC)
5543/* the physical address is shifted right 12 bits and a valid bit (1)
5544   is added to the 53rd bit;
5545   then, since this is a wide register(TM),
5546   we split it into two 32 bit writes
5547 */
5548#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5549#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5550#define PXP_ONE_ILT(x)		(((x) << 10) | (x))
5551#define PXP_ILT_RANGE(f, l)	(((l) << 10) | (f))
5552
5553#define CNIC_ILT_LINES 0
5554
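/*
 * Editorial note (illustrative sketch, not part of the original driver):
 * how the ONCHIP_ADDR macros above split a 64-bit DMA address into the two
 * 32-bit halves of the wide register -- the low word carries bits 12-43 of
 * the address and the high word carries the remaining bits plus the valid
 * bit at position 20 (bit 52 of the full register).
 */
static inline void example_onchip_split(dma_addr_t addr, u32 *lo, u32 *hi)
{
	*lo = ONCHIP_ADDR1(addr);	/* (addr >> 12) & 0xFFFFFFFF */
	*hi = ONCHIP_ADDR2(addr);	/* (1 << 20) | (addr >> 44) */
}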
5555static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5556{
5557 int reg;
5558
5559 if (CHIP_IS_E1H(bp))
5560 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5561 else /* E1 */
5562 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5563
5564 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5565}
5566
5567static int bnx2x_init_func(struct bnx2x *bp)
5568{
5569 int port = BP_PORT(bp);
5570 int func = BP_FUNC(bp);
5571 int i;
5572
5573 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5574
5575 i = FUNC_ILT_BASE(func);
5576
5577 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5578 if (CHIP_IS_E1H(bp)) {
5579 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5580 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5581 } else /* E1 */
5582 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5583 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5584
5585
5586 if (CHIP_IS_E1H(bp)) {
5587 for (i = 0; i < 9; i++)
5588 bnx2x_init_block(bp,
5589 cm_start[func][i], cm_end[func][i]);
5590
5591 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5592 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5593 }
5594
5595 /* HC init per function */
5596 if (CHIP_IS_E1H(bp)) {
5597 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5598
5599 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5600 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5601 }
5602 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5603
5604 if (CHIP_IS_E1H(bp))
5605 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5606
c14423fe 5607 /* Reset PCIE errors for debug */
5608 REG_WR(bp, 0x2114, 0xffffffff);
5609 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5610
5611 return 0;
5612}
5613
5614static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5615{
5616 int i, rc = 0;
a2fbb9ea 5617
5618 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5619 BP_FUNC(bp), load_code);
a2fbb9ea 5620
5621 bp->dmae_ready = 0;
5622 mutex_init(&bp->dmae_mutex);
5623 bnx2x_gunzip_init(bp);
a2fbb9ea 5624
5625 switch (load_code) {
5626 case FW_MSG_CODE_DRV_LOAD_COMMON:
5627 rc = bnx2x_init_common(bp);
5628 if (rc)
5629 goto init_hw_err;
5630 /* no break */
5631
5632 case FW_MSG_CODE_DRV_LOAD_PORT:
5633 bp->dmae_ready = 1;
5634 rc = bnx2x_init_port(bp);
5635 if (rc)
5636 goto init_hw_err;
5637 /* no break */
5638
5639 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5640 bp->dmae_ready = 1;
5641 rc = bnx2x_init_func(bp);
5642 if (rc)
5643 goto init_hw_err;
5644 break;
5645
5646 default:
5647 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5648 break;
5649 }
5650
5651 if (!BP_NOMCP(bp)) {
5652 int func = BP_FUNC(bp);
5653
5654 bp->fw_drv_pulse_wr_seq =
34f80b04 5655 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5656 DRV_PULSE_SEQ_MASK);
5657 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5658 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5659 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5660 } else
5661 bp->func_stx = 0;
a2fbb9ea 5662
5663 /* this needs to be done before gunzip end */
5664 bnx2x_zero_def_sb(bp);
5665 for_each_queue(bp, i)
5666 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5667
5668init_hw_err:
5669 bnx2x_gunzip_end(bp);
5670
5671 return rc;
5672}
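/*
 * Editorial note (illustrative sketch, not part of the original driver):
 * the switch in bnx2x_init_hw() relies on deliberate fall-through (the
 * "no break" comments): a COMMON load also performs the PORT and FUNCTION
 * init, and a PORT load also performs the FUNCTION init.  The same
 * hierarchy written out explicitly:
 */
static inline int example_init_levels(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON)
		rc = bnx2x_init_common(bp);
	if (!rc && (load_code == FW_MSG_CODE_DRV_LOAD_COMMON ||
		    load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		rc = bnx2x_init_port(bp);
	if (!rc)
		rc = bnx2x_init_func(bp);
	return rc;
}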
5673
c14423fe 5674/* send the MCP a request, block until there is a reply */
5675static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5676{
34f80b04 5677 int func = BP_FUNC(bp);
5678 u32 seq = ++bp->fw_seq;
5679 u32 rc = 0;
5680 u32 cnt = 1;
5681 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5682
34f80b04 5683 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5684 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5685
5686 	do {
5687 		/* let the FW do its magic ... */
5688 msleep(delay);
a2fbb9ea 5689
19680c48 5690 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5691
5692 		/* Give the FW up to 2 seconds (200*10ms) */
5693 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5694
5695 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5696 cnt*delay, rc, seq);
5697
5698 /* is this a reply to our command? */
5699 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5700 rc &= FW_MSG_CODE_MASK;
f1410647 5701
5702 } else {
5703 /* FW BUG! */
5704 BNX2X_ERR("FW failed to respond!\n");
5705 bnx2x_fw_dump(bp);
5706 rc = 0;
5707 }
f1410647 5708
5709 return rc;
5710}
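/*
 * Editorial note (illustrative sketch, not part of the original driver):
 * callers treat a return of 0 from bnx2x_fw_command() as "no valid reply";
 * otherwise they get the FW_MSG_CODE_* part of the reply, the sequence
 * bits having been masked off above.  Typical usage, mirroring
 * bnx2x_nic_load() further down:
 */
static inline int example_fw_load_req(struct bnx2x *bp)
{
	u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);

	if (!load_code)		/* MCP failed to respond in time */
		return -EBUSY;
	return 0;		/* load_code now holds FW_MSG_CODE_DRV_LOAD_* */
}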
5711
5712static void bnx2x_free_mem(struct bnx2x *bp)
5713{
5714
5715#define BNX2X_PCI_FREE(x, y, size) \
5716 do { \
5717 if (x) { \
5718 pci_free_consistent(bp->pdev, size, x, y); \
5719 x = NULL; \
5720 y = 0; \
5721 } \
5722 } while (0)
5723
5724#define BNX2X_FREE(x) \
5725 do { \
5726 if (x) { \
5727 vfree(x); \
5728 x = NULL; \
5729 } \
5730 } while (0)
5731
5732 int i;
5733
5734 /* fastpath */
5735 for_each_queue(bp, i) {
5736
5737 /* Status blocks */
5738 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5739 bnx2x_fp(bp, i, status_blk_mapping),
5740 sizeof(struct host_status_block) +
5741 sizeof(struct eth_tx_db_data));
5742
5743 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5744 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5745 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5746 bnx2x_fp(bp, i, tx_desc_mapping),
5747 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5748
5749 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5750 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5751 bnx2x_fp(bp, i, rx_desc_mapping),
5752 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5753
5754 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5755 bnx2x_fp(bp, i, rx_comp_mapping),
5756 sizeof(struct eth_fast_path_rx_cqe) *
5757 NUM_RCQ_BD);
a2fbb9ea 5758
7a9b2557 5759 /* SGE ring */
32626230 5760 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5761 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5762 bnx2x_fp(bp, i, rx_sge_mapping),
5763 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5764 }
5765 /* end of fastpath */
5766
5767 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5768 sizeof(struct host_def_status_block));
5769
5770 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5771 sizeof(struct bnx2x_slowpath));
5772
5773#ifdef BCM_ISCSI
5774 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5775 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5776 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5777 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5778#endif
7a9b2557 5779 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5780
5781#undef BNX2X_PCI_FREE
5782#undef BNX2X_FREE
5783}
5784
5785static int bnx2x_alloc_mem(struct bnx2x *bp)
5786{
5787
5788#define BNX2X_PCI_ALLOC(x, y, size) \
5789 do { \
5790 x = pci_alloc_consistent(bp->pdev, size, y); \
5791 if (x == NULL) \
5792 goto alloc_mem_err; \
5793 memset(x, 0, size); \
5794 } while (0)
5795
5796#define BNX2X_ALLOC(x, size) \
5797 do { \
5798 x = vmalloc(size); \
5799 if (x == NULL) \
5800 goto alloc_mem_err; \
5801 memset(x, 0, size); \
5802 } while (0)
5803
5804 int i;
5805
5806 /* fastpath */
5807 for_each_queue(bp, i) {
5808 bnx2x_fp(bp, i, bp) = bp;
5809
5810 /* Status blocks */
5811 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5812 &bnx2x_fp(bp, i, status_blk_mapping),
5813 sizeof(struct host_status_block) +
5814 sizeof(struct eth_tx_db_data));
5815
5816 bnx2x_fp(bp, i, hw_tx_prods) =
5817 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5818
5819 bnx2x_fp(bp, i, tx_prods_mapping) =
5820 bnx2x_fp(bp, i, status_blk_mapping) +
5821 sizeof(struct host_status_block);
5822
5823 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5824 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5825 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5826 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5827 &bnx2x_fp(bp, i, tx_desc_mapping),
5828 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5829
5830 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5831 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5832 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5833 &bnx2x_fp(bp, i, rx_desc_mapping),
5834 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5835
5836 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5837 &bnx2x_fp(bp, i, rx_comp_mapping),
5838 sizeof(struct eth_fast_path_rx_cqe) *
5839 NUM_RCQ_BD);
5840
5841 /* SGE ring */
5842 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5843 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5844 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5845 &bnx2x_fp(bp, i, rx_sge_mapping),
5846 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5847 }
5848 /* end of fastpath */
5849
5850 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5851 sizeof(struct host_def_status_block));
5852
5853 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5854 sizeof(struct bnx2x_slowpath));
5855
5856#ifdef BCM_ISCSI
5857 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5858
5859 /* Initialize T1 */
5860 for (i = 0; i < 64*1024; i += 64) {
5861 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5862 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5863 }
5864
5865 /* allocate searcher T2 table
5866 we allocate 1/4 of alloc num for T2
5867 (which is not entered into the ILT) */
5868 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5869
5870 /* Initialize T2 */
5871 for (i = 0; i < 16*1024; i += 64)
5872 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5873
c14423fe 5874 /* now fixup the last line in the block to point to the next block */
5875 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5876
5877 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5878 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5879
5880 /* QM queues (128*MAX_CONN) */
5881 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5882#endif
5883
5884 /* Slow path ring */
5885 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5886
5887 return 0;
5888
5889alloc_mem_err:
5890 bnx2x_free_mem(bp);
5891 return -ENOMEM;
5892
5893#undef BNX2X_PCI_ALLOC
5894#undef BNX2X_ALLOC
5895}
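/*
 * Editorial note (illustrative sketch, not part of the original driver):
 * bnx2x_alloc_mem() uses the classic kernel goto-unwind pattern -- any
 * allocation failure jumps to a single label that frees everything
 * allocated so far (safe because bnx2x_free_mem() skips NULL entries).
 * The same shape reduced to two buffers:
 */
static inline int example_alloc_two(void **a, void **b, unsigned long size)
{
	*a = vmalloc(size);
	if (*a == NULL)
		goto alloc_err;
	*b = vmalloc(size);
	if (*b == NULL)
		goto alloc_err;
	return 0;

alloc_err:
	vfree(*a);		/* vfree(NULL) is a no-op */
	*a = NULL;
	return -ENOMEM;
}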
5896
5897static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5898{
5899 int i;
5900
5901 for_each_queue(bp, i) {
5902 struct bnx2x_fastpath *fp = &bp->fp[i];
5903
5904 u16 bd_cons = fp->tx_bd_cons;
5905 u16 sw_prod = fp->tx_pkt_prod;
5906 u16 sw_cons = fp->tx_pkt_cons;
5907
5908 while (sw_cons != sw_prod) {
5909 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5910 sw_cons++;
5911 }
5912 }
5913}
5914
5915static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5916{
5917 int i, j;
5918
5919 for_each_queue(bp, j) {
5920 struct bnx2x_fastpath *fp = &bp->fp[j];
5921
5922 for (i = 0; i < NUM_RX_BD; i++) {
5923 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5924 struct sk_buff *skb = rx_buf->skb;
5925
5926 if (skb == NULL)
5927 continue;
5928
5929 pci_unmap_single(bp->pdev,
5930 pci_unmap_addr(rx_buf, mapping),
5931 bp->rx_buf_use_size,
5932 PCI_DMA_FROMDEVICE);
5933
5934 rx_buf->skb = NULL;
5935 dev_kfree_skb(skb);
5936 }
7a9b2557 5937 if (!fp->disable_tpa)
5938 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5939 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5940 ETH_MAX_AGGREGATION_QUEUES_E1H);
5941 }
5942}
5943
5944static void bnx2x_free_skbs(struct bnx2x *bp)
5945{
5946 bnx2x_free_tx_skbs(bp);
5947 bnx2x_free_rx_skbs(bp);
5948}
5949
5950static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5951{
34f80b04 5952 int i, offset = 1;
5953
5954 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5955 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5956 bp->msix_table[0].vector);
5957
5958 for_each_queue(bp, i) {
c14423fe 5959 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5960 "state %x\n", i, bp->msix_table[i + offset].vector,
5961 bnx2x_fp(bp, i, state));
5962
5963 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5964 BNX2X_ERR("IRQ of fp #%d being freed while "
5965 "state != closed\n", i);
a2fbb9ea 5966
34f80b04 5967 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5968 }
5969}
5970
5971static void bnx2x_free_irq(struct bnx2x *bp)
5972{
a2fbb9ea 5973 if (bp->flags & USING_MSIX_FLAG) {
5974 bnx2x_free_msix_irqs(bp);
5975 pci_disable_msix(bp->pdev);
5976 bp->flags &= ~USING_MSIX_FLAG;
5977
5978 } else
5979 free_irq(bp->pdev->irq, bp->dev);
5980}
5981
5982static int bnx2x_enable_msix(struct bnx2x *bp)
5983{
34f80b04 5984 int i, rc, offset;
5985
5986 bp->msix_table[0].entry = 0;
5987 offset = 1;
5988 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 5989
5990 for_each_queue(bp, i) {
5991 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 5992
5993 bp->msix_table[i + offset].entry = igu_vec;
5994 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5995 "(fastpath #%u)\n", i + offset, igu_vec, i);
5996 }
5997
5998 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5999 bp->num_queues + offset);
6000 if (rc) {
6001 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6002 return -1;
6003 }
6004 bp->flags |= USING_MSIX_FLAG;
6005
6006 return 0;
6007}
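/*
 * Editorial note (not part of the original driver): the vector layout
 * assumed above is entry 0 for the slowpath interrupt and entries
 * 1..num_queues for the fastpath queues, hence the fixed offset of 1
 * whenever a queue's msix_table slot is looked up:
 */
static inline int example_fp_vector(struct bnx2x *bp, int queue)
{
	return bp->msix_table[queue + 1].vector;	/* slot 0 is slowpath */
}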
6008
6009static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6010{
34f80b04 6011 int i, rc, offset = 1;
a2fbb9ea 6012
6013 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6014 bp->dev->name, bp->dev);
6015 if (rc) {
6016 BNX2X_ERR("request sp irq failed\n");
6017 return -EBUSY;
6018 }
6019
6020 for_each_queue(bp, i) {
34f80b04 6021 rc = request_irq(bp->msix_table[i + offset].vector,
6022 bnx2x_msix_fp_int, 0,
6023 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6024 if (rc) {
6025 BNX2X_ERR("request fp #%d irq failed rc %d\n",
6026 i + offset, rc);
6027 bnx2x_free_msix_irqs(bp);
6028 return -EBUSY;
6029 }
6030
6031 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6032 }
6033
6034 return 0;
6035}
6036
6037static int bnx2x_req_irq(struct bnx2x *bp)
6038{
34f80b04 6039 int rc;
a2fbb9ea 6040
6041 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6042 bp->dev->name, bp->dev);
6043 if (!rc)
6044 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6045
6046 return rc;
6047}
6048
6049/*
6050 * Init service functions
6051 */
6052
34f80b04 6053static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6054{
6055 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6056 int port = BP_PORT(bp);
6057
6058 /* CAM allocation
6059 * unicasts 0-31:port0 32-63:port1
6060 * multicast 64-127:port0 128-191:port1
6061 */
6062 config->hdr.length_6b = 2;
6063 config->hdr.offset = port ? 31 : 0;
6064 config->hdr.client_id = BP_CL_ID(bp);
6065 config->hdr.reserved1 = 0;
6066
6067 /* primary MAC */
6068 config->config_table[0].cam_entry.msb_mac_addr =
6069 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6070 config->config_table[0].cam_entry.middle_mac_addr =
6071 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6072 config->config_table[0].cam_entry.lsb_mac_addr =
6073 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6074 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6075 config->config_table[0].target_table_entry.flags = 0;
6076 config->config_table[0].target_table_entry.client_id = 0;
6077 config->config_table[0].target_table_entry.vlan_id = 0;
6078
6079 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6080 config->config_table[0].cam_entry.msb_mac_addr,
6081 config->config_table[0].cam_entry.middle_mac_addr,
6082 config->config_table[0].cam_entry.lsb_mac_addr);
6083
6084 /* broadcast */
6085 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6086 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6087 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6088 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6089 config->config_table[1].target_table_entry.flags =
6090 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6091 config->config_table[1].target_table_entry.client_id = 0;
6092 config->config_table[1].target_table_entry.vlan_id = 0;
6093
6094 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6095 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6096 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6097}
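/*
 * Editorial note (illustrative sketch, not part of the original driver):
 * the CAM entries above store the MAC address as three byte-swapped 16-bit
 * words; on a little-endian host, 00:11:22:33:44:55 packs to msb 0x0011,
 * middle 0x2233, lsb 0x4455.
 */
static inline void example_mac_to_cam(const u8 *mac,
				      u16 *msb, u16 *mid, u16 *lsb)
{
	*msb = swab16(*(const u16 *)&mac[0]);	/* first two octets */
	*mid = swab16(*(const u16 *)&mac[2]);	/* middle two octets */
	*lsb = swab16(*(const u16 *)&mac[4]);	/* last two octets */
}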
6098
6099static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6100{
6101 struct mac_configuration_cmd_e1h *config =
6102 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6103
6104 if (bp->state != BNX2X_STATE_OPEN) {
6105 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6106 return;
6107 }
6108
6109 /* CAM allocation for E1H
6110 * unicasts: by func number
6111 * multicast: 20+FUNC*20, 20 each
6112 */
6113 config->hdr.length_6b = 1;
6114 config->hdr.offset = BP_FUNC(bp);
6115 config->hdr.client_id = BP_CL_ID(bp);
6116 config->hdr.reserved1 = 0;
6117
6118 /* primary MAC */
6119 config->config_table[0].msb_mac_addr =
6120 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6121 config->config_table[0].middle_mac_addr =
6122 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6123 config->config_table[0].lsb_mac_addr =
6124 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6125 config->config_table[0].client_id = BP_L_ID(bp);
6126 config->config_table[0].vlan_id = 0;
6127 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6128 config->config_table[0].flags = BP_PORT(bp);
6129
6130 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6131 config->config_table[0].msb_mac_addr,
6132 config->config_table[0].middle_mac_addr,
6133 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6134
6135 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6136 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6137 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6138}
6139
6140static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6141 int *state_p, int poll)
6142{
6143 /* can take a while if any port is running */
34f80b04 6144 int cnt = 500;
a2fbb9ea 6145
6146 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6147 poll ? "polling" : "waiting", state, idx);
6148
6149 might_sleep();
34f80b04 6150 while (cnt--) {
6151 if (poll) {
6152 bnx2x_rx_int(bp->fp, 10);
6153 			/* if index is different from 0
6154 			 * the reply for some commands will
6155 			 * be on the non-default queue
6156 			 */
6157 if (idx)
6158 bnx2x_rx_int(&bp->fp[idx], 10);
6159 }
34f80b04 6160 mb(); /* state is changed by bnx2x_sp_event() */
a2fbb9ea 6161
49d66772 6162 if (*state_p == state)
6163 return 0;
6164
a2fbb9ea 6165 msleep(1);
6166 }
6167
a2fbb9ea 6168 /* timeout! */
6169 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6170 poll ? "polling" : "waiting", state, idx);
6171#ifdef BNX2X_STOP_ON_ERROR
6172 bnx2x_panic();
6173#endif
a2fbb9ea 6174
49d66772 6175 return -EBUSY;
6176}
6177
6178static int bnx2x_setup_leading(struct bnx2x *bp)
6179{
34f80b04 6180 int rc;
a2fbb9ea 6181
c14423fe 6182 /* reset IGU state */
34f80b04 6183 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6184
6185 /* SETUP ramrod */
6186 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6187
6188 /* Wait for completion */
6189 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6190
34f80b04 6191 return rc;
6192}
6193
6194static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6195{
a2fbb9ea 6196 /* reset IGU state */
34f80b04 6197 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6198
228241eb 6199 /* SETUP ramrod */
6200 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6201 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6202
6203 /* Wait for completion */
6204 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6205 &(bp->fp[index].state), 0);
6206}
6207
6208static int bnx2x_poll(struct napi_struct *napi, int budget);
6209static void bnx2x_set_rx_mode(struct net_device *dev);
6210
6211/* must be called with rtnl_lock */
6212static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6213{
228241eb 6214 u32 load_code;
6215 int i, rc;
6216
6217#ifdef BNX2X_STOP_ON_ERROR
6218 if (unlikely(bp->panic))
6219 return -EPERM;
6220#endif
6221
6222 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6223
6224 /* Send LOAD_REQUEST command to MCP
6225 Returns the type of LOAD command:
6226 if it is the first port to be initialized
6227 common blocks should be initialized, otherwise - not
a2fbb9ea 6228 */
34f80b04 6229 if (!BP_NOMCP(bp)) {
6230 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6231 if (!load_code) {
da5a662a 6232 BNX2X_ERR("MCP response failure, aborting\n");
6233 return -EBUSY;
6234 }
34f80b04 6235 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6236 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6237
a2fbb9ea 6238 } else {
6239 int port = BP_PORT(bp);
6240
6241 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6242 load_count[0], load_count[1], load_count[2]);
6243 load_count[0]++;
da5a662a 6244 load_count[1 + port]++;
6245 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6246 load_count[0], load_count[1], load_count[2]);
6247 if (load_count[0] == 1)
6248 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6249 else if (load_count[1 + port] == 1)
6250 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6251 else
6252 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6253 }
6254
6255 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6256 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6257 bp->port.pmf = 1;
6258 else
6259 bp->port.pmf = 0;
6260 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6261
6262 /* if we can't use MSI-X we only need one fp,
6263 * so try to enable MSI-X with the requested number of fp's
6264 * and fallback to inta with one fp
6265 */
6266 if (use_inta) {
6267 bp->num_queues = 1;
6268
6269 } else {
6270 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6271 /* user requested number */
6272 bp->num_queues = use_multi;
6273
6274 else if (use_multi)
6275 bp->num_queues = min_t(u32, num_online_cpus(),
6276 BP_MAX_QUEUES(bp));
6277 else
a2fbb9ea 6278 bp->num_queues = 1;
6279
6280 if (bnx2x_enable_msix(bp)) {
6281 /* failed to enable MSI-X */
6282 bp->num_queues = 1;
6283 if (use_multi)
6284 BNX2X_ERR("Multi requested but failed"
6285 " to enable MSI-X\n");
6286 }
6287 }
6288 DP(NETIF_MSG_IFUP,
6289 "set number of queues to %d\n", bp->num_queues);
c14423fe 6290
6291 if (bnx2x_alloc_mem(bp))
6292 return -ENOMEM;
6293
6294 for_each_queue(bp, i)
6295 bnx2x_fp(bp, i, disable_tpa) =
6296 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6297
6298 if (bp->flags & USING_MSIX_FLAG) {
6299 rc = bnx2x_req_msix_irqs(bp);
6300 if (rc) {
6301 pci_disable_msix(bp->pdev);
6302 goto load_error;
6303 }
6304 } else {
6305 bnx2x_ack_int(bp);
6306 rc = bnx2x_req_irq(bp);
6307 if (rc) {
6308 BNX2X_ERR("IRQ request failed, aborting\n");
6309 goto load_error;
6310 }
6311 }
6312
6313 for_each_queue(bp, i)
6314 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6315 bnx2x_poll, 128);
6316
a2fbb9ea 6317 /* Initialize HW */
6318 rc = bnx2x_init_hw(bp, load_code);
6319 if (rc) {
a2fbb9ea 6320 BNX2X_ERR("HW init failed, aborting\n");
228241eb 6321 goto load_error;
6322 }
6323
a2fbb9ea 6324 /* Setup NIC internals and enable interrupts */
471de716 6325 bnx2x_nic_init(bp, load_code);
6326
6327 /* Send LOAD_DONE command to MCP */
34f80b04 6328 if (!BP_NOMCP(bp)) {
6329 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6330 if (!load_code) {
da5a662a 6331 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6332 rc = -EBUSY;
228241eb 6333 goto load_int_disable;
6334 }
6335 }
6336
6337 bnx2x_stats_init(bp);
6338
6339 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6340
6341 /* Enable Rx interrupt handling before sending the ramrod
6342 as it's completed on Rx FP queue */
6343 for_each_queue(bp, i)
6344 napi_enable(&bnx2x_fp(bp, i, napi));
6345
6346 /* Enable interrupt handling */
6347 atomic_set(&bp->intr_sem, 0);
6348
6349 rc = bnx2x_setup_leading(bp);
6350 if (rc) {
da5a662a 6351 BNX2X_ERR("Setup leading failed!\n");
228241eb 6352 goto load_stop_netif;
34f80b04 6353 }
a2fbb9ea 6354
6355 if (CHIP_IS_E1H(bp))
6356 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6357 BNX2X_ERR("!!! mf_cfg function disabled\n");
6358 bp->state = BNX2X_STATE_DISABLED;
6359 }
a2fbb9ea 6360
6361 if (bp->state == BNX2X_STATE_OPEN)
6362 for_each_nondefault_queue(bp, i) {
6363 rc = bnx2x_setup_multi(bp, i);
6364 if (rc)
6365 goto load_stop_netif;
6366 }
a2fbb9ea 6367
6368 if (CHIP_IS_E1(bp))
6369 bnx2x_set_mac_addr_e1(bp);
6370 else
6371 bnx2x_set_mac_addr_e1h(bp);
6372
6373 if (bp->port.pmf)
6374 bnx2x_initial_phy_init(bp);
6375
6376 /* Start fast path */
6377 switch (load_mode) {
6378 case LOAD_NORMAL:
6379 /* Tx queue should be only reenabled */
6380 netif_wake_queue(bp->dev);
6381 bnx2x_set_rx_mode(bp->dev);
6382 break;
6383
6384 case LOAD_OPEN:
a2fbb9ea 6385 netif_start_queue(bp->dev);
34f80b04 6386 bnx2x_set_rx_mode(bp->dev);
6387 if (bp->flags & USING_MSIX_FLAG)
6388 printk(KERN_INFO PFX "%s: using MSI-X\n",
6389 bp->dev->name);
34f80b04 6390 break;
a2fbb9ea 6391
34f80b04 6392 case LOAD_DIAG:
a2fbb9ea 6393 bnx2x_set_rx_mode(bp->dev);
6394 bp->state = BNX2X_STATE_DIAG;
6395 break;
6396
6397 default:
6398 break;
6399 }
6400
6401 if (!bp->port.pmf)
6402 bnx2x__link_status_update(bp);
6403
6404 /* start the timer */
6405 mod_timer(&bp->timer, jiffies + bp->current_interval);
6406
34f80b04 6407
6408 return 0;
6409
228241eb 6410load_stop_netif:
6411 for_each_queue(bp, i)
6412 napi_disable(&bnx2x_fp(bp, i, napi));
6413
228241eb 6414load_int_disable:
615f8fd9 6415 bnx2x_int_disable_sync(bp);
a2fbb9ea 6416
34f80b04 6417 /* Release IRQs */
6418 bnx2x_free_irq(bp);
6419
6420 /* Free SKBs, SGEs, TPA pool and driver internals */
6421 bnx2x_free_skbs(bp);
6422 for_each_queue(bp, i)
6423 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6424 RX_SGE_CNT*NUM_RX_SGE_PAGES);
228241eb 6425load_error:
6426 bnx2x_free_mem(bp);
6427
6428 /* TBD we really need to reset the chip
6429 if we want to recover from this */
34f80b04 6430 return rc;
6431}
6432
6433static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6434{
6435 int rc;
6436
c14423fe 6437 /* halt the connection */
6438 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6439 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6440
34f80b04 6441 /* Wait for completion */
a2fbb9ea 6442 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6443 &(bp->fp[index].state), 1);
c14423fe 6444 if (rc) /* timeout */
6445 return rc;
6446
6447 /* delete cfc entry */
6448 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6449
6450 /* Wait for completion */
6451 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6452 &(bp->fp[index].state), 1);
6453 return rc;
6454}
6455
da5a662a 6456static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6457{
49d66772 6458 u16 dsb_sp_prod_idx;
c14423fe 6459 /* if the other port is handling traffic,
a2fbb9ea 6460 this can take a lot of time */
6461 int cnt = 500;
6462 int rc;
6463
6464 might_sleep();
6465
6466 /* Send HALT ramrod */
6467 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6468 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6469
6470 /* Wait for completion */
6471 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6472 &(bp->fp[0].state), 1);
6473 if (rc) /* timeout */
da5a662a 6474 return rc;
a2fbb9ea 6475
49d66772 6476 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6477
228241eb 6478 /* Send PORT_DELETE ramrod */
6479 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6480
49d66772 6481 /* Wait for completion to arrive on default status block
6482 we are going to reset the chip anyway
6483 so there is not much to do if this times out
6484 */
34f80b04 6485 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
49d66772 6486 msleep(1);
6487 if (!cnt) {
6488 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6489 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6490 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6491#ifdef BNX2X_STOP_ON_ERROR
6492 bnx2x_panic();
6493#else
6494 rc = -EBUSY;
6495#endif
6496 break;
6497 }
6498 cnt--;
da5a662a 6499 msleep(1);
6500 }
6501 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6502 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6503
6504 return rc;
6505}
6506
6507static void bnx2x_reset_func(struct bnx2x *bp)
6508{
6509 int port = BP_PORT(bp);
6510 int func = BP_FUNC(bp);
6511 int base, i;
6512
6513 /* Configure IGU */
6514 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6515 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6516
6517 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6518
6519 /* Clear ILT */
6520 base = FUNC_ILT_BASE(func);
6521 for (i = base; i < base + ILT_PER_FUNC; i++)
6522 bnx2x_ilt_wr(bp, i, 0);
6523}
6524
6525static void bnx2x_reset_port(struct bnx2x *bp)
6526{
6527 int port = BP_PORT(bp);
6528 u32 val;
6529
6530 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6531
6532 /* Do not rcv packets to BRB */
6533 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6534 /* Do not direct rcv packets that are not for MCP to the BRB */
6535 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6536 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6537
6538 /* Configure AEU */
6539 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6540
6541 msleep(100);
6542 /* Check for BRB port occupancy */
6543 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6544 if (val)
6545 DP(NETIF_MSG_IFDOWN,
6546 		   "BRB1 is not empty %d blocks are occupied\n", val);
6547
6548 /* TODO: Close Doorbell port? */
6549}
6550
6551static void bnx2x_reset_common(struct bnx2x *bp)
6552{
6553 /* reset_common */
6554 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6555 0xd3ffff7f);
6556 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6557}
6558
6559static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6560{
6561 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6562 BP_FUNC(bp), reset_code);
6563
6564 switch (reset_code) {
6565 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6566 bnx2x_reset_port(bp);
6567 bnx2x_reset_func(bp);
6568 bnx2x_reset_common(bp);
6569 break;
6570
6571 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6572 bnx2x_reset_port(bp);
6573 bnx2x_reset_func(bp);
6574 break;
6575
6576 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6577 bnx2x_reset_func(bp);
6578 break;
49d66772 6579
6580 default:
6581 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6582 break;
6583 }
6584}
6585
6586/* must be called with rtnl_lock */
6587static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6588{
da5a662a 6589 int port = BP_PORT(bp);
a2fbb9ea 6590 u32 reset_code = 0;
da5a662a 6591 int i, cnt, rc;
6592
6593 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6594
6595 bp->rx_mode = BNX2X_RX_MODE_NONE;
6596 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6597
6598 if (netif_running(bp->dev)) {
6599 netif_tx_disable(bp->dev);
6600 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6601 }
6602
6603 del_timer_sync(&bp->timer);
6604 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6605 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6606 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6607
da5a662a 6608 /* Wait until tx fast path tasks complete */
6609 for_each_queue(bp, i) {
6610 struct bnx2x_fastpath *fp = &bp->fp[i];
6611
6612 cnt = 1000;
6613 smp_rmb();
6614 while (BNX2X_HAS_TX_WORK(fp)) {
6615
6616 if (!netif_running(bp->dev))
6617 bnx2x_tx_int(fp, 1000);
6618
6619 if (!cnt) {
6620 BNX2X_ERR("timeout waiting for queue[%d]\n",
6621 i);
6622#ifdef BNX2X_STOP_ON_ERROR
6623 bnx2x_panic();
6624 return -EBUSY;
6625#else
6626 break;
6627#endif
6628 }
6629 cnt--;
da5a662a 6630 msleep(1);
6631 smp_rmb();
6632 }
228241eb 6633 }
a2fbb9ea 6634
6635 /* Give HW time to discard old tx messages */
6636 msleep(1);
a2fbb9ea 6637
6638 for_each_queue(bp, i)
6639 napi_disable(&bnx2x_fp(bp, i, napi));
6640 /* Disable interrupts after Tx and Rx are disabled on stack level */
6641 bnx2x_int_disable_sync(bp);
a2fbb9ea 6642
6643 /* Release IRQs */
6644 bnx2x_free_irq(bp);
6645
6646 if (unload_mode == UNLOAD_NORMAL)
6647 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6648
6649 else if (bp->flags & NO_WOL_FLAG) {
a2fbb9ea 6650 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6651 if (CHIP_IS_E1H(bp))
6652 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
228241eb 6653
6654 } else if (bp->wol) {
6655 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 6656 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 6657 u32 val;
6658 /* The mac address is written to entries 1-4 to
6659 preserve entry 0 which is used by the PMF */
6660 u8 entry = (BP_E1HVN(bp) + 1)*8;
6661
a2fbb9ea 6662 val = (mac_addr[0] << 8) | mac_addr[1];
da5a662a 6663 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6664
6665 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6666 (mac_addr[4] << 8) | mac_addr[5];
da5a662a 6667 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6668
6669 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 6670
6671 } else
6672 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6673
6674 if (CHIP_IS_E1H(bp))
6675 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6676
6677 /* Close multi and leading connections
6678 Completions for ramrods are collected in a synchronous way */
6679 for_each_nondefault_queue(bp, i)
6680 if (bnx2x_stop_multi(bp, i))
228241eb 6681 goto unload_error;
a2fbb9ea 6682
6683 rc = bnx2x_stop_leading(bp);
6684 if (rc) {
34f80b04 6685 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6686#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6687 return -EBUSY;
6688#else
6689 goto unload_error;
34f80b04 6690#endif
6691 }
6692
6693unload_error:
34f80b04 6694 if (!BP_NOMCP(bp))
228241eb 6695 reset_code = bnx2x_fw_command(bp, reset_code);
6696 else {
6697 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6698 load_count[0], load_count[1], load_count[2]);
6699 load_count[0]--;
da5a662a 6700 load_count[1 + port]--;
6701 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6702 load_count[0], load_count[1], load_count[2]);
6703 if (load_count[0] == 0)
6704 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6705 else if (load_count[1 + port] == 0)
6706 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6707 else
6708 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6709 }
a2fbb9ea 6710
6711 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6712 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6713 bnx2x__link_reset(bp);
6714
6715 /* Reset the chip */
228241eb 6716 bnx2x_reset_chip(bp, reset_code);
6717
6718 /* Report UNLOAD_DONE to MCP */
34f80b04 6719 if (!BP_NOMCP(bp))
6720 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6721
7a9b2557 6722 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6723 bnx2x_free_skbs(bp);
6724 for_each_queue(bp, i)
6725 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6726 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6727 bnx2x_free_mem(bp);
6728
6729 bp->state = BNX2X_STATE_CLOSED;
228241eb 6730
6731 netif_carrier_off(bp->dev);
6732
6733 return 0;
6734}
6735
6736static void bnx2x_reset_task(struct work_struct *work)
6737{
6738 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6739
6740#ifdef BNX2X_STOP_ON_ERROR
6741 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6742 " so reset not done to allow debug dump,\n"
6743 KERN_ERR " you will need to reboot when done\n");
6744 return;
6745#endif
6746
6747 rtnl_lock();
6748
6749 if (!netif_running(bp->dev))
6750 goto reset_task_exit;
6751
6752 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6753 bnx2x_nic_load(bp, LOAD_NORMAL);
6754
6755reset_task_exit:
6756 rtnl_unlock();
6757}
6758
6759/* end of nic load/unload */
6760
6761/* ethtool_ops */
6762
6763/*
6764 * Init service functions
6765 */
6766
6767static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6768{
6769 u32 val;
6770
6771 /* Check if there is any driver already loaded */
6772 val = REG_RD(bp, MISC_REG_UNPREPARED);
6773 if (val == 0x1) {
6774 /* Check if it is the UNDI driver
6775 * UNDI driver initializes CID offset for normal bell to 0x7
6776 */
4a37fb66 6777 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6778 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6779 if (val == 0x7) {
6780 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6781 /* save our func */
34f80b04 6782 int func = BP_FUNC(bp);
6783 u32 swap_en;
6784 u32 swap_val;
6785
6786 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6787
6788 /* try unload UNDI on port 0 */
6789 bp->func = 0;
6790 bp->fw_seq =
6791 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6792 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6793 reset_code = bnx2x_fw_command(bp, reset_code);
6794
6795 /* if UNDI is loaded on the other port */
6796 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6797
6798 /* send "DONE" for previous unload */
6799 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6800
6801 /* unload UNDI on port 1 */
34f80b04 6802 bp->func = 1;
6803 bp->fw_seq =
6804 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6805 DRV_MSG_SEQ_NUMBER_MASK);
6806 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6807
6808 bnx2x_fw_command(bp, reset_code);
6809 }
6810
6811 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6812 HC_REG_CONFIG_0), 0x1000);
6813
6814 /* close input traffic and wait for it */
6815 /* Do not rcv packets to BRB */
6816 REG_WR(bp,
6817 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6818 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6819 /* Do not direct rcv packets that are not for MCP to
6820 * the BRB */
6821 REG_WR(bp,
6822 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6823 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6824 /* clear AEU */
6825 REG_WR(bp,
6826 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6827 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6828 msleep(10);
6829
6830 /* save NIG port swap info */
6831 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6832 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6833 /* reset device */
6834 REG_WR(bp,
6835 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6836 0xd3ffffff);
34f80b04
EG
6837 REG_WR(bp,
6838 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6839 0x1403);
da5a662a
VZ
6840 /* take the NIG out of reset and restore swap values */
6841 REG_WR(bp,
6842 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6843 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6844 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6845 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6846
6847 /* send unload done to the MCP */
6848 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6849
6850 /* restore our func and fw_seq */
6851 bp->func = func;
6852 bp->fw_seq =
6853 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6854 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6855 }
4a37fb66 6856 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
6857 }
6858}
6859
6860static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6861{
6862 u32 val, val2, val3, val4, id;
6863
6864 /* Get the chip revision id and number. */
6865 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6866 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6867 id = ((val & 0xffff) << 16);
6868 val = REG_RD(bp, MISC_REG_CHIP_REV);
6869 id |= ((val & 0xf) << 12);
6870 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6871 id |= ((val & 0xff) << 4);
6872 val = REG_RD(bp, MISC_REG_BOND_ID);
6873 id |= (val & 0xf);
6874 bp->common.chip_id = id;
6875 bp->link_params.chip_id = bp->common.chip_id;
6876 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6877
6878 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6879 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6880 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6881 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6882 bp->common.flash_size, bp->common.flash_size);
6883
6884 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6885 bp->link_params.shmem_base = bp->common.shmem_base;
6886 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6887
6888 if (!bp->common.shmem_base ||
6889 (bp->common.shmem_base < 0xA0000) ||
6890 (bp->common.shmem_base >= 0xC0000)) {
6891 BNX2X_DEV_INFO("MCP not active\n");
6892 bp->flags |= NO_MCP_FLAG;
6893 return;
6894 }
6895
6896 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6897 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6898 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6899 BNX2X_ERR("BAD MCP validity signature\n");
6900
6901 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6902 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6903
6904 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6905 bp->common.hw_config, bp->common.board);
6906
6907 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6908 SHARED_HW_CFG_LED_MODE_MASK) >>
6909 SHARED_HW_CFG_LED_MODE_SHIFT);
6910
6911 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6912 bp->common.bc_ver = val;
6913 BNX2X_DEV_INFO("bc_ver %X\n", val);
6914 if (val < BNX2X_BC_VER) {
6915 /* for now only warn
6916 * later we might need to enforce this */
6917 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6918 " please upgrade BC\n", BNX2X_BC_VER, val);
6919 }
6920 BNX2X_DEV_INFO("%sWoL Capable\n",
6921 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6922
6923 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6924 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6925 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6926 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6927
6928 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6929 val, val2, val3, val4);
6930}
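/* Illustrative sketch (not driver code): given the packing above, the
 * chip ID fields can be pulled back out like so; the local names are
 * only for this example.
 *
 *	u32 chip_num = (bp->common.chip_id >> 16) & 0xffff;
 *	u32 chip_rev = (bp->common.chip_id >> 12) & 0xf;
 *	u32 chip_metal = (bp->common.chip_id >> 4) & 0xff;
 *	u32 bond_id = bp->common.chip_id & 0xf;
 */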
6931
6932static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6933 u32 switch_cfg)
a2fbb9ea 6934{
34f80b04 6935 int port = BP_PORT(bp);
a2fbb9ea
ET
6936 u32 ext_phy_type;
6937
a2fbb9ea
ET
6938 switch (switch_cfg) {
6939 case SWITCH_CFG_1G:
6940 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6941
c18487ee
YR
6942 ext_phy_type =
6943 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6944 switch (ext_phy_type) {
6945 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6946 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6947 ext_phy_type);
6948
34f80b04
EG
6949 bp->port.supported |= (SUPPORTED_10baseT_Half |
6950 SUPPORTED_10baseT_Full |
6951 SUPPORTED_100baseT_Half |
6952 SUPPORTED_100baseT_Full |
6953 SUPPORTED_1000baseT_Full |
6954 SUPPORTED_2500baseX_Full |
6955 SUPPORTED_TP |
6956 SUPPORTED_FIBRE |
6957 SUPPORTED_Autoneg |
6958 SUPPORTED_Pause |
6959 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6960 break;
6961
6962 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6963 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6964 ext_phy_type);
6965
34f80b04
EG
6966 bp->port.supported |= (SUPPORTED_10baseT_Half |
6967 SUPPORTED_10baseT_Full |
6968 SUPPORTED_100baseT_Half |
6969 SUPPORTED_100baseT_Full |
6970 SUPPORTED_1000baseT_Full |
6971 SUPPORTED_TP |
6972 SUPPORTED_FIBRE |
6973 SUPPORTED_Autoneg |
6974 SUPPORTED_Pause |
6975 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6976 break;
6977
6978 default:
6979 BNX2X_ERR("NVRAM config error. "
6980 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6981 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6982 return;
6983 }
6984
34f80b04
EG
6985 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6986 port*0x10);
6987 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6988 break;
6989
6990 case SWITCH_CFG_10G:
6991 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6992
c18487ee
YR
6993 ext_phy_type =
6994 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6995 switch (ext_phy_type) {
6996 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6997 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6998 ext_phy_type);
6999
34f80b04
EG
7000 bp->port.supported |= (SUPPORTED_10baseT_Half |
7001 SUPPORTED_10baseT_Full |
7002 SUPPORTED_100baseT_Half |
7003 SUPPORTED_100baseT_Full |
7004 SUPPORTED_1000baseT_Full |
7005 SUPPORTED_2500baseX_Full |
7006 SUPPORTED_10000baseT_Full |
7007 SUPPORTED_TP |
7008 SUPPORTED_FIBRE |
7009 SUPPORTED_Autoneg |
7010 SUPPORTED_Pause |
7011 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7012 break;
7013
7014 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7015 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7016 ext_phy_type);
f1410647 7017
34f80b04
EG
7018 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7019 SUPPORTED_FIBRE |
7020 SUPPORTED_Pause |
7021 SUPPORTED_Asym_Pause);
f1410647
ET
7022 break;
7023
a2fbb9ea 7024 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7025 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7026 ext_phy_type);
7027
34f80b04
EG
7028 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7029 SUPPORTED_1000baseT_Full |
7030 SUPPORTED_FIBRE |
7031 SUPPORTED_Pause |
7032 SUPPORTED_Asym_Pause);
f1410647
ET
7033 break;
7034
7035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7036 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7037 ext_phy_type);
7038
34f80b04
EG
7039 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7040 SUPPORTED_1000baseT_Full |
7041 SUPPORTED_FIBRE |
7042 SUPPORTED_Autoneg |
7043 SUPPORTED_Pause |
7044 SUPPORTED_Asym_Pause);
f1410647
ET
7045 break;
7046
c18487ee
YR
7047 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7048 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7049 ext_phy_type);
7050
34f80b04
EG
7051 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7052 SUPPORTED_2500baseX_Full |
7053 SUPPORTED_1000baseT_Full |
7054 SUPPORTED_FIBRE |
7055 SUPPORTED_Autoneg |
7056 SUPPORTED_Pause |
7057 SUPPORTED_Asym_Pause);
c18487ee
YR
7058 break;
7059
f1410647
ET
7060 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7061 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7062 ext_phy_type);
7063
34f80b04
EG
7064 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7065 SUPPORTED_TP |
7066 SUPPORTED_Autoneg |
7067 SUPPORTED_Pause |
7068 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7069 break;
7070
c18487ee
YR
7071 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7072 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7073 bp->link_params.ext_phy_config);
7074 break;
7075
a2fbb9ea
ET
7076 default:
7077 BNX2X_ERR("NVRAM config error. "
7078 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7079 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7080 return;
7081 }
7082
34f80b04
EG
7083 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7084 port*0x18);
7085 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7086
a2fbb9ea
ET
7087 break;
7088
7089 default:
7090 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7091 bp->port.link_config);
a2fbb9ea
ET
7092 return;
7093 }
34f80b04 7094 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7095
7096 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7097 if (!(bp->link_params.speed_cap_mask &
7098 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7099 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7100
c18487ee
YR
7101 if (!(bp->link_params.speed_cap_mask &
7102 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7103 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7104
c18487ee
YR
7105 if (!(bp->link_params.speed_cap_mask &
7106 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7107 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7108
c18487ee
YR
7109 if (!(bp->link_params.speed_cap_mask &
7110 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7111 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7112
c18487ee
YR
7113 if (!(bp->link_params.speed_cap_mask &
7114 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7115 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7116 SUPPORTED_1000baseT_Full);
a2fbb9ea 7117
c18487ee
YR
7118 if (!(bp->link_params.speed_cap_mask &
7119 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7120 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7121
c18487ee
YR
7122 if (!(bp->link_params.speed_cap_mask &
7123 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7124 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7125
34f80b04 7126 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7127}
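/* Worked example (illustrative): if the NVRAM speed_cap_mask has the
 * D0_10G bit clear, SUPPORTED_10000baseT_Full is stripped here even if
 * the external PHY advertised it in the switch above.
 */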
7128
34f80b04 7129static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7130{
c18487ee 7131 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7132
34f80b04 7133 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7134 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7135 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7136 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7137 bp->port.advertising = bp->port.supported;
a2fbb9ea 7138 } else {
c18487ee
YR
7139 u32 ext_phy_type =
7140 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7141
7142 if ((ext_phy_type ==
7143 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7144 (ext_phy_type ==
7145 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7146 /* force 10G, no AN */
c18487ee 7147 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7148 bp->port.advertising =
a2fbb9ea
ET
7149 (ADVERTISED_10000baseT_Full |
7150 ADVERTISED_FIBRE);
7151 break;
7152 }
7153 BNX2X_ERR("NVRAM config error. "
7154 "Invalid link_config 0x%x"
7155 " Autoneg not supported\n",
34f80b04 7156 bp->port.link_config);
a2fbb9ea
ET
7157 return;
7158 }
7159 break;
7160
7161 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7162 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7163 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7164 bp->port.advertising = (ADVERTISED_10baseT_Full |
7165 ADVERTISED_TP);
a2fbb9ea
ET
7166 } else {
7167 BNX2X_ERR("NVRAM config error. "
7168 "Invalid link_config 0x%x"
7169 " speed_cap_mask 0x%x\n",
34f80b04 7170 bp->port.link_config,
c18487ee 7171 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7172 return;
7173 }
7174 break;
7175
7176 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7177 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7178 bp->link_params.req_line_speed = SPEED_10;
7179 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7180 bp->port.advertising = (ADVERTISED_10baseT_Half |
7181 ADVERTISED_TP);
a2fbb9ea
ET
7182 } else {
7183 BNX2X_ERR("NVRAM config error. "
7184 "Invalid link_config 0x%x"
7185 " speed_cap_mask 0x%x\n",
34f80b04 7186 bp->port.link_config,
c18487ee 7187 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7188 return;
7189 }
7190 break;
7191
7192 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7193 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7194 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7195 bp->port.advertising = (ADVERTISED_100baseT_Full |
7196 ADVERTISED_TP);
a2fbb9ea
ET
7197 } else {
7198 BNX2X_ERR("NVRAM config error. "
7199 "Invalid link_config 0x%x"
7200 " speed_cap_mask 0x%x\n",
34f80b04 7201 bp->port.link_config,
c18487ee 7202 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7203 return;
7204 }
7205 break;
7206
7207 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7208 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7209 bp->link_params.req_line_speed = SPEED_100;
7210 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7211 bp->port.advertising = (ADVERTISED_100baseT_Half |
7212 ADVERTISED_TP);
a2fbb9ea
ET
7213 } else {
7214 BNX2X_ERR("NVRAM config error. "
7215 "Invalid link_config 0x%x"
7216 " speed_cap_mask 0x%x\n",
34f80b04 7217 bp->port.link_config,
c18487ee 7218 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7219 return;
7220 }
7221 break;
7222
7223 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7224 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7225 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7226 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7227 ADVERTISED_TP);
a2fbb9ea
ET
7228 } else {
7229 BNX2X_ERR("NVRAM config error. "
7230 "Invalid link_config 0x%x"
7231 " speed_cap_mask 0x%x\n",
34f80b04 7232 bp->port.link_config,
c18487ee 7233 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7234 return;
7235 }
7236 break;
7237
7238 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7239 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7240 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7241 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7242 ADVERTISED_TP);
a2fbb9ea
ET
7243 } else {
7244 BNX2X_ERR("NVRAM config error. "
7245 "Invalid link_config 0x%x"
7246 " speed_cap_mask 0x%x\n",
34f80b04 7247 bp->port.link_config,
c18487ee 7248 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7249 return;
7250 }
7251 break;
7252
7253 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7254 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7255 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7256 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7257 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7258 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7259 ADVERTISED_FIBRE);
a2fbb9ea
ET
7260 } else {
7261 BNX2X_ERR("NVRAM config error. "
7262 "Invalid link_config 0x%x"
7263 " speed_cap_mask 0x%x\n",
34f80b04 7264 bp->port.link_config,
c18487ee 7265 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7266 return;
7267 }
7268 break;
7269
7270 default:
7271 BNX2X_ERR("NVRAM config error. "
7272 "BAD link speed link_config 0x%x\n",
34f80b04 7273 bp->port.link_config);
c18487ee 7274 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7275 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7276 break;
7277 }
a2fbb9ea 7278
34f80b04
EG
7279 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7280 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7281 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
4ab84d45 7282 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7283 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7284
c18487ee 7285 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7286 " advertising 0x%x\n",
c18487ee
YR
7287 bp->link_params.req_line_speed,
7288 bp->link_params.req_duplex,
34f80b04 7289 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7290}
7291
34f80b04 7292static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7293{
34f80b04
EG
7294 int port = BP_PORT(bp);
7295 u32 val, val2;
a2fbb9ea 7296
c18487ee 7297 bp->link_params.bp = bp;
34f80b04 7298 bp->link_params.port = port;
c18487ee 7299
c18487ee 7300 bp->link_params.serdes_config =
f1410647 7301 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7302 bp->link_params.lane_config =
a2fbb9ea 7303 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7304 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7305 SHMEM_RD(bp,
7306 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7307 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7308 SHMEM_RD(bp,
7309 dev_info.port_hw_config[port].speed_capability_mask);
7310
34f80b04 7311 bp->port.link_config =
a2fbb9ea
ET
7312 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7313
34f80b04
EG
7314 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7315 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7316 " link_config 0x%08x\n",
c18487ee
YR
7317 bp->link_params.serdes_config,
7318 bp->link_params.lane_config,
7319 bp->link_params.ext_phy_config,
34f80b04 7320 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7321
34f80b04 7322 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7323 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7324 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7325
7326 bnx2x_link_settings_requested(bp);
7327
7328 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7329 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7330 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7331 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7332 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7333 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7334 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7335 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7336 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7337 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7338}
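/* Worked example (illustrative): with mac_upper = 0x1122 and
 * mac_lower = 0x33445566, the byte extraction above yields the MAC
 * address 11:22:33:44:55:66 in bp->dev->dev_addr[0..5].
 */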
7339
7340static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7341{
7342 int func = BP_FUNC(bp);
7343 u32 val, val2;
7344 int rc = 0;
a2fbb9ea 7345
34f80b04 7346 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7347
34f80b04
EG
7348 bp->e1hov = 0;
7349 bp->e1hmf = 0;
7350 if (CHIP_IS_E1H(bp)) {
7351 bp->mf_config =
7352 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7353
34f80b04
EG
7354 val =
7355 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7356 FUNC_MF_CFG_E1HOV_TAG_MASK);
7357 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7358
34f80b04
EG
7359 bp->e1hov = val;
7360 bp->e1hmf = 1;
7361 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7362 "(0x%04x)\n",
7363 func, bp->e1hov, bp->e1hov);
7364 } else {
7365 BNX2X_DEV_INFO("Single function mode\n");
7366 if (BP_E1HVN(bp)) {
7367 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7368 " aborting\n", func);
7369 rc = -EPERM;
7370 }
7371 }
7372 }
a2fbb9ea 7373
34f80b04
EG
7374 if (!BP_NOMCP(bp)) {
7375 bnx2x_get_port_hwinfo(bp);
7376
7377 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7378 DRV_MSG_SEQ_NUMBER_MASK);
7379 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7380 }
7381
7382 if (IS_E1HMF(bp)) {
7383 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7384 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7385 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7386 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7387 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7388 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7389 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7390 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7391 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7392 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7393 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7394 ETH_ALEN);
7395 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7396 ETH_ALEN);
a2fbb9ea 7397 }
34f80b04
EG
7398
7399 return rc;
a2fbb9ea
ET
7400 }
7401
34f80b04
EG
7402 if (BP_NOMCP(bp)) {
7403 /* only supposed to happen on emulation/FPGA */
7404 BNX2X_ERR("warning rendom MAC workaround active\n");
7405 random_ether_addr(bp->dev->dev_addr);
7406 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7407 }
a2fbb9ea 7408
34f80b04
EG
7409 return rc;
7410}
7411
7412static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7413{
7414 int func = BP_FUNC(bp);
7415 int rc;
7416
da5a662a
VZ
7417 /* Disable interrupt handling until HW is initialized */
7418 atomic_set(&bp->intr_sem, 1);
7419
34f80b04 7420 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7421
34f80b04
EG
7422 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7423 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7424
7425 rc = bnx2x_get_hwinfo(bp);
7426
7427 /* need to reset chip if undi was active */
7428 if (!BP_NOMCP(bp))
7429 bnx2x_undi_unload(bp);
7430
7431 if (CHIP_REV_IS_FPGA(bp))
7432 printk(KERN_ERR PFX "FPGA detected\n");
7433
7434 if (BP_NOMCP(bp) && (func == 0))
7435 printk(KERN_ERR PFX
7436 "MCP disabled, must load devices in order!\n");
7437
7a9b2557
VZ
7438 /* Set TPA flags */
7439 if (disable_tpa) {
7440 bp->flags &= ~TPA_ENABLE_FLAG;
7441 bp->dev->features &= ~NETIF_F_LRO;
7442 } else {
7443 bp->flags |= TPA_ENABLE_FLAG;
7444 bp->dev->features |= NETIF_F_LRO;
7445 }
7446
7447
34f80b04
EG
7448 bp->tx_ring_size = MAX_TX_AVAIL;
7449 bp->rx_ring_size = MAX_RX_AVAIL;
7450
7451 bp->rx_csum = 1;
7452 bp->rx_offset = 0;
7453
7454 bp->tx_ticks = 50;
7455 bp->rx_ticks = 25;
7456
34f80b04
EG
7457 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7458 bp->current_interval = (poll ? poll : bp->timer_interval);
7459
7460 init_timer(&bp->timer);
7461 bp->timer.expires = jiffies + bp->current_interval;
7462 bp->timer.data = (unsigned long) bp;
7463 bp->timer.function = bnx2x_timer;
7464
7465 return rc;
a2fbb9ea
ET
7466}
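/* Note (assumption, based only on the setup above): bnx2x_timer() is
 * expected to re-arm itself, e.g. via something like
 *
 *	mod_timer(&bp->timer, jiffies + bp->current_interval);
 *
 * so the one-shot initialization here is enough to keep it running.
 */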
7467
7468/*
7469 * ethtool service functions
7470 */
7471
7472/* All ethtool functions called with rtnl_lock */
7473
7474static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7475{
7476 struct bnx2x *bp = netdev_priv(dev);
7477
34f80b04
EG
7478 cmd->supported = bp->port.supported;
7479 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7480
7481 if (netif_carrier_ok(dev)) {
c18487ee
YR
7482 cmd->speed = bp->link_vars.line_speed;
7483 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7484 } else {
c18487ee
YR
7485 cmd->speed = bp->link_params.req_line_speed;
7486 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7487 }
34f80b04
EG
7488 if (IS_E1HMF(bp)) {
7489 u16 vn_max_rate;
7490
7491 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7492 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7493 if (vn_max_rate < cmd->speed)
7494 cmd->speed = vn_max_rate;
7495 }
a2fbb9ea 7496
c18487ee
YR
7497 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7498 u32 ext_phy_type =
7499 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7500
7501 switch (ext_phy_type) {
7502 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7503 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7504 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7505 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7506 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7507 cmd->port = PORT_FIBRE;
7508 break;
7509
7510 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7511 cmd->port = PORT_TP;
7512 break;
7513
c18487ee
YR
7514 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7515 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7516 bp->link_params.ext_phy_config);
7517 break;
7518
f1410647
ET
7519 default:
7520 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7521 bp->link_params.ext_phy_config);
7522 break;
f1410647
ET
7523 }
7524 } else
a2fbb9ea 7525 cmd->port = PORT_TP;
a2fbb9ea 7526
34f80b04 7527 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7528 cmd->transceiver = XCVR_INTERNAL;
7529
c18487ee 7530 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7531 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7532 else
a2fbb9ea 7533 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7534
7535 cmd->maxtxpkt = 0;
7536 cmd->maxrxpkt = 0;
7537
7538 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7539 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7540 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7541 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7542 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7543 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7544 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7545
7546 return 0;
7547}
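/* Worked example (illustrative): in multi-function mode the MAX_BW
 * field is in units of 100 Mbps, so a field value of 50 caps the
 * reported speed at 50 * 100 = 5000 (2.5G/5G class) even if the link
 * itself is up at 10G.
 */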
7548
7549static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7550{
7551 struct bnx2x *bp = netdev_priv(dev);
7552 u32 advertising;
7553
34f80b04
EG
7554 if (IS_E1HMF(bp))
7555 return 0;
7556
a2fbb9ea
ET
7557 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7558 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7559 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7560 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7561 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7562 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7563 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7564
a2fbb9ea 7565 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7566 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7567 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7568 return -EINVAL;
f1410647 7569 }
a2fbb9ea
ET
7570
7571 /* advertise the requested speed and duplex if supported */
34f80b04 7572 cmd->advertising &= bp->port.supported;
a2fbb9ea 7573
c18487ee
YR
7574 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7575 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7576 bp->port.advertising |= (ADVERTISED_Autoneg |
7577 cmd->advertising);
a2fbb9ea
ET
7578
7579 } else { /* forced speed */
7580 /* advertise the requested speed and duplex if supported */
7581 switch (cmd->speed) {
7582 case SPEED_10:
7583 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7584 if (!(bp->port.supported &
f1410647
ET
7585 SUPPORTED_10baseT_Full)) {
7586 DP(NETIF_MSG_LINK,
7587 "10M full not supported\n");
a2fbb9ea 7588 return -EINVAL;
f1410647 7589 }
a2fbb9ea
ET
7590
7591 advertising = (ADVERTISED_10baseT_Full |
7592 ADVERTISED_TP);
7593 } else {
34f80b04 7594 if (!(bp->port.supported &
f1410647
ET
7595 SUPPORTED_10baseT_Half)) {
7596 DP(NETIF_MSG_LINK,
7597 "10M half not supported\n");
a2fbb9ea 7598 return -EINVAL;
f1410647 7599 }
a2fbb9ea
ET
7600
7601 advertising = (ADVERTISED_10baseT_Half |
7602 ADVERTISED_TP);
7603 }
7604 break;
7605
7606 case SPEED_100:
7607 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7608 if (!(bp->port.supported &
f1410647
ET
7609 SUPPORTED_100baseT_Full)) {
7610 DP(NETIF_MSG_LINK,
7611 "100M full not supported\n");
a2fbb9ea 7612 return -EINVAL;
f1410647 7613 }
a2fbb9ea
ET
7614
7615 advertising = (ADVERTISED_100baseT_Full |
7616 ADVERTISED_TP);
7617 } else {
34f80b04 7618 if (!(bp->port.supported &
f1410647
ET
7619 SUPPORTED_100baseT_Half)) {
7620 DP(NETIF_MSG_LINK,
7621 "100M half not supported\n");
a2fbb9ea 7622 return -EINVAL;
f1410647 7623 }
a2fbb9ea
ET
7624
7625 advertising = (ADVERTISED_100baseT_Half |
7626 ADVERTISED_TP);
7627 }
7628 break;
7629
7630 case SPEED_1000:
f1410647
ET
7631 if (cmd->duplex != DUPLEX_FULL) {
7632 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7633 return -EINVAL;
f1410647 7634 }
a2fbb9ea 7635
34f80b04 7636 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7637 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7638 return -EINVAL;
f1410647 7639 }
a2fbb9ea
ET
7640
7641 advertising = (ADVERTISED_1000baseT_Full |
7642 ADVERTISED_TP);
7643 break;
7644
7645 case SPEED_2500:
f1410647
ET
7646 if (cmd->duplex != DUPLEX_FULL) {
7647 DP(NETIF_MSG_LINK,
7648 "2.5G half not supported\n");
a2fbb9ea 7649 return -EINVAL;
f1410647 7650 }
a2fbb9ea 7651
34f80b04 7652 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7653 DP(NETIF_MSG_LINK,
7654 "2.5G full not supported\n");
a2fbb9ea 7655 return -EINVAL;
f1410647 7656 }
a2fbb9ea 7657
f1410647 7658 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7659 ADVERTISED_TP);
7660 break;
7661
7662 case SPEED_10000:
f1410647
ET
7663 if (cmd->duplex != DUPLEX_FULL) {
7664 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7665 return -EINVAL;
f1410647 7666 }
a2fbb9ea 7667
34f80b04 7668 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7669 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7670 return -EINVAL;
f1410647 7671 }
a2fbb9ea
ET
7672
7673 advertising = (ADVERTISED_10000baseT_Full |
7674 ADVERTISED_FIBRE);
7675 break;
7676
7677 default:
f1410647 7678 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7679 return -EINVAL;
7680 }
7681
c18487ee
YR
7682 bp->link_params.req_line_speed = cmd->speed;
7683 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7684 bp->port.advertising = advertising;
a2fbb9ea
ET
7685 }
7686
c18487ee 7687 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7688 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7689 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7690 bp->port.advertising);
a2fbb9ea 7691
34f80b04 7692 if (netif_running(dev)) {
bb2a0f7a 7693 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7694 bnx2x_link_set(bp);
7695 }
a2fbb9ea
ET
7696
7697 return 0;
7698}
7699
c18487ee
YR
7700#define PHY_FW_VER_LEN 10
7701
a2fbb9ea
ET
7702static void bnx2x_get_drvinfo(struct net_device *dev,
7703 struct ethtool_drvinfo *info)
7704{
7705 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7706 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7707
7708 strcpy(info->driver, DRV_MODULE_NAME);
7709 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7710
7711 phy_fw_ver[0] = '\0';
34f80b04 7712 if (bp->port.pmf) {
4a37fb66 7713 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7714 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7715 (bp->state != BNX2X_STATE_CLOSED),
7716 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7717 bnx2x_release_phy_lock(bp);
34f80b04 7718 }
c18487ee
YR
7719
7720 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7721 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7722 BCM_5710_FW_REVISION_VERSION,
34f80b04 7723 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7724 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
7725 strcpy(info->bus_info, pci_name(bp->pdev));
7726 info->n_stats = BNX2X_NUM_STATS;
7727 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7728 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7729 info->regdump_len = 0;
7730}
7731
7732static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7733{
7734 struct bnx2x *bp = netdev_priv(dev);
7735
7736 if (bp->flags & NO_WOL_FLAG) {
7737 wol->supported = 0;
7738 wol->wolopts = 0;
7739 } else {
7740 wol->supported = WAKE_MAGIC;
7741 if (bp->wol)
7742 wol->wolopts = WAKE_MAGIC;
7743 else
7744 wol->wolopts = 0;
7745 }
7746 memset(&wol->sopass, 0, sizeof(wol->sopass));
7747}
7748
7749static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7750{
7751 struct bnx2x *bp = netdev_priv(dev);
7752
7753 if (wol->wolopts & ~WAKE_MAGIC)
7754 return -EINVAL;
7755
7756 if (wol->wolopts & WAKE_MAGIC) {
7757 if (bp->flags & NO_WOL_FLAG)
7758 return -EINVAL;
7759
7760 bp->wol = 1;
34f80b04 7761 } else
a2fbb9ea 7762 bp->wol = 0;
34f80b04 7763
a2fbb9ea
ET
7764 return 0;
7765}
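/* Usage sketch (assumes the standard ethtool CLI; "ethX" is a
 * placeholder):
 *
 *	ethtool -s ethX wol g	# enable magic-packet wake
 *	ethtool -s ethX wol d	# disable WoL
 *
 * Any wake option other than magic-packet is rejected above with
 * -EINVAL.
 */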
7766
7767static u32 bnx2x_get_msglevel(struct net_device *dev)
7768{
7769 struct bnx2x *bp = netdev_priv(dev);
7770
7771 return bp->msglevel;
7772}
7773
7774static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7775{
7776 struct bnx2x *bp = netdev_priv(dev);
7777
7778 if (capable(CAP_NET_ADMIN))
7779 bp->msglevel = level;
7780}
7781
7782static int bnx2x_nway_reset(struct net_device *dev)
7783{
7784 struct bnx2x *bp = netdev_priv(dev);
7785
34f80b04
EG
7786 if (!bp->port.pmf)
7787 return 0;
a2fbb9ea 7788
34f80b04 7789 if (netif_running(dev)) {
bb2a0f7a 7790 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7791 bnx2x_link_set(bp);
7792 }
a2fbb9ea
ET
7793
7794 return 0;
7795}
7796
7797static int bnx2x_get_eeprom_len(struct net_device *dev)
7798{
7799 struct bnx2x *bp = netdev_priv(dev);
7800
34f80b04 7801 return bp->common.flash_size;
a2fbb9ea
ET
7802}
7803
7804static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7805{
34f80b04 7806 int port = BP_PORT(bp);
a2fbb9ea
ET
7807 int count, i;
7808 u32 val = 0;
7809
7810 /* adjust timeout for emulation/FPGA */
7811 count = NVRAM_TIMEOUT_COUNT;
7812 if (CHIP_REV_IS_SLOW(bp))
7813 count *= 100;
7814
7815 /* request access to nvram interface */
7816 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7817 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7818
7819 for (i = 0; i < count*10; i++) {
7820 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7821 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7822 break;
7823
7824 udelay(5);
7825 }
7826
7827 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7828 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7829 return -EBUSY;
7830 }
7831
7832 return 0;
7833}
7834
7835static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7836{
34f80b04 7837 int port = BP_PORT(bp);
a2fbb9ea
ET
7838 int count, i;
7839 u32 val = 0;
7840
7841 /* adjust timeout for emulation/FPGA */
7842 count = NVRAM_TIMEOUT_COUNT;
7843 if (CHIP_REV_IS_SLOW(bp))
7844 count *= 100;
7845
7846 /* relinquish nvram interface */
7847 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7848 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7849
7850 for (i = 0; i < count*10; i++) {
7851 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7852 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7853 break;
7854
7855 udelay(5);
7856 }
7857
7858 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7859 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7860 return -EBUSY;
7861 }
7862
7863 return 0;
7864}
7865
7866static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7867{
7868 u32 val;
7869
7870 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7871
7872 /* enable both bits, even on read */
7873 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7874 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7875 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7876}
7877
7878static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7879{
7880 u32 val;
7881
7882 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7883
7884 /* disable both bits, even after read */
7885 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7886 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7887 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7888}
7889
7890static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7891 u32 cmd_flags)
7892{
f1410647 7893 int count, i, rc;
a2fbb9ea
ET
7894 u32 val;
7895
7896 /* build the command word */
7897 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7898
7899 /* need to clear DONE bit separately */
7900 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7901
7902 /* address of the NVRAM to read from */
7903 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7904 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7905
7906 /* issue a read command */
7907 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7908
7909 /* adjust timeout for emulation/FPGA */
7910 count = NVRAM_TIMEOUT_COUNT;
7911 if (CHIP_REV_IS_SLOW(bp))
7912 count *= 100;
7913
7914 /* wait for completion */
7915 *ret_val = 0;
7916 rc = -EBUSY;
7917 for (i = 0; i < count; i++) {
7918 udelay(5);
7919 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7920
7921 if (val & MCPR_NVM_COMMAND_DONE) {
7922 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
7923 /* we read nvram data in cpu order,
7924 * but ethtool sees it as an array of bytes;
7925 * converting to big-endian does the work */
7926 val = cpu_to_be32(val);
7927 *ret_val = val;
7928 rc = 0;
7929 break;
7930 }
7931 }
7932
7933 return rc;
7934}
7935
7936static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7937 int buf_size)
7938{
7939 int rc;
7940 u32 cmd_flags;
7941 u32 val;
7942
7943 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7944 DP(BNX2X_MSG_NVM,
c14423fe 7945 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7946 offset, buf_size);
7947 return -EINVAL;
7948 }
7949
34f80b04
EG
7950 if (offset + buf_size > bp->common.flash_size) {
7951 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7952 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7953 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7954 return -EINVAL;
7955 }
7956
7957 /* request access to nvram interface */
7958 rc = bnx2x_acquire_nvram_lock(bp);
7959 if (rc)
7960 return rc;
7961
7962 /* enable access to nvram interface */
7963 bnx2x_enable_nvram_access(bp);
7964
7965 /* read the first word(s) */
7966 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7967 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7968 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7969 memcpy(ret_buf, &val, 4);
7970
7971 /* advance to the next dword */
7972 offset += sizeof(u32);
7973 ret_buf += sizeof(u32);
7974 buf_size -= sizeof(u32);
7975 cmd_flags = 0;
7976 }
7977
7978 if (rc == 0) {
7979 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7980 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7981 memcpy(ret_buf, &val, 4);
7982 }
7983
7984 /* disable access to nvram interface */
7985 bnx2x_disable_nvram_access(bp);
7986 bnx2x_release_nvram_lock(bp);
7987
7988 return rc;
7989}
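/* Usage sketch (illustrative, not an existing caller): reading the
 * first 64 bytes of NVRAM via the helper above; offset and size must
 * be dword aligned, as enforced at the top of bnx2x_nvram_read().
 *
 *	u8 buf[64];
 *	int rc = bnx2x_nvram_read(bp, 0, buf, sizeof(buf));
 *	if (rc)
 *		return rc;
 */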
7990
7991static int bnx2x_get_eeprom(struct net_device *dev,
7992 struct ethtool_eeprom *eeprom, u8 *eebuf)
7993{
7994 struct bnx2x *bp = netdev_priv(dev);
7995 int rc;
7996
34f80b04 7997 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
7998 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7999 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8000 eeprom->len, eeprom->len);
8001
8002 /* parameters already validated in ethtool_get_eeprom */
8003
8004 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8005
8006 return rc;
8007}
8008
8009static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8010 u32 cmd_flags)
8011{
f1410647 8012 int count, i, rc;
a2fbb9ea
ET
8013
8014 /* build the command word */
8015 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8016
8017 /* need to clear DONE bit separately */
8018 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8019
8020 /* write the data */
8021 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8022
8023 /* address of the NVRAM to write to */
8024 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8025 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8026
8027 /* issue the write command */
8028 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8029
8030 /* adjust timeout for emulation/FPGA */
8031 count = NVRAM_TIMEOUT_COUNT;
8032 if (CHIP_REV_IS_SLOW(bp))
8033 count *= 100;
8034
8035 /* wait for completion */
8036 rc = -EBUSY;
8037 for (i = 0; i < count; i++) {
8038 udelay(5);
8039 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8040 if (val & MCPR_NVM_COMMAND_DONE) {
8041 rc = 0;
8042 break;
8043 }
8044 }
8045
8046 return rc;
8047}
8048
f1410647 8049#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
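/* Worked example: BYTE_OFFSET(0x13) = 8 * (0x13 & 0x03) = 24, i.e. the
 * byte at NVRAM offset 0x13 occupies bits 31:24 of the aligned dword
 * manipulated in bnx2x_nvram_write1() below.
 */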
a2fbb9ea
ET
8050
8051static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8052 int buf_size)
8053{
8054 int rc;
8055 u32 cmd_flags;
8056 u32 align_offset;
8057 u32 val;
8058
34f80b04
EG
8059 if (offset + buf_size > bp->common.flash_size) {
8060 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8061 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8062 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8063 return -EINVAL;
8064 }
8065
8066 /* request access to nvram interface */
8067 rc = bnx2x_acquire_nvram_lock(bp);
8068 if (rc)
8069 return rc;
8070
8071 /* enable access to nvram interface */
8072 bnx2x_enable_nvram_access(bp);
8073
8074 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8075 align_offset = (offset & ~0x03);
8076 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8077
8078 if (rc == 0) {
8079 val &= ~(0xff << BYTE_OFFSET(offset));
8080 val |= (*data_buf << BYTE_OFFSET(offset));
8081
8082 /* nvram data is returned as an array of bytes;
8083 * convert it back to cpu order */
8084 val = be32_to_cpu(val);
8085
a2fbb9ea
ET
8086 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8087 cmd_flags);
8088 }
8089
8090 /* disable access to nvram interface */
8091 bnx2x_disable_nvram_access(bp);
8092 bnx2x_release_nvram_lock(bp);
8093
8094 return rc;
8095}
8096
8097static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8098 int buf_size)
8099{
8100 int rc;
8101 u32 cmd_flags;
8102 u32 val;
8103 u32 written_so_far;
8104
34f80b04 8105 if (buf_size == 1) /* ethtool */
a2fbb9ea 8106 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8107
8108 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8109 DP(BNX2X_MSG_NVM,
c14423fe 8110 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8111 offset, buf_size);
8112 return -EINVAL;
8113 }
8114
34f80b04
EG
8115 if (offset + buf_size > bp->common.flash_size) {
8116 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8117 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8118 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8119 return -EINVAL;
8120 }
8121
8122 /* request access to nvram interface */
8123 rc = bnx2x_acquire_nvram_lock(bp);
8124 if (rc)
8125 return rc;
8126
8127 /* enable access to nvram interface */
8128 bnx2x_enable_nvram_access(bp);
8129
8130 written_so_far = 0;
8131 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8132 while ((written_so_far < buf_size) && (rc == 0)) {
8133 if (written_so_far == (buf_size - sizeof(u32)))
8134 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8135 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8136 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8137 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8138 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8139
8140 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8141
8142 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8143
8144 /* advance to the next dword */
8145 offset += sizeof(u32);
8146 data_buf += sizeof(u32);
8147 written_so_far += sizeof(u32);
8148 cmd_flags = 0;
8149 }
8150
8151 /* disable access to nvram interface */
8152 bnx2x_disable_nvram_access(bp);
8153 bnx2x_release_nvram_lock(bp);
8154
8155 return rc;
8156}
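/* Worked example (illustrative): with a 256-byte NVRAM page, writing
 * 8 bytes at offset 252 issues two dword commands, both flagged
 * FIRST | LAST: the dword at 252 opens the transfer and ends at the
 * page boundary, and the dword at 256 opens a new page and is the
 * last dword of the buffer.
 */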
8157
8158static int bnx2x_set_eeprom(struct net_device *dev,
8159 struct ethtool_eeprom *eeprom, u8 *eebuf)
8160{
8161 struct bnx2x *bp = netdev_priv(dev);
8162 int rc;
8163
34f80b04 8164 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8165 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8166 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8167 eeprom->len, eeprom->len);
8168
8169 /* parameters already validated in ethtool_set_eeprom */
8170
c18487ee 8171 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8172 if (eeprom->magic == 0x00504859) {
8173 if (bp->port.pmf) {
8174
4a37fb66 8175 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8176 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8177 bp->link_params.ext_phy_config,
8178 (bp->state != BNX2X_STATE_CLOSED),
8179 eebuf, eeprom->len);
bb2a0f7a
YG
8180 if ((bp->state == BNX2X_STATE_OPEN) ||
8181 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8182 rc |= bnx2x_link_reset(&bp->link_params,
8183 &bp->link_vars);
8184 rc |= bnx2x_phy_init(&bp->link_params,
8185 &bp->link_vars);
bb2a0f7a 8186 }
4a37fb66 8187 bnx2x_release_phy_lock(bp);
34f80b04
EG
8188
8189 } else /* Only the PMF can access the PHY */
8190 return -EINVAL;
8191 } else
c18487ee 8192 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8193
8194 return rc;
8195}
8196
8197static int bnx2x_get_coalesce(struct net_device *dev,
8198 struct ethtool_coalesce *coal)
8199{
8200 struct bnx2x *bp = netdev_priv(dev);
8201
8202 memset(coal, 0, sizeof(struct ethtool_coalesce));
8203
8204 coal->rx_coalesce_usecs = bp->rx_ticks;
8205 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8206
8207 return 0;
8208}
8209
8210static int bnx2x_set_coalesce(struct net_device *dev,
8211 struct ethtool_coalesce *coal)
8212{
8213 struct bnx2x *bp = netdev_priv(dev);
8214
8215 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8216 if (bp->rx_ticks > 3000)
8217 bp->rx_ticks = 3000;
8218
8219 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8220 if (bp->tx_ticks > 0x3000)
8221 bp->tx_ticks = 0x3000;
8222
34f80b04 8223 if (netif_running(dev))
a2fbb9ea
ET
8224 bnx2x_update_coalesce(bp);
8225
8226 return 0;
8227}
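/* Usage sketch (assumes the standard ethtool CLI; "ethX" is a
 * placeholder) matching the defaults set in bnx2x_init_bp():
 *
 *	ethtool -C ethX rx-usecs 25 tx-usecs 50
 *
 * Out-of-range values are silently clamped above (rx at 3000, tx at
 * 0x3000) rather than rejected.
 */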
8228
7a9b2557
VZ
8229static int bnx2x_set_flags(struct net_device *dev, u32 data)
8230{
8231 struct bnx2x *bp = netdev_priv(dev);
8232 int changed = 0;
8233 int rc = 0;
8234
8235 if (data & ETH_FLAG_LRO) {
8236 if (!(dev->features & NETIF_F_LRO)) {
8237 dev->features |= NETIF_F_LRO;
8238 bp->flags |= TPA_ENABLE_FLAG;
8239 changed = 1;
8240 }
8241
8242 } else if (dev->features & NETIF_F_LRO) {
8243 dev->features &= ~NETIF_F_LRO;
8244 bp->flags &= ~TPA_ENABLE_FLAG;
8245 changed = 1;
8246 }
8247
8248 if (changed && netif_running(dev)) {
8249 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8250 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8251 }
8252
8253 return rc;
8254}
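/* Usage sketch (assumption: the ethtool CLI maps its LRO offload
 * toggle to ETH_FLAG_LRO; "ethX" is a placeholder):
 *
 *	ethtool -K ethX lro on
 *	ethtool -K ethX lro off
 *
 * An actual change of the flag triggers a full unload/reload above.
 */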
8255
a2fbb9ea
ET
8256static void bnx2x_get_ringparam(struct net_device *dev,
8257 struct ethtool_ringparam *ering)
8258{
8259 struct bnx2x *bp = netdev_priv(dev);
8260
8261 ering->rx_max_pending = MAX_RX_AVAIL;
8262 ering->rx_mini_max_pending = 0;
8263 ering->rx_jumbo_max_pending = 0;
8264
8265 ering->rx_pending = bp->rx_ring_size;
8266 ering->rx_mini_pending = 0;
8267 ering->rx_jumbo_pending = 0;
8268
8269 ering->tx_max_pending = MAX_TX_AVAIL;
8270 ering->tx_pending = bp->tx_ring_size;
8271}
8272
8273static int bnx2x_set_ringparam(struct net_device *dev,
8274 struct ethtool_ringparam *ering)
8275{
8276 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8277 int rc = 0;
a2fbb9ea
ET
8278
8279 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8280 (ering->tx_pending > MAX_TX_AVAIL) ||
8281 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8282 return -EINVAL;
8283
8284 bp->rx_ring_size = ering->rx_pending;
8285 bp->tx_ring_size = ering->tx_pending;
8286
34f80b04
EG
8287 if (netif_running(dev)) {
8288 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8289 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8290 }
8291
34f80b04 8292 return rc;
a2fbb9ea
ET
8293}
8294
8295static void bnx2x_get_pauseparam(struct net_device *dev,
8296 struct ethtool_pauseparam *epause)
8297{
8298 struct bnx2x *bp = netdev_priv(dev);
8299
c18487ee
YR
8300 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8301 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8302
8303 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8304 FLOW_CTRL_RX);
8305 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8306 FLOW_CTRL_TX);
a2fbb9ea
ET
8307
8308 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8309 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8310 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8311}
8312
8313static int bnx2x_set_pauseparam(struct net_device *dev,
8314 struct ethtool_pauseparam *epause)
8315{
8316 struct bnx2x *bp = netdev_priv(dev);
8317
34f80b04
EG
8318 if (IS_E1HMF(bp))
8319 return 0;
8320
a2fbb9ea
ET
8321 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8322 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8323 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8324
c18487ee 8325 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8326
f1410647 8327 if (epause->rx_pause)
c18487ee
YR
8328 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8329
f1410647 8330 if (epause->tx_pause)
c18487ee
YR
8331 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8332
8333 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8334 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8335
c18487ee 8336 if (epause->autoneg) {
34f80b04 8337 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
c18487ee
YR
8338 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8339 return -EINVAL;
8340 }
a2fbb9ea 8341
c18487ee
YR
8342 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8343 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8344 }
a2fbb9ea 8345
c18487ee
YR
8346 DP(NETIF_MSG_LINK,
8347 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8348
8349 if (netif_running(dev)) {
bb2a0f7a 8350 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8351 bnx2x_link_set(bp);
8352 }
a2fbb9ea
ET
8353
8354 return 0;
8355}
8356
8357static u32 bnx2x_get_rx_csum(struct net_device *dev)
8358{
8359 struct bnx2x *bp = netdev_priv(dev);
8360
8361 return bp->rx_csum;
8362}
8363
8364static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8365{
8366 struct bnx2x *bp = netdev_priv(dev);
8367
8368 bp->rx_csum = data;
8369 return 0;
8370}
8371
8372static int bnx2x_set_tso(struct net_device *dev, u32 data)
8373{
755735eb 8374 if (data) {
a2fbb9ea 8375 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8376 dev->features |= NETIF_F_TSO6;
8377 } else {
a2fbb9ea 8378 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8379 dev->features &= ~NETIF_F_TSO6;
8380 }
8381
a2fbb9ea
ET
8382 return 0;
8383}
8384
f3c87cdd 8385static const struct {
a2fbb9ea
ET
8386 char string[ETH_GSTRING_LEN];
8387} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8388 { "register_test (offline)" },
8389 { "memory_test (offline)" },
8390 { "loopback_test (offline)" },
8391 { "nvram_test (online)" },
8392 { "interrupt_test (online)" },
8393 { "link_test (online)" },
8394 { "idle check (online)" },
8395 { "MC errors (online)" }
a2fbb9ea
ET
8396};
8397
8398static int bnx2x_self_test_count(struct net_device *dev)
8399{
8400 return BNX2X_NUM_TESTS;
8401}
8402
f3c87cdd
YG
8403static int bnx2x_test_registers(struct bnx2x *bp)
8404{
8405 int idx, i, rc = -ENODEV;
8406 u32 wr_val = 0;
9dabc424 8407 int port = BP_PORT(bp);
f3c87cdd
YG
8408 static const struct {
8409 u32 offset0;
8410 u32 offset1;
8411 u32 mask;
8412 } reg_tbl[] = {
8413/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8414 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8415 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8416 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8417 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8418 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8419 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8420 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8421 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8422 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8423/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8424 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8425 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8426 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8427 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8428 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8429 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8430 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8431 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8432 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8433/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8434 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8435 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8436 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8437 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8438 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8439 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8440 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8441 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8442 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8443/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8444 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8445 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8446 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8447 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8448 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8449 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8450 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8451
8452 { 0xffffffff, 0, 0x00000000 }
8453 };
8454
8455 if (!netif_running(bp->dev))
8456 return rc;
8457
8458 /* Repeat the test twice:
8459 First by writing 0x00000000, second by writing 0xffffffff */
8460 for (idx = 0; idx < 2; idx++) {
8461
8462 switch (idx) {
8463 case 0:
8464 wr_val = 0;
8465 break;
8466 case 1:
8467 wr_val = 0xffffffff;
8468 break;
8469 }
8470
8471 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8472 u32 offset, mask, save_val, val;
f3c87cdd
YG
8473
8474 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8475 mask = reg_tbl[i].mask;
8476
8477 save_val = REG_RD(bp, offset);
8478
8479 REG_WR(bp, offset, wr_val);
8480 val = REG_RD(bp, offset);
8481
8482 /* Restore the original register's value */
8483 REG_WR(bp, offset, save_val);
8484
8485 /* verify the value is as expected */
8486 if ((val & mask) != (wr_val & mask))
8487 goto test_reg_exit;
8488 }
8489 }
8490
8491 rc = 0;
8492
8493test_reg_exit:
8494 return rc;
8495}
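/* Worked example (illustrative): offset1 is the per-port stride, so
 * for the entry { HC_REG_AGG_INT_0, 4, 0x000003ff } on port 1 the
 * test exercises HC_REG_AGG_INT_0 + 1*4, and only bits inside the
 * 0x000003ff mask are compared after the read-back.
 */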
8496
8497static int bnx2x_test_memory(struct bnx2x *bp)
8498{
8499 int i, j, rc = -ENODEV;
8500 u32 val;
8501 static const struct {
8502 u32 offset;
8503 int size;
8504 } mem_tbl[] = {
8505 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8506 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8507 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8508 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8509 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8510 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8511 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8512
8513 { 0xffffffff, 0 }
8514 };
8515 static const struct {
8516 char *name;
8517 u32 offset;
9dabc424
YG
8518 u32 e1_mask;
8519 u32 e1h_mask;
f3c87cdd 8520 } prty_tbl[] = {
9dabc424
YG
8521 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8522 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8523 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8524 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8525 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8526 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8527
8528 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
8529 };
8530
8531 if (!netif_running(bp->dev))
8532 return rc;
8533
8534 /* Go through all the memories */
8535 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8536 for (j = 0; j < mem_tbl[i].size; j++)
8537 REG_RD(bp, mem_tbl[i].offset + j*4);
8538
8539 /* Check the parity status */
8540 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8541 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
8542 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8543 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
8544 DP(NETIF_MSG_HW,
8545 "%s is 0x%x\n", prty_tbl[i].name, val);
8546 goto test_mem_exit;
8547 }
8548 }
8549
8550 rc = 0;
8551
8552test_mem_exit:
8553 return rc;
8554}
8555
8556static void bnx2x_netif_start(struct bnx2x *bp)
8557{
8558 int i;
8559
8560 if (atomic_dec_and_test(&bp->intr_sem)) {
8561 if (netif_running(bp->dev)) {
8562 bnx2x_int_enable(bp);
8563 for_each_queue(bp, i)
8564 napi_enable(&bnx2x_fp(bp, i, napi));
8565 if (bp->state == BNX2X_STATE_OPEN)
8566 netif_wake_queue(bp->dev);
8567 }
8568 }
8569}
8570
8571static void bnx2x_netif_stop(struct bnx2x *bp)
8572{
8573 int i;
8574
8575 if (netif_running(bp->dev)) {
8576 netif_tx_disable(bp->dev);
8577 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8578 for_each_queue(bp, i)
8579 napi_disable(&bnx2x_fp(bp, i, napi));
8580 }
8581 bnx2x_int_disable_sync(bp);
8582}
8583
8584static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8585{
8586 int cnt = 1000;
8587
8588 if (link_up)
8589 while (bnx2x_link_test(bp) && cnt--)
8590 msleep(10);
8591}
8592
8593static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8594{
8595 unsigned int pkt_size, num_pkts, i;
8596 struct sk_buff *skb;
8597 unsigned char *packet;
8598 struct bnx2x_fastpath *fp = &bp->fp[0];
8599 u16 tx_start_idx, tx_idx;
8600 u16 rx_start_idx, rx_idx;
8601 u16 pkt_prod;
8602 struct sw_tx_bd *tx_buf;
8603 struct eth_tx_bd *tx_bd;
8604 dma_addr_t mapping;
8605 union eth_rx_cqe *cqe;
8606 u8 cqe_fp_flags;
8607 struct sw_rx_bd *rx_buf;
8608 u16 len;
8609 int rc = -ENODEV;
8610
8611 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8612 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8613 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8614 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8615 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
8616
8617 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8618 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8619 bnx2x_acquire_phy_lock(bp);
8620 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8621 bnx2x_release_phy_lock(bp);
8622 /* wait until link state is restored */
8623 bnx2x_wait_for_link(bp, link_up);
8624
8625 } else
8626 return -EINVAL;
8627
8628 pkt_size = 1514;
8629 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8630 if (!skb) {
8631 rc = -ENOMEM;
8632 goto test_loopback_exit;
8633 }
8634 packet = skb_put(skb, pkt_size);
8635 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8636 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8637 for (i = ETH_HLEN; i < pkt_size; i++)
8638 packet[i] = (unsigned char) (i & 0xff);
8639
8640 num_pkts = 0;
8641 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8642 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8643
8644 pkt_prod = fp->tx_pkt_prod++;
8645 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8646 tx_buf->first_bd = fp->tx_bd_prod;
8647 tx_buf->skb = skb;
8648
8649 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8650 mapping = pci_map_single(bp->pdev, skb->data,
8651 skb_headlen(skb), PCI_DMA_TODEVICE);
8652 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8653 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8654 tx_bd->nbd = cpu_to_le16(1);
8655 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8656 tx_bd->vlan = cpu_to_le16(pkt_prod);
8657 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8658 ETH_TX_BD_FLAGS_END_BD);
8659 tx_bd->general_data = ((UNICAST_ADDRESS <<
8660 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8661
8662 fp->hw_tx_prods->bds_prod =
8663 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8664 mb(); /* FW restriction: must not reorder writing nbd and packets */
8665 fp->hw_tx_prods->packets_prod =
8666 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8667 DOORBELL(bp, FP_IDX(fp), 0);
8668
8669 mmiowb();
8670
8671 num_pkts++;
8672 fp->tx_bd_prod++;
8673 bp->dev->trans_start = jiffies;
8674
8675 udelay(100);
8676
8677 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8678 if (tx_idx != tx_start_idx + num_pkts)
8679 goto test_loopback_exit;
8680
8681 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8682 if (rx_idx != rx_start_idx + num_pkts)
8683 goto test_loopback_exit;
8684
8685 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8686 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8687 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8688 goto test_loopback_rx_exit;
8689
8690 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8691 if (len != pkt_size)
8692 goto test_loopback_rx_exit;
8693
8694 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8695 skb = rx_buf->skb;
8696 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8697 for (i = ETH_HLEN; i < pkt_size; i++)
8698 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8699 goto test_loopback_rx_exit;
8700
8701 rc = 0;
8702
8703test_loopback_rx_exit:
8704 bp->dev->last_rx = jiffies;
8705
8706 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8707 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8708 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8709 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8710
8711 /* Update producers */
8712 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8713 fp->rx_sge_prod);
8714 mmiowb(); /* keep prod updates ordered */
8715
8716test_loopback_exit:
8717 bp->link_params.loopback_mode = LOOPBACK_NONE;
8718
8719 return rc;
8720}
8721
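/* The loopback test above is deliberately minimal: one self-addressed
 * 1514-byte frame with a counting-pattern payload is posted as a
 * single BD, the doorbell is rung, and after a short delay the TX and
 * RX consumer indices must each have advanced by exactly one packet
 * and the payload must read back byte-for-byte.
 */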
8722static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8723{
8724 int rc = 0;
8725
8726 if (!netif_running(bp->dev))
8727 return BNX2X_LOOPBACK_FAILED;
8728
8729 bnx2x_netif_stop(bp);
8730
8731 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8732 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8733 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8734 }
8735
8736 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8737 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8738 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8739 }
8740
8741 bnx2x_netif_start(bp);
8742
8743 return rc;
8744}
8745
8746#define CRC32_RESIDUAL 0xdebb20e3
8747
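/* 0xdebb20e3 is the standard CRC-32 residual: feeding ether_crc_le()
 * a block that ends with its own little-endian CRC-32 always yields
 * this constant, so each nvram_tbl[] region can be verified without
 * knowing where inside it the checksum is stored.
 */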
8748static int bnx2x_test_nvram(struct bnx2x *bp)
8749{
8750 static const struct {
8751 int offset;
8752 int size;
8753 } nvram_tbl[] = {
8754 { 0, 0x14 }, /* bootstrap */
8755 { 0x14, 0xec }, /* dir */
8756 { 0x100, 0x350 }, /* manuf_info */
8757 { 0x450, 0xf0 }, /* feature_info */
8758 { 0x640, 0x64 }, /* upgrade_key_info */
8759 { 0x6a4, 0x64 },
8760 { 0x708, 0x70 }, /* manuf_key_info */
8761 { 0x778, 0x70 },
8762 { 0, 0 }
8763 };
8764 u32 buf[0x350 / 4];
8765 u8 *data = (u8 *)buf;
8766 int i, rc;
8767 u32 magic, csum;
8768
8769 rc = bnx2x_nvram_read(bp, 0, data, 4);
8770 if (rc) {
8771 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8772 goto test_nvram_exit;
8773 }
8774
8775 magic = be32_to_cpu(buf[0]);
8776 if (magic != 0x669955aa) {
8777 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8778 rc = -ENODEV;
8779 goto test_nvram_exit;
8780 }
8781
8782 for (i = 0; nvram_tbl[i].size; i++) {
8783
8784 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8785 nvram_tbl[i].size);
8786 if (rc) {
8787 DP(NETIF_MSG_PROBE,
8788 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8789 goto test_nvram_exit;
8790 }
8791
8792 csum = ether_crc_le(nvram_tbl[i].size, data);
8793 if (csum != CRC32_RESIDUAL) {
8794 DP(NETIF_MSG_PROBE,
8795 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8796 rc = -ENODEV;
8797 goto test_nvram_exit;
8798 }
8799 }
8800
8801test_nvram_exit:
8802 return rc;
8803}
8804
8805static int bnx2x_test_intr(struct bnx2x *bp)
8806{
8807 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8808 int i, rc;
8809
8810 if (!netif_running(bp->dev))
8811 return -ENODEV;
8812
8813 config->hdr.length_6b = 0;
8814 config->hdr.offset = 0;
8815 config->hdr.client_id = BP_CL_ID(bp);
8816 config->hdr.reserved1 = 0;
8817
8818 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8819 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8820 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8821 if (rc == 0) {
8822 bp->set_mac_pending++;
8823 for (i = 0; i < 10; i++) {
8824 if (!bp->set_mac_pending)
8825 break;
8826 msleep_interruptible(10);
8827 }
8828 if (i == 10)
8829 rc = -ENODEV;
8830 }
8831
8832 return rc;
8833}
8834
8835static void bnx2x_self_test(struct net_device *dev,
8836 struct ethtool_test *etest, u64 *buf)
8837{
8838 struct bnx2x *bp = netdev_priv(dev);
8839
8840 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8841
8842 if (!netif_running(dev))
8843 return;
8844
8845 /* offline tests are not supported in MF mode */
8846 if (IS_E1HMF(bp))
8847 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8848
8849 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8850 u8 link_up;
8851
8852 link_up = bp->link_vars.link_up;
8853 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8854 bnx2x_nic_load(bp, LOAD_DIAG);
8855 /* wait until link state is restored */
8856 bnx2x_wait_for_link(bp, link_up);
8857
8858 if (bnx2x_test_registers(bp) != 0) {
8859 buf[0] = 1;
8860 etest->flags |= ETH_TEST_FL_FAILED;
8861 }
8862 if (bnx2x_test_memory(bp) != 0) {
8863 buf[1] = 1;
8864 etest->flags |= ETH_TEST_FL_FAILED;
8865 }
8866 buf[2] = bnx2x_test_loopback(bp, link_up);
8867 if (buf[2] != 0)
8868 etest->flags |= ETH_TEST_FL_FAILED;
8869
8870 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8871 bnx2x_nic_load(bp, LOAD_NORMAL);
8872 /* wait until link state is restored */
8873 bnx2x_wait_for_link(bp, link_up);
8874 }
8875 if (bnx2x_test_nvram(bp) != 0) {
8876 buf[3] = 1;
8877 etest->flags |= ETH_TEST_FL_FAILED;
8878 }
8879 if (bnx2x_test_intr(bp) != 0) {
8880 buf[4] = 1;
8881 etest->flags |= ETH_TEST_FL_FAILED;
8882 }
8883 if (bp->port.pmf)
8884 if (bnx2x_link_test(bp) != 0) {
8885 buf[5] = 1;
8886 etest->flags |= ETH_TEST_FL_FAILED;
8887 }
8888 buf[7] = bnx2x_mc_assert(bp);
8889 if (buf[7] != 0)
8890 etest->flags |= ETH_TEST_FL_FAILED;
8891
8892#ifdef BNX2X_EXTRA_DEBUG
8893 bnx2x_panic_dump(bp);
8894#endif
8895}
8896
8897static const struct {
8898 long offset;
8899 int size;
8900 u32 flags;
8901#define STATS_FLAGS_PORT 1
8902#define STATS_FLAGS_FUNC 2
8903 u8 string[ETH_GSTRING_LEN];
8904} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8905/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8906 8, STATS_FLAGS_FUNC, "rx_bytes" },
8907 { STATS_OFFSET32(error_bytes_received_hi),
8908 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8909 { STATS_OFFSET32(total_bytes_transmitted_hi),
8910 8, STATS_FLAGS_FUNC, "tx_bytes" },
8911 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8912 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8913 { STATS_OFFSET32(total_unicast_packets_received_hi),
8914 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8915 { STATS_OFFSET32(total_multicast_packets_received_hi),
8916 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8917 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8918 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8919 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8920 8, STATS_FLAGS_FUNC, "tx_packets" },
8921 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8922 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8923/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8924 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8925 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8926 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8927 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8928 8, STATS_FLAGS_PORT, "rx_align_errors" },
8929 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8930 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8931 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8932 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8933 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8934 8, STATS_FLAGS_PORT, "tx_deferred" },
8935 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8936 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8937 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8938 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8939 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8940 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8941 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8942 8, STATS_FLAGS_PORT, "rx_fragments" },
8943/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8944 8, STATS_FLAGS_PORT, "rx_jabbers" },
8945 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8946 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8947 { STATS_OFFSET32(jabber_packets_received),
8948 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8949 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8950 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8951 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8952 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8953 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8954 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8955 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8956 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8957 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8958 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8959 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8960 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8961 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8962 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8963/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8964 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8965 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8966 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8967 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8968 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8969 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8970 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8971 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8972 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8973 { STATS_OFFSET32(mac_filter_discard),
8974 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8975 { STATS_OFFSET32(no_buff_discard),
8976 4, STATS_FLAGS_FUNC, "rx_discards" },
8977 { STATS_OFFSET32(xxoverflow_discard),
8978 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8979 { STATS_OFFSET32(brb_drop_hi),
8980 8, STATS_FLAGS_PORT, "brb_discard" },
8981 { STATS_OFFSET32(brb_truncate_hi),
8982 8, STATS_FLAGS_PORT, "brb_truncate" },
8983/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8984 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8985 { STATS_OFFSET32(rx_skb_alloc_failed),
8986 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8987/* 42 */{ STATS_OFFSET32(hw_csum_err),
8988 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8989};
8990
8991#define IS_NOT_E1HMF_STAT(bp, i) \
8992 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
8993
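/* In E1H multi-function mode the port-level MAC counters are shared
 * between functions, so entries flagged STATS_FLAGS_PORT are filtered
 * out and only the per-function (STATS_FLAGS_FUNC) counters are
 * reported to ethtool.
 */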
8994static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8995{
8996 struct bnx2x *bp = netdev_priv(dev);
8997 int i, j;
8998
8999 switch (stringset) {
9000 case ETH_SS_STATS:
9001 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9002 if (IS_NOT_E1HMF_STAT(bp, i))
9003 continue;
9004 strcpy(buf + j*ETH_GSTRING_LEN,
9005 bnx2x_stats_arr[i].string);
9006 j++;
9007 }
9008 break;
9009
9010 case ETH_SS_TEST:
9011 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9012 break;
9013 }
9014}
9015
9016static int bnx2x_get_stats_count(struct net_device *dev)
9017{
9018 struct bnx2x *bp = netdev_priv(dev);
9019 int i, num_stats = 0;
9020
9021 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9022 if (IS_NOT_E1HMF_STAT(bp, i))
9023 continue;
9024 num_stats++;
9025 }
9026 return num_stats;
9027}
9028
9029static void bnx2x_get_ethtool_stats(struct net_device *dev,
9030 struct ethtool_stats *stats, u64 *buf)
9031{
9032 struct bnx2x *bp = netdev_priv(dev);
9033 u32 *hw_stats = (u32 *)&bp->eth_stats;
9034 int i, j;
9035
9036 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9037 if (IS_NOT_E1HMF_STAT(bp, i))
9038 continue;
9039
9040 if (bnx2x_stats_arr[i].size == 0) {
9041 /* skip this counter */
9042 buf[j] = 0;
9043 j++;
9044 continue;
9045 }
9046 if (bnx2x_stats_arr[i].size == 4) {
9047 /* 4-byte counter */
9048 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9049 j++;
9050 continue;
9051 }
9052 /* 8-byte counter */
9053 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9054 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9055 j++;
9056 }
9057}
9058
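/* 64-bit counters are stored as two consecutive 32-bit words in
 * bp->eth_stats (hi word first); HILO_U64() above recombines them,
 * while 4-byte counters are simply widened and zero-sized entries
 * are reported as 0.
 */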
9059static int bnx2x_phys_id(struct net_device *dev, u32 data)
9060{
9061 struct bnx2x *bp = netdev_priv(dev);
9062 int port = BP_PORT(bp);
9063 int i;
9064
9065 if (!netif_running(dev))
9066 return 0;
9067
9068 if (!bp->port.pmf)
9069 return 0;
9070
9071 if (data == 0)
9072 data = 2;
9073
9074 for (i = 0; i < (data * 2); i++) {
9075 if ((i % 2) == 0)
9076 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9077 bp->link_params.hw_led_mode,
9078 bp->link_params.chip_id);
9079 else
9080 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9081 bp->link_params.hw_led_mode,
9082 bp->link_params.chip_id);
9083
9084 msleep_interruptible(500);
9085 if (signal_pending(current))
9086 break;
9087 }
9088
9089 if (bp->link_vars.link_up)
9090 bnx2x_set_led(bp, port, LED_MODE_OPER,
9091 bp->link_vars.line_speed,
9092 bp->link_params.hw_led_mode,
9093 bp->link_params.chip_id);
9094
9095 return 0;
9096}
9097
9098static struct ethtool_ops bnx2x_ethtool_ops = {
9099 .get_settings = bnx2x_get_settings,
9100 .set_settings = bnx2x_set_settings,
9101 .get_drvinfo = bnx2x_get_drvinfo,
9102 .get_wol = bnx2x_get_wol,
9103 .set_wol = bnx2x_set_wol,
9104 .get_msglevel = bnx2x_get_msglevel,
9105 .set_msglevel = bnx2x_set_msglevel,
9106 .nway_reset = bnx2x_nway_reset,
9107 .get_link = ethtool_op_get_link,
9108 .get_eeprom_len = bnx2x_get_eeprom_len,
9109 .get_eeprom = bnx2x_get_eeprom,
9110 .set_eeprom = bnx2x_set_eeprom,
9111 .get_coalesce = bnx2x_get_coalesce,
9112 .set_coalesce = bnx2x_set_coalesce,
9113 .get_ringparam = bnx2x_get_ringparam,
9114 .set_ringparam = bnx2x_set_ringparam,
9115 .get_pauseparam = bnx2x_get_pauseparam,
9116 .set_pauseparam = bnx2x_set_pauseparam,
9117 .get_rx_csum = bnx2x_get_rx_csum,
9118 .set_rx_csum = bnx2x_set_rx_csum,
9119 .get_tx_csum = ethtool_op_get_tx_csum,
9120 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9121 .set_flags = bnx2x_set_flags,
9122 .get_flags = ethtool_op_get_flags,
9123 .get_sg = ethtool_op_get_sg,
9124 .set_sg = ethtool_op_set_sg,
9125 .get_tso = ethtool_op_get_tso,
9126 .set_tso = bnx2x_set_tso,
9127 .self_test_count = bnx2x_self_test_count,
9128 .self_test = bnx2x_self_test,
9129 .get_strings = bnx2x_get_strings,
9130 .phys_id = bnx2x_phys_id,
9131 .get_stats_count = bnx2x_get_stats_count,
9132 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9133};
9134
9135/* end of ethtool_ops */
9136
9137/****************************************************************************
9138* General service functions
9139****************************************************************************/
9140
9141static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9142{
9143 u16 pmcsr;
9144
9145 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9146
9147 switch (state) {
9148 case PCI_D0:
9149 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9150 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9151 PCI_PM_CTRL_PME_STATUS));
9152
9153 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9154 /* delay required during transition out of D3hot */
9155 msleep(20);
9156 break;
9157
9158 case PCI_D3hot:
9159 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9160 pmcsr |= 3;
9161
9162 if (bp->wol)
9163 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9164
9165 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9166 pmcsr);
9167
9168 /* No more memory access after this point until
9169 * device is brought back to D0.
9170 */
9171 break;
9172
9173 default:
9174 return -EINVAL;
9175 }
9176 return 0;
9177}
9178
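/* D3hot is entered by writing 3 into the PM control state field; if
 * WoL is configured, PME generation is enabled in the same write so
 * the card can wake the system. Coming back to D0, the 20ms sleep
 * covers the recovery time required after D3hot.
 */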
9179/*
9180 * net_device service functions
9181 */
9182
9183static int bnx2x_poll(struct napi_struct *napi, int budget)
9184{
9185 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9186 napi);
9187 struct bnx2x *bp = fp->bp;
9188 int work_done = 0;
9189
9190#ifdef BNX2X_STOP_ON_ERROR
9191 if (unlikely(bp->panic))
9192 goto poll_panic;
9193#endif
9194
9195 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9196 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9197 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9198
9199 bnx2x_update_fpsb_idx(fp);
9200
9201 if (BNX2X_HAS_TX_WORK(fp))
9202 bnx2x_tx_int(fp, budget);
9203
9204 if (BNX2X_HAS_RX_WORK(fp))
9205 work_done = bnx2x_rx_int(fp, budget);
9206
9207 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9208
9209 /* must not complete if we consumed full budget */
9210 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9211
9212#ifdef BNX2X_STOP_ON_ERROR
9213poll_panic:
9214#endif
9215 netif_rx_complete(bp->dev, napi);
9216
9217 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9218 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9219 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9220 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9221 }
9222 return work_done;
9223}
9224
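/* Standard NAPI contract: the poll routine above may only complete
 * (netif_rx_complete() plus IGU interrupt re-enable) when less than
 * the full budget was consumed and the status block shows no further
 * work; the rmb() orders the status-block reads against that check.
 */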
9225
9226/* we split the first BD into headers and data BDs
9227 * to ease the pain of our fellow microcode engineers
9228 * we use one mapping for both BDs
9229 * So far this has only been observed to happen
9230 * in Other Operating Systems(TM)
9231 */
9232static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9233 struct bnx2x_fastpath *fp,
9234 struct eth_tx_bd **tx_bd, u16 hlen,
9235 u16 bd_prod, int nbd)
9236{
9237 struct eth_tx_bd *h_tx_bd = *tx_bd;
9238 struct eth_tx_bd *d_tx_bd;
9239 dma_addr_t mapping;
9240 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9241
9242 /* first fix first BD */
9243 h_tx_bd->nbd = cpu_to_le16(nbd);
9244 h_tx_bd->nbytes = cpu_to_le16(hlen);
9245
9246 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9247 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9248 h_tx_bd->addr_lo, h_tx_bd->nbd);
9249
9250 /* now get a new data BD
9251 * (after the pbd) and fill it */
9252 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9253 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9254
9255 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9256 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9257
9258 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9259 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9260 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9261 d_tx_bd->vlan = 0;
9262 /* this marks the BD as one that has no individual mapping
9263 * the FW ignores this flag in a BD not marked start
9264 */
9265 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9266 DP(NETIF_MSG_TX_QUEUED,
9267 "TSO split data size is %d (%x:%x)\n",
9268 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9269
9270 /* update tx_bd for marking the last BD flag */
9271 *tx_bd = d_tx_bd;
9272
9273 return bd_prod;
9274}
9275
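/* The split produces two BDs backed by a single DMA mapping: the
 * header BD keeps the original address with nbytes trimmed to hlen,
 * and the new data BD points hlen bytes further into the same
 * mapping. ETH_TX_BD_FLAGS_SW_LSO on the data BD marks it as owning
 * no mapping of its own.
 */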
9276static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9277{
9278 if (fix > 0)
9279 csum = (u16) ~csum_fold(csum_sub(csum,
9280 csum_partial(t_header - fix, fix, 0)));
9281
9282 else if (fix < 0)
9283 csum = (u16) ~csum_fold(csum_add(csum,
9284 csum_partial(t_header, -fix, 0)));
9285
9286 return swab16(csum);
9287}
9288
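/* The hardware checksums starting at the transport header, while the
 * stack may have begun its partial checksum 'fix' bytes earlier (or
 * later). csum_sub()/csum_add() fold the difference out of (or into)
 * the pseudo checksum, and swab16() converts it to the byte order
 * the parsing BD expects.
 */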
9289static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9290{
9291 u32 rc;
9292
9293 if (skb->ip_summed != CHECKSUM_PARTIAL)
9294 rc = XMIT_PLAIN;
9295
9296 else {
9297 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9298 rc = XMIT_CSUM_V6;
9299 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9300 rc |= XMIT_CSUM_TCP;
9301
9302 } else {
9303 rc = XMIT_CSUM_V4;
9304 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9305 rc |= XMIT_CSUM_TCP;
9306 }
9307 }
9308
9309 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9310 rc |= XMIT_GSO_V4;
9311
9312 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9313 rc |= XMIT_GSO_V6;
9314
9315 return rc;
9316}
9317
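/* xmit_type is a bitmask: XMIT_PLAIN for no offload, XMIT_CSUM_V4 or
 * XMIT_CSUM_V6 plus optionally XMIT_CSUM_TCP for checksum offload,
 * and XMIT_GSO_V4/XMIT_GSO_V6 when the skb carries a TSO request.
 */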
9318/* check if packet requires linearization (packet is too fragmented) */
9319static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9320 u32 xmit_type)
9321{
9322 int to_copy = 0;
9323 int hlen = 0;
9324 int first_bd_sz = 0;
9325
9326 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9327 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9328
9329 if (xmit_type & XMIT_GSO) {
9330 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9331 /* Check if LSO packet needs to be copied:
9332 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9333 int wnd_size = MAX_FETCH_BD - 3;
9334 /* Number of windows to check */
9335 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9336 int wnd_idx = 0;
9337 int frag_idx = 0;
9338 u32 wnd_sum = 0;
9339
9340 /* Headers length */
9341 hlen = (int)(skb_transport_header(skb) - skb->data) +
9342 tcp_hdrlen(skb);
9343
9344 /* Amount of data (w/o headers) on linear part of SKB*/
9345 first_bd_sz = skb_headlen(skb) - hlen;
9346
9347 wnd_sum = first_bd_sz;
9348
9349 /* Calculate the first sum - it's special */
9350 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9351 wnd_sum +=
9352 skb_shinfo(skb)->frags[frag_idx].size;
9353
9354 /* If there was data on linear skb data - check it */
9355 if (first_bd_sz > 0) {
9356 if (unlikely(wnd_sum < lso_mss)) {
9357 to_copy = 1;
9358 goto exit_lbl;
9359 }
9360
9361 wnd_sum -= first_bd_sz;
9362 }
9363
9364 /* Others are easier: run through the frag list and
9365 check all windows */
9366 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9367 wnd_sum +=
9368 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9369
9370 if (unlikely(wnd_sum < lso_mss)) {
9371 to_copy = 1;
9372 break;
9373 }
9374 wnd_sum -=
9375 skb_shinfo(skb)->frags[wnd_idx].size;
9376 }
9377
9378 } else {
9379 /* in non-LSO too fragmented packet should always
9380 be linearized */
9381 to_copy = 1;
9382 }
9383 }
9384
9385exit_lbl:
9386 if (unlikely(to_copy))
9387 DP(NETIF_MSG_TX_QUEUED,
9388 "Linearization IS REQUIRED for %s packet. "
9389 "num_frags %d hlen %d first_bd_sz %d\n",
9390 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9391 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9392
9393 return to_copy;
9394}
9395
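/* The check above mirrors the firmware's MAX_FETCH_BD limit on how
 * many BDs it can fetch at once. For LSO, every window of
 * (MAX_FETCH_BD - 3) consecutive fragments must contain at least one
 * MSS worth of data; if any window falls short, or a non-LSO frame
 * simply has too many fragments, the skb is linearized.
 */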
9396/* called with netif_tx_lock
9397 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9398 * netif_wake_queue()
9399 */
9400static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9401{
9402 struct bnx2x *bp = netdev_priv(dev);
9403 struct bnx2x_fastpath *fp;
9404 struct sw_tx_bd *tx_buf;
9405 struct eth_tx_bd *tx_bd;
9406 struct eth_tx_parse_bd *pbd = NULL;
9407 u16 pkt_prod, bd_prod;
9408 int nbd, fp_index;
9409 dma_addr_t mapping;
9410 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9411 int vlan_off = (bp->e1hov ? 4 : 0);
9412 int i;
9413 u8 hlen = 0;
9414
9415#ifdef BNX2X_STOP_ON_ERROR
9416 if (unlikely(bp->panic))
9417 return NETDEV_TX_BUSY;
9418#endif
9419
9420 fp_index = (smp_processor_id() % bp->num_queues);
9421 fp = &bp->fp[fp_index];
9422
9423 if (unlikely(bnx2x_tx_avail(bp->fp) <
9424 (skb_shinfo(skb)->nr_frags + 3))) {
9425 bp->eth_stats.driver_xoff++;
9426 netif_stop_queue(dev);
9427 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9428 return NETDEV_TX_BUSY;
9429 }
9430
9431 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9432 " gso type %x xmit_type %x\n",
9433 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9434 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9435
9436 /* First, check if we need to linearize the skb
9437 (due to FW restrictions) */
9438 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9439 /* Statistics of linearization */
9440 bp->lin_cnt++;
9441 if (skb_linearize(skb) != 0) {
9442 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9443 "silently dropping this SKB\n");
9444 dev_kfree_skb_any(skb);
9445 return NETDEV_TX_OK;
9446 }
9447 }
9448
9449 /*
9450 Please read carefully. First we use one BD which we mark as start,
9451 then for TSO or xsum we have a parsing info BD,
9452 and only then we have the rest of the TSO BDs.
9453 (don't forget to mark the last one as last,
9454 and to unmap only AFTER you write to the BD ...)
9455 And above all, all pbd sizes are in words - NOT DWORDS!
9456 */
9457
9458 pkt_prod = fp->tx_pkt_prod++;
9459 bd_prod = TX_BD(fp->tx_bd_prod);
9460
9461 /* get a tx_buf and first BD */
9462 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9463 tx_bd = &fp->tx_desc_ring[bd_prod];
9464
9465 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9466 tx_bd->general_data = (UNICAST_ADDRESS <<
9467 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9468 tx_bd->general_data |= 1; /* header nbd */
9469
9470 /* remember the first BD of the packet */
9471 tx_buf->first_bd = fp->tx_bd_prod;
9472 tx_buf->skb = skb;
9473
9474 DP(NETIF_MSG_TX_QUEUED,
9475 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9476 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9477
9478 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9479 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9480 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9481 vlan_off += 4;
9482 } else
9483 tx_bd->vlan = cpu_to_le16(pkt_prod);
9484
9485 if (xmit_type) {
9486
9487 /* turn on parsing and get a BD */
9488 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9489 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9490
9491 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9492 }
9493
9494 if (xmit_type & XMIT_CSUM) {
9495 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9496
9497 /* for now NS flag is not used in Linux */
9498 pbd->global_data = (hlen |
9499 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9500 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9501
9502 pbd->ip_hlen = (skb_transport_header(skb) -
9503 skb_network_header(skb)) / 2;
9504
9505 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9506
9507 pbd->total_hlen = cpu_to_le16(hlen);
9508 hlen = hlen*2 - vlan_off;
9509
9510 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9511
9512 if (xmit_type & XMIT_CSUM_V4)
9513 tx_bd->bd_flags.as_bitfield |=
9514 ETH_TX_BD_FLAGS_IP_CSUM;
9515 else
9516 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9517
9518 if (xmit_type & XMIT_CSUM_TCP) {
9519 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9520
9521 } else {
9522 s8 fix = SKB_CS_OFF(skb); /* signed! */
9523
9524 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9525 pbd->cs_offset = fix / 2;
9526
9527 DP(NETIF_MSG_TX_QUEUED,
9528 "hlen %d offset %d fix %d csum before fix %x\n",
9529 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9530 SKB_CS(skb));
9531
9532 /* HW bug: fixup the CSUM */
9533 pbd->tcp_pseudo_csum =
9534 bnx2x_csum_fix(skb_transport_header(skb),
9535 SKB_CS(skb), fix);
9536
9537 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9538 pbd->tcp_pseudo_csum);
9539 }
9540 }
9541
9542 mapping = pci_map_single(bp->pdev, skb->data,
9543 skb_headlen(skb), PCI_DMA_TODEVICE);
9544
9545 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9546 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9547 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9548 tx_bd->nbd = cpu_to_le16(nbd);
9549 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9550
9551 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9552 " nbytes %d flags %x vlan %x\n",
9553 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9554 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9555 le16_to_cpu(tx_bd->vlan));
9556
9557 if (xmit_type & XMIT_GSO) {
9558
9559 DP(NETIF_MSG_TX_QUEUED,
9560 "TSO packet len %d hlen %d total len %d tso size %d\n",
9561 skb->len, hlen, skb_headlen(skb),
9562 skb_shinfo(skb)->gso_size);
9563
9564 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9565
9566 if (unlikely(skb_headlen(skb) > hlen))
9567 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9568 bd_prod, ++nbd);
9569
9570 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9571 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9572 pbd->tcp_flags = pbd_tcp_flags(skb);
9573
9574 if (xmit_type & XMIT_GSO_V4) {
9575 pbd->ip_id = swab16(ip_hdr(skb)->id);
9576 pbd->tcp_pseudo_csum =
9577 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9578 ip_hdr(skb)->daddr,
9579 0, IPPROTO_TCP, 0));
9580
9581 } else
9582 pbd->tcp_pseudo_csum =
9583 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9584 &ipv6_hdr(skb)->daddr,
9585 0, IPPROTO_TCP, 0));
9586
9587 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9588 }
9589
9590 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9591 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9592
9593 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9594 tx_bd = &fp->tx_desc_ring[bd_prod];
9595
9596 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9597 frag->size, PCI_DMA_TODEVICE);
9598
9599 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9600 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9601 tx_bd->nbytes = cpu_to_le16(frag->size);
9602 tx_bd->vlan = cpu_to_le16(pkt_prod);
9603 tx_bd->bd_flags.as_bitfield = 0;
9604
9605 DP(NETIF_MSG_TX_QUEUED,
9606 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9607 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9608 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9609 }
9610
9611 /* now at last mark the BD as the last BD */
9612 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9613
9614 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9615 tx_bd, tx_bd->bd_flags.as_bitfield);
9616
9617 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9618
9619 /* now send a tx doorbell, counting the next BD
9620 * if the packet contains or ends with it
9621 */
9622 if (TX_BD_POFF(bd_prod) < nbd)
9623 nbd++;
9624
9625 if (pbd)
9626 DP(NETIF_MSG_TX_QUEUED,
9627 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9628 " tcp_flags %x xsum %x seq %u hlen %u\n",
9629 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9630 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9631 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9632
9633 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9634
9635 fp->hw_tx_prods->bds_prod =
9636 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9637 mb(); /* FW restriction: must not reorder writing nbd and packets */
9638 fp->hw_tx_prods->packets_prod =
9639 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9640 DOORBELL(bp, FP_IDX(fp), 0);
9641
9642 mmiowb();
9643
9644 fp->tx_bd_prod += nbd;
9645 dev->trans_start = jiffies;
9646
9647 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9648 netif_stop_queue(dev);
9649 bp->eth_stats.driver_xoff++;
9650 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9651 netif_wake_queue(dev);
9652 }
9653 fp->tx_pkt++;
9654
9655 return NETDEV_TX_OK;
9656}
9657
9658/* called with rtnl_lock */
9659static int bnx2x_open(struct net_device *dev)
9660{
9661 struct bnx2x *bp = netdev_priv(dev);
9662
9663 bnx2x_set_power_state(bp, PCI_D0);
9664
9665 return bnx2x_nic_load(bp, LOAD_OPEN);
9666}
9667
9668/* called with rtnl_lock */
9669static int bnx2x_close(struct net_device *dev)
9670{
9671 struct bnx2x *bp = netdev_priv(dev);
9672
9673 /* Unload the driver, release IRQs */
9674 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9675 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9676 if (!CHIP_REV_IS_SLOW(bp))
9677 bnx2x_set_power_state(bp, PCI_D3hot);
9678
9679 return 0;
9680}
9681
9682/* called with netif_tx_lock from set_multicast */
9683static void bnx2x_set_rx_mode(struct net_device *dev)
9684{
9685 struct bnx2x *bp = netdev_priv(dev);
9686 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9687 int port = BP_PORT(bp);
9688
9689 if (bp->state != BNX2X_STATE_OPEN) {
9690 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9691 return;
9692 }
9693
9694 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9695
9696 if (dev->flags & IFF_PROMISC)
9697 rx_mode = BNX2X_RX_MODE_PROMISC;
9698
9699 else if ((dev->flags & IFF_ALLMULTI) ||
9700 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9701 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9702
9703 else { /* some multicasts */
9704 if (CHIP_IS_E1(bp)) {
9705 int i, old, offset;
9706 struct dev_mc_list *mclist;
9707 struct mac_configuration_cmd *config =
9708 bnx2x_sp(bp, mcast_config);
9709
9710 for (i = 0, mclist = dev->mc_list;
9711 mclist && (i < dev->mc_count);
9712 i++, mclist = mclist->next) {
9713
9714 config->config_table[i].
9715 cam_entry.msb_mac_addr =
9716 swab16(*(u16 *)&mclist->dmi_addr[0]);
9717 config->config_table[i].
9718 cam_entry.middle_mac_addr =
9719 swab16(*(u16 *)&mclist->dmi_addr[2]);
9720 config->config_table[i].
9721 cam_entry.lsb_mac_addr =
9722 swab16(*(u16 *)&mclist->dmi_addr[4]);
9723 config->config_table[i].cam_entry.flags =
9724 cpu_to_le16(port);
9725 config->config_table[i].
9726 target_table_entry.flags = 0;
9727 config->config_table[i].
9728 target_table_entry.client_id = 0;
9729 config->config_table[i].
9730 target_table_entry.vlan_id = 0;
9731
9732 DP(NETIF_MSG_IFUP,
9733 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9734 config->config_table[i].
9735 cam_entry.msb_mac_addr,
9736 config->config_table[i].
9737 cam_entry.middle_mac_addr,
9738 config->config_table[i].
9739 cam_entry.lsb_mac_addr);
9740 }
9741 old = config->hdr.length_6b;
9742 if (old > i) {
9743 for (; i < old; i++) {
9744 if (CAM_IS_INVALID(config->
9745 config_table[i])) {
9746 i--; /* already invalidated */
9747 break;
9748 }
9749 /* invalidate */
9750 CAM_INVALIDATE(config->
9751 config_table[i]);
9752 }
9753 }
9754
9755 if (CHIP_REV_IS_SLOW(bp))
9756 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9757 else
9758 offset = BNX2X_MAX_MULTICAST*(1 + port);
9759
9760 config->hdr.length_6b = i;
9761 config->hdr.offset = offset;
9762 config->hdr.client_id = BP_CL_ID(bp);
9763 config->hdr.reserved1 = 0;
9764
9765 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9766 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9767 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9768 0);
9769 } else { /* E1H */
9770 /* Accept one or more multicasts */
9771 struct dev_mc_list *mclist;
9772 u32 mc_filter[MC_HASH_SIZE];
9773 u32 crc, bit, regidx;
9774 int i;
9775
9776 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9777
9778 for (i = 0, mclist = dev->mc_list;
9779 mclist && (i < dev->mc_count);
9780 i++, mclist = mclist->next) {
9781
9782 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9783 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9784 mclist->dmi_addr[0], mclist->dmi_addr[1],
9785 mclist->dmi_addr[2], mclist->dmi_addr[3],
9786 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9787
9788 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9789 bit = (crc >> 24) & 0xff;
9790 regidx = bit >> 5;
9791 bit &= 0x1f;
9792 mc_filter[regidx] |= (1 << bit);
9793 }
9794
9795 for (i = 0; i < MC_HASH_SIZE; i++)
9796 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9797 mc_filter[i]);
9798 }
9799 }
9800
9801 bp->rx_mode = rx_mode;
9802 bnx2x_set_storm_rx_mode(bp);
9803}
9804
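/* E1 programs explicit CAM entries for each multicast address, while
 * E1H uses a 256-bit hash filter: the top byte of the crc32c of the
 * MAC address selects one bit across the eight 32-bit MC_HASH
 * registers.
 */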
9805/* called with rtnl_lock */
a2fbb9ea
ET
9806static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9807{
9808 struct sockaddr *addr = p;
9809 struct bnx2x *bp = netdev_priv(dev);
9810
9811 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9812 return -EINVAL;
9813
9814 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9815 if (netif_running(dev)) {
9816 if (CHIP_IS_E1(bp))
9817 bnx2x_set_mac_addr_e1(bp);
9818 else
9819 bnx2x_set_mac_addr_e1h(bp);
9820 }
9821
9822 return 0;
9823}
9824
9825/* called with rtnl_lock */
9826static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9827{
9828 struct mii_ioctl_data *data = if_mii(ifr);
9829 struct bnx2x *bp = netdev_priv(dev);
9830 int err;
9831
9832 switch (cmd) {
9833 case SIOCGMIIPHY:
9834 data->phy_id = bp->port.phy_addr;
9835
9836 /* fallthrough */
9837
9838 case SIOCGMIIREG: {
9839 u16 mii_regval;
9840
9841 if (!netif_running(dev))
9842 return -EAGAIN;
9843
9844 mutex_lock(&bp->port.phy_mutex);
9845 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9846 DEFAULT_PHY_DEV_ADDR,
9847 (data->reg_num & 0x1f), &mii_regval);
9848 data->val_out = mii_regval;
9849 mutex_unlock(&bp->port.phy_mutex);
9850 return err;
9851 }
9852
9853 case SIOCSMIIREG:
9854 if (!capable(CAP_NET_ADMIN))
9855 return -EPERM;
9856
9857 if (!netif_running(dev))
9858 return -EAGAIN;
9859
9860 mutex_lock(&bp->port.phy_mutex);
9861 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9862 DEFAULT_PHY_DEV_ADDR,
9863 (data->reg_num & 0x1f), data->val_in);
9864 mutex_unlock(&bp->port.phy_mutex);
9865 return err;
9866
9867 default:
9868 /* do nothing */
9869 break;
9870 }
9871
9872 return -EOPNOTSUPP;
9873}
9874
9875/* called with rtnl_lock */
9876static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9877{
9878 struct bnx2x *bp = netdev_priv(dev);
9879 int rc = 0;
9880
9881 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9882 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9883 return -EINVAL;
9884
9885 /* This does not race with packet allocation
9886 * because the actual alloc size is
9887 * only updated as part of load
9888 */
9889 dev->mtu = new_mtu;
9890
9891 if (netif_running(dev)) {
9892 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9893 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9894 }
9895
9896 return rc;
9897}
9898
9899static void bnx2x_tx_timeout(struct net_device *dev)
9900{
9901 struct bnx2x *bp = netdev_priv(dev);
9902
9903#ifdef BNX2X_STOP_ON_ERROR
9904 if (!bp->panic)
9905 bnx2x_panic();
9906#endif
9907 /* This allows the netif to be shutdown gracefully before resetting */
9908 schedule_work(&bp->reset_task);
9909}
9910
9911#ifdef BCM_VLAN
9912/* called with rtnl_lock */
9913static void bnx2x_vlan_rx_register(struct net_device *dev,
9914 struct vlan_group *vlgrp)
9915{
9916 struct bnx2x *bp = netdev_priv(dev);
9917
9918 bp->vlgrp = vlgrp;
9919 if (netif_running(dev))
9920 bnx2x_set_client_config(bp);
9921}
9922
9923#endif
9924
9925#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9926static void poll_bnx2x(struct net_device *dev)
9927{
9928 struct bnx2x *bp = netdev_priv(dev);
9929
9930 disable_irq(bp->pdev->irq);
9931 bnx2x_interrupt(bp->pdev->irq, dev);
9932 enable_irq(bp->pdev->irq);
9933}
9934#endif
9935
9936static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9937 struct net_device *dev)
9938{
9939 struct bnx2x *bp;
9940 int rc;
9941
9942 SET_NETDEV_DEV(dev, &pdev->dev);
9943 bp = netdev_priv(dev);
9944
9945 bp->dev = dev;
9946 bp->pdev = pdev;
9947 bp->flags = 0;
9948 bp->func = PCI_FUNC(pdev->devfn);
9949
9950 rc = pci_enable_device(pdev);
9951 if (rc) {
9952 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9953 goto err_out;
9954 }
9955
9956 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9957 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9958 " aborting\n");
9959 rc = -ENODEV;
9960 goto err_out_disable;
9961 }
9962
9963 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9964 printk(KERN_ERR PFX "Cannot find second PCI device"
9965 " base address, aborting\n");
9966 rc = -ENODEV;
9967 goto err_out_disable;
9968 }
9969
9970 if (atomic_read(&pdev->enable_cnt) == 1) {
9971 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9972 if (rc) {
9973 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9974 " aborting\n");
9975 goto err_out_disable;
9976 }
9977
9978 pci_set_master(pdev);
9979 pci_save_state(pdev);
9980 }
9981
9982 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9983 if (bp->pm_cap == 0) {
9984 printk(KERN_ERR PFX "Cannot find power management"
9985 " capability, aborting\n");
9986 rc = -EIO;
9987 goto err_out_release;
9988 }
9989
9990 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9991 if (bp->pcie_cap == 0) {
9992 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9993 " aborting\n");
9994 rc = -EIO;
9995 goto err_out_release;
9996 }
9997
9998 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9999 bp->flags |= USING_DAC_FLAG;
10000 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10001 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10002 " failed, aborting\n");
10003 rc = -EIO;
10004 goto err_out_release;
10005 }
10006
10007 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10008 printk(KERN_ERR PFX "System does not support DMA,"
10009 " aborting\n");
10010 rc = -EIO;
10011 goto err_out_release;
10012 }
10013
10014 dev->mem_start = pci_resource_start(pdev, 0);
10015 dev->base_addr = dev->mem_start;
10016 dev->mem_end = pci_resource_end(pdev, 0);
10017
10018 dev->irq = pdev->irq;
10019
10020 bp->regview = ioremap_nocache(dev->base_addr,
10021 pci_resource_len(pdev, 0));
10022 if (!bp->regview) {
10023 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10024 rc = -ENOMEM;
10025 goto err_out_release;
10026 }
10027
10028 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10029 min_t(u64, BNX2X_DB_SIZE,
10030 pci_resource_len(pdev, 2)));
10031 if (!bp->doorbells) {
10032 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10033 rc = -ENOMEM;
10034 goto err_out_unmap;
10035 }
10036
10037 bnx2x_set_power_state(bp, PCI_D0);
10038
10039 /* clean indirect addresses */
10040 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10041 PCICFG_VENDOR_ID_OFFSET);
10042 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10043 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10044 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10045 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10046
10047 dev->hard_start_xmit = bnx2x_start_xmit;
10048 dev->watchdog_timeo = TX_TIMEOUT;
10049
10050 dev->ethtool_ops = &bnx2x_ethtool_ops;
10051 dev->open = bnx2x_open;
10052 dev->stop = bnx2x_close;
10053 dev->set_multicast_list = bnx2x_set_rx_mode;
10054 dev->set_mac_address = bnx2x_change_mac_addr;
10055 dev->do_ioctl = bnx2x_ioctl;
10056 dev->change_mtu = bnx2x_change_mtu;
10057 dev->tx_timeout = bnx2x_tx_timeout;
10058#ifdef BCM_VLAN
10059 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10060#endif
10061#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10062 dev->poll_controller = poll_bnx2x;
10063#endif
10064 dev->features |= NETIF_F_SG;
10065 dev->features |= NETIF_F_HW_CSUM;
10066 if (bp->flags & USING_DAC_FLAG)
10067 dev->features |= NETIF_F_HIGHDMA;
10068#ifdef BCM_VLAN
10069 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10070#endif
10071 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10072 dev->features |= NETIF_F_TSO6;
10073
10074 return 0;
10075
10076err_out_unmap:
10077 if (bp->regview) {
10078 iounmap(bp->regview);
10079 bp->regview = NULL;
10080 }
10081 if (bp->doorbells) {
10082 iounmap(bp->doorbells);
10083 bp->doorbells = NULL;
10084 }
10085
10086err_out_release:
10087 if (atomic_read(&pdev->enable_cnt) == 1)
10088 pci_release_regions(pdev);
10089
10090err_out_disable:
10091 pci_disable_device(pdev);
10092 pci_set_drvdata(pdev, NULL);
10093
10094err_out:
10095 return rc;
10096}
10097
10098static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10099{
10100 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10101
10102 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10103 return val;
10104}
10105
10106/* return value of 1=2.5GHz 2=5GHz */
10107static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10108{
10109 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10110
10111 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10112 return val;
10113}
10114
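/* Both helpers decode the PCIe link status fields mirrored at
 * PCICFG_OFFSET + PCICFG_LINK_CONTROL: the width is the negotiated
 * lane count and the speed code is 1 for 2.5GT/s or 2 for 5GT/s
 * (Gen2), as used in the probe printout below.
 */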
10115static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10116 const struct pci_device_id *ent)
10117{
10118 static int version_printed;
10119 struct net_device *dev = NULL;
10120 struct bnx2x *bp;
10121 int rc;
10122 DECLARE_MAC_BUF(mac);
10123
10124 if (version_printed++ == 0)
10125 printk(KERN_INFO "%s", version);
10126
10127 /* dev zeroed in init_etherdev */
10128 dev = alloc_etherdev(sizeof(*bp));
10129 if (!dev) {
10130 printk(KERN_ERR PFX "Cannot allocate net device\n");
10131 return -ENOMEM;
10132 }
10133
10134 netif_carrier_off(dev);
10135
10136 bp = netdev_priv(dev);
10137 bp->msglevel = debug;
10138
10139 rc = bnx2x_init_dev(pdev, dev);
10140 if (rc < 0) {
10141 free_netdev(dev);
10142 return rc;
10143 }
10144
10145 rc = register_netdev(dev);
10146 if (rc) {
10147 dev_err(&pdev->dev, "Cannot register net device\n");
10148 goto init_one_exit;
10149 }
10150
10151 pci_set_drvdata(pdev, dev);
10152
10153 rc = bnx2x_init_bp(bp);
10154 if (rc) {
10155 unregister_netdev(dev);
10156 goto init_one_exit;
10157 }
10158
10159 bp->common.name = board_info[ent->driver_data].name;
10160 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10161 " IRQ %d, ", dev->name, bp->common.name,
10162 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10163 bnx2x_get_pcie_width(bp),
10164 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10165 dev->base_addr, bp->pdev->irq);
10166 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10167 return 0;
10168
10169init_one_exit:
10170 if (bp->regview)
10171 iounmap(bp->regview);
10172
10173 if (bp->doorbells)
10174 iounmap(bp->doorbells);
10175
10176 free_netdev(dev);
10177
10178 if (atomic_read(&pdev->enable_cnt) == 1)
10179 pci_release_regions(pdev);
10180
10181 pci_disable_device(pdev);
10182 pci_set_drvdata(pdev, NULL);
10183
10184 return rc;
10185}
10186
10187static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10188{
10189 struct net_device *dev = pci_get_drvdata(pdev);
10190 struct bnx2x *bp;
10191
10192 if (!dev) {
10193 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10194 return;
10195 }
10196 bp = netdev_priv(dev);
10197
10198 unregister_netdev(dev);
10199
10200 if (bp->regview)
10201 iounmap(bp->regview);
10202
10203 if (bp->doorbells)
10204 iounmap(bp->doorbells);
10205
10206 free_netdev(dev);
10207
10208 if (atomic_read(&pdev->enable_cnt) == 1)
10209 pci_release_regions(pdev);
10210
10211 pci_disable_device(pdev);
10212 pci_set_drvdata(pdev, NULL);
10213}
10214
10215static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10216{
10217 struct net_device *dev = pci_get_drvdata(pdev);
10218 struct bnx2x *bp;
10219
10220 if (!dev) {
10221 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10222 return -ENODEV;
10223 }
10224 bp = netdev_priv(dev);
10225
10226 rtnl_lock();
10227
10228 pci_save_state(pdev);
10229
10230 if (!netif_running(dev)) {
10231 rtnl_unlock();
10232 return 0;
10233 }
10234
10235 netif_device_detach(dev);
10236
10237 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10238
10239 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10240
10241 rtnl_unlock();
10242
10243 return 0;
10244}
10245
10246static int bnx2x_resume(struct pci_dev *pdev)
10247{
10248 struct net_device *dev = pci_get_drvdata(pdev);
10249 struct bnx2x *bp;
10250 int rc;
10251
10252 if (!dev) {
10253 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10254 return -ENODEV;
10255 }
10256 bp = netdev_priv(dev);
10257
10258 rtnl_lock();
10259
10260 pci_restore_state(pdev);
10261
10262 if (!netif_running(dev)) {
10263 rtnl_unlock();
10264 return 0;
10265 }
10266
10267 bnx2x_set_power_state(bp, PCI_D0);
10268 netif_device_attach(dev);
10269
10270 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10271
10272 rtnl_unlock();
10273
10274 return rc;
10275}
10276
10277/**
10278 * bnx2x_io_error_detected - called when PCI error is detected
10279 * @pdev: Pointer to PCI device
10280 * @state: The current pci connection state
10281 *
10282 * This function is called after a PCI bus error affecting
10283 * this device has been detected.
10284 */
10285static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10286 pci_channel_state_t state)
10287{
10288 struct net_device *dev = pci_get_drvdata(pdev);
10289 struct bnx2x *bp = netdev_priv(dev);
10290
10291 rtnl_lock();
10292
10293 netif_device_detach(dev);
10294
10295 if (netif_running(dev))
10296 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10297
10298 pci_disable_device(pdev);
10299
10300 rtnl_unlock();
10301
10302 /* Request a slot reset */
10303 return PCI_ERS_RESULT_NEED_RESET;
10304}
10305
10306/**
10307 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10308 * @pdev: Pointer to PCI device
10309 *
10310 * Restart the card from scratch, as if from a cold-boot.
10311 */
10312static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10313{
10314 struct net_device *dev = pci_get_drvdata(pdev);
10315 struct bnx2x *bp = netdev_priv(dev);
10316
10317 rtnl_lock();
10318
10319 if (pci_enable_device(pdev)) {
10320 dev_err(&pdev->dev,
10321 "Cannot re-enable PCI device after reset\n");
10322 rtnl_unlock();
10323 return PCI_ERS_RESULT_DISCONNECT;
10324 }
10325
10326 pci_set_master(pdev);
10327 pci_restore_state(pdev);
10328
10329 if (netif_running(dev))
10330 bnx2x_set_power_state(bp, PCI_D0);
10331
10332 rtnl_unlock();
10333
10334 return PCI_ERS_RESULT_RECOVERED;
10335}
10336
10337/**
10338 * bnx2x_io_resume - called when traffic can start flowing again
10339 * @pdev: Pointer to PCI device
10340 *
10341 * This callback is called when the error recovery driver tells us that
10342 * its OK to resume normal operation.
10343 */
10344static void bnx2x_io_resume(struct pci_dev *pdev)
10345{
10346 struct net_device *dev = pci_get_drvdata(pdev);
10347 struct bnx2x *bp = netdev_priv(dev);
10348
10349 rtnl_lock();
10350
10351 if (netif_running(dev))
10352 bnx2x_nic_load(bp, LOAD_OPEN);
10353
10354 netif_device_attach(dev);
10355
10356 rtnl_unlock();
10357}
10358
10359static struct pci_error_handlers bnx2x_err_handler = {
10360 .error_detected = bnx2x_io_error_detected,
10361 .slot_reset = bnx2x_io_slot_reset,
10362 .resume = bnx2x_io_resume,
10363};
10364
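/* AER recovery flow: error_detected detaches the netdev and unloads
 * the driver, slot_reset re-enables the device and restores PCI
 * state after the bus reset, and resume reloads the NIC and
 * reattaches the netdev once traffic may flow again.
 */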
10365static struct pci_driver bnx2x_pci_driver = {
10366 .name = DRV_MODULE_NAME,
10367 .id_table = bnx2x_pci_tbl,
10368 .probe = bnx2x_init_one,
10369 .remove = __devexit_p(bnx2x_remove_one),
10370 .suspend = bnx2x_suspend,
10371 .resume = bnx2x_resume,
10372 .err_handler = &bnx2x_err_handler,
10373};
10374
10375static int __init bnx2x_init(void)
10376{
10377 return pci_register_driver(&bnx2x_pci_driver);
10378}
10379
10380static void __exit bnx2x_cleanup(void)
10381{
10382 pci_unregister_driver(&bnx2x_pci_driver);
10383}
10384
10385module_init(bnx2x_init);
10386module_exit(bnx2x_cleanup);
10387