1 /* bnx2x_main.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2008 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
43 #endif
44 #include <net/ip.h>
45 #include <net/tcp.h>
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/crc32c.h>
51 #include <linux/prefetch.h>
52 #include <linux/zlib.h>
53 #include <linux/io.h>
54
55 #include "bnx2x_reg.h"
56 #include "bnx2x_fw_defs.h"
57 #include "bnx2x_hsi.h"
58 #include "bnx2x_link.h"
59 #include "bnx2x.h"
60 #include "bnx2x_init.h"
61
62 #define DRV_MODULE_VERSION "1.45.22"
63 #define DRV_MODULE_RELDATE "2008/09/09"
64 #define BNX2X_BC_VER 0x040200
65
66 /* Time in jiffies before concluding the transmitter is hung */
67 #define TX_TIMEOUT (5*HZ)
68
69 static char version[] __devinitdata =
70 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
71 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
72
73 MODULE_AUTHOR("Eliezer Tamir");
74 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
75 MODULE_LICENSE("GPL");
76 MODULE_VERSION(DRV_MODULE_VERSION);
77
78 static int disable_tpa;
79 static int use_inta;
80 static int poll;
81 static int debug;
82 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
83 static int use_multi;
84
85 module_param(disable_tpa, int, 0);
86 module_param(use_inta, int, 0);
87 module_param(poll, int, 0);
88 module_param(debug, int, 0);
89 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
90 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
91 MODULE_PARM_DESC(poll, "use polling (for debug)");
92 MODULE_PARM_DESC(debug, "default debug msglevel");
93
94 #ifdef BNX2X_MULTI
95 module_param(use_multi, int, 0);
96 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
97 #endif
98
99 enum bnx2x_board_type {
100 BCM57710 = 0,
101 BCM57711 = 1,
102 BCM57711E = 2,
103 };
104
105 /* indexed by board_type, above */
106 static struct {
107 char *name;
108 } board_info[] __devinitdata = {
109 { "Broadcom NetXtreme II BCM57710 XGb" },
110 { "Broadcom NetXtreme II BCM57711 XGb" },
111 { "Broadcom NetXtreme II BCM57711E XGb" }
112 };
113
114
115 static const struct pci_device_id bnx2x_pci_tbl[] = {
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
122 { 0 }
123 };
124
125 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126
127 /****************************************************************************
128 * General service functions
129 ****************************************************************************/
130
131 /* used only at init
132 * locking is done by mcp
133 */
134 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 {
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
139 PCICFG_VENDOR_ID_OFFSET);
140 }
141
142 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
143 {
144 u32 val;
145
146 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
147 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
148 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
149 PCICFG_VENDOR_ID_OFFSET);
150
151 return val;
152 }
153
154 static const u32 dmae_reg_go_c[] = {
155 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
156 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
157 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
158 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
159 };
160
161 /* copy command into DMAE command memory and set DMAE command go */
162 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
163 int idx)
164 {
165 u32 cmd_offset;
166 int i;
167
168 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
169 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
170 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171
172 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
173 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 }
175 REG_WR(bp, dmae_reg_go_c[idx], 1);
176 }
177
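      /* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr.
       * While the DMAE block is not yet ready this falls back to indirect
       * register writes (from the slowpath wb_data buffer); completion is
       * detected by polling the slowpath wb_comp word.
       */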
178 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
179 u32 len32)
180 {
181 struct dmae_command *dmae = &bp->init_dmae;
182 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
183 int cnt = 200;
184
185 if (!bp->dmae_ready) {
186 u32 *data = bnx2x_sp(bp, wb_data[0]);
187
188 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
189 " using indirect\n", dst_addr, len32);
190 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
191 return;
192 }
193
194 mutex_lock(&bp->dmae_mutex);
195
196 memset(dmae, 0, sizeof(struct dmae_command));
197
198 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
199 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
200 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
201 #ifdef __BIG_ENDIAN
202 DMAE_CMD_ENDIANITY_B_DW_SWAP |
203 #else
204 DMAE_CMD_ENDIANITY_DW_SWAP |
205 #endif
206 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
207 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
208 dmae->src_addr_lo = U64_LO(dma_addr);
209 dmae->src_addr_hi = U64_HI(dma_addr);
210 dmae->dst_addr_lo = dst_addr >> 2;
211 dmae->dst_addr_hi = 0;
212 dmae->len = len32;
213 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_val = DMAE_COMP_VAL;
216
217 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
218 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
219 "dst_addr [%x:%08x (%08x)]\n"
220 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
221 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
222 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
223 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
224 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
225 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
226 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
227
228 *wb_comp = 0;
229
230 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
231
232 udelay(5);
233
234 while (*wb_comp != DMAE_COMP_VAL) {
235 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
236
237 if (!cnt) {
238 BNX2X_ERR("dmae timeout!\n");
239 break;
240 }
241 cnt--;
242 /* adjust delay for emulation/FPGA */
243 if (CHIP_REV_IS_SLOW(bp))
244 msleep(100);
245 else
246 udelay(5);
247 }
248
249 mutex_unlock(&bp->dmae_mutex);
250 }
251
252 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 {
254 struct dmae_command *dmae = &bp->init_dmae;
255 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
256 int cnt = 200;
257
258 if (!bp->dmae_ready) {
259 u32 *data = bnx2x_sp(bp, wb_data[0]);
260 int i;
261
262 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
263 " using indirect\n", src_addr, len32);
264 for (i = 0; i < len32; i++)
265 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
266 return;
267 }
268
269 mutex_lock(&bp->dmae_mutex);
270
271 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
272 memset(dmae, 0, sizeof(struct dmae_command));
273
274 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
275 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
276 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
277 #ifdef __BIG_ENDIAN
278 DMAE_CMD_ENDIANITY_B_DW_SWAP |
279 #else
280 DMAE_CMD_ENDIANITY_DW_SWAP |
281 #endif
282 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
283 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
284 dmae->src_addr_lo = src_addr >> 2;
285 dmae->src_addr_hi = 0;
286 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
287 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288 dmae->len = len32;
289 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
291 dmae->comp_val = DMAE_COMP_VAL;
292
293 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
294 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
295 "dst_addr [%x:%08x (%08x)]\n"
296 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
297 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
298 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
299 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
300
301 *wb_comp = 0;
302
303 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
304
305 udelay(5);
306
307 while (*wb_comp != DMAE_COMP_VAL) {
308
309 if (!cnt) {
310 BNX2X_ERR("dmae timeout!\n");
311 break;
312 }
313 cnt--;
314 /* adjust delay for emulation/FPGA */
315 if (CHIP_REV_IS_SLOW(bp))
316 msleep(100);
317 else
318 udelay(5);
319 }
320 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
321 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
322 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323
324 mutex_unlock(&bp->dmae_mutex);
325 }
326
327 /* used only for slowpath so not inlined */
328 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
329 {
330 u32 wb_write[2];
331
332 wb_write[0] = val_hi;
333 wb_write[1] = val_lo;
334 REG_WR_DMAE(bp, reg, wb_write, 2);
335 }
336
337 #ifdef USE_WB_RD
338 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
339 {
340 u32 wb_data[2];
341
342 REG_RD_DMAE(bp, reg, wb_data, 2);
343
344 return HILO_U64(wb_data[0], wb_data[1]);
345 }
346 #endif
347
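      /* Scan the assert list of each storm processor (X/T/C/U) and print any
       * valid entries; returns the number of asserts found.
       */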
348 static int bnx2x_mc_assert(struct bnx2x *bp)
349 {
350 char last_idx;
351 int i, rc = 0;
352 u32 row0, row1, row2, row3;
353
354 /* XSTORM */
355 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
356 XSTORM_ASSERT_LIST_INDEX_OFFSET);
357 if (last_idx)
358 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359
360 /* print the asserts */
361 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362
363 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
364 XSTORM_ASSERT_LIST_OFFSET(i));
365 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
366 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
367 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
368 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
369 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
370 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371
372 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
373 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
374 " 0x%08x 0x%08x 0x%08x\n",
375 i, row3, row2, row1, row0);
376 rc++;
377 } else {
378 break;
379 }
380 }
381
382 /* TSTORM */
383 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
384 TSTORM_ASSERT_LIST_INDEX_OFFSET);
385 if (last_idx)
386 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387
388 /* print the asserts */
389 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390
391 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
392 TSTORM_ASSERT_LIST_OFFSET(i));
393 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
394 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
395 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
396 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
397 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
398 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399
400 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
401 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
402 " 0x%08x 0x%08x 0x%08x\n",
403 i, row3, row2, row1, row0);
404 rc++;
405 } else {
406 break;
407 }
408 }
409
410 /* CSTORM */
411 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
412 CSTORM_ASSERT_LIST_INDEX_OFFSET);
413 if (last_idx)
414 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415
416 /* print the asserts */
417 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418
419 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
420 CSTORM_ASSERT_LIST_OFFSET(i));
421 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
422 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
423 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
424 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
425 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
426 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427
428 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
429 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
430 " 0x%08x 0x%08x 0x%08x\n",
431 i, row3, row2, row1, row0);
432 rc++;
433 } else {
434 break;
435 }
436 }
437
438 /* USTORM */
439 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
440 USTORM_ASSERT_LIST_INDEX_OFFSET);
441 if (last_idx)
442 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443
444 /* print the asserts */
445 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446
447 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
448 USTORM_ASSERT_LIST_OFFSET(i));
449 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
450 USTORM_ASSERT_LIST_OFFSET(i) + 4);
451 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
452 USTORM_ASSERT_LIST_OFFSET(i) + 8);
453 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
454 USTORM_ASSERT_LIST_OFFSET(i) + 12);
455
456 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
457 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
458 " 0x%08x 0x%08x 0x%08x\n",
459 i, row3, row2, row1, row0);
460 rc++;
461 } else {
462 break;
463 }
464 }
465
466 return rc;
467 }
468
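      /* Dump the MCP (firmware) trace buffer from the MCP scratchpad to the
       * kernel log, treating it as a circular buffer around the last-written
       * mark.
       */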
469 static void bnx2x_fw_dump(struct bnx2x *bp)
470 {
471 u32 mark, offset;
472 u32 data[9];
473 int word;
474
475 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
476 mark = ((mark + 0x3) & ~0x3);
477 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
478
479 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
480 for (word = 0; word < 8; word++)
481 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
482 offset + 4*word));
483 data[8] = 0x0;
484 printk(KERN_CONT "%s", (char *)data);
485 }
486 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
487 for (word = 0; word < 8; word++)
488 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
489 offset + 4*word));
490 data[8] = 0x0;
491 printk(KERN_CONT "%s", (char *)data);
492 }
493 printk("\n" KERN_ERR PFX "end of fw dump\n");
494 }
495
496 static void bnx2x_panic_dump(struct bnx2x *bp)
497 {
498 int i;
499 u16 j, start, end;
500
501 bp->stats_state = STATS_STATE_DISABLED;
502 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503
504 BNX2X_ERR("begin crash dump -----------------\n");
505
506 for_each_queue(bp, i) {
507 struct bnx2x_fastpath *fp = &bp->fp[i];
508 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509
510 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
511 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
512 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
513 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
514 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
515 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
516 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
517 fp->rx_bd_prod, fp->rx_bd_cons,
518 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
519 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
520 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
521 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
522 " *sb_u_idx(%x) bd data(%x,%x)\n",
523 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
524 fp->status_blk->c_status_block.status_block_index,
525 fp->fp_u_idx,
526 fp->status_blk->u_status_block.status_block_index,
527 hw_prods->packets_prod, hw_prods->bds_prod);
528
529 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
530 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
531 for (j = start; j < end; j++) {
532 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533
534 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
535 sw_bd->skb, sw_bd->first_bd);
536 }
537
538 start = TX_BD(fp->tx_bd_cons - 10);
539 end = TX_BD(fp->tx_bd_cons + 254);
540 for (j = start; j < end; j++) {
541 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542
543 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
544 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
545 }
546
547 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
548 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
549 for (j = start; j < end; j++) {
550 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
551 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552
553 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
554 j, rx_bd[1], rx_bd[0], sw_bd->skb);
555 }
556
557 start = RX_SGE(fp->rx_sge_prod);
558 end = RX_SGE(fp->last_max_sge);
559 for (j = start; j < end; j++) {
560 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
561 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562
563 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
564 j, rx_sge[1], rx_sge[0], sw_page->page);
565 }
566
567 start = RCQ_BD(fp->rx_comp_cons - 10);
568 end = RCQ_BD(fp->rx_comp_cons + 503);
569 for (j = start; j < end; j++) {
570 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571
572 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
573 j, cqe[0], cqe[1], cqe[2], cqe[3]);
574 }
575 }
576
577 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
578 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
579 " spq_prod_idx(%u)\n",
580 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
581 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
582
583 bnx2x_fw_dump(bp);
584 bnx2x_mc_assert(bp);
585 BNX2X_ERR("end crash dump -----------------\n");
586 }
587
588 static void bnx2x_int_enable(struct bnx2x *bp)
589 {
590 int port = BP_PORT(bp);
591 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
592 u32 val = REG_RD(bp, addr);
593 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
594
595 if (msix) {
596 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
597 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
598 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
599 } else {
600 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
601 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
602 HC_CONFIG_0_REG_INT_LINE_EN_0 |
603 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
604
605 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
606 val, port, addr, msix);
607
608 REG_WR(bp, addr, val);
609
610 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
611 }
612
613 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
614 val, port, addr, msix);
615
616 REG_WR(bp, addr, val);
617
618 if (CHIP_IS_E1H(bp)) {
619 /* init leading/trailing edge */
620 if (IS_E1HMF(bp)) {
621 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
622 if (bp->port.pmf)
623 /* enable nig attention */
624 val |= 0x0100;
625 } else
626 val = 0xffff;
627
628 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
629 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
630 }
631 }
632
633 static void bnx2x_int_disable(struct bnx2x *bp)
634 {
635 int port = BP_PORT(bp);
636 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
637 u32 val = REG_RD(bp, addr);
638
639 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
641 HC_CONFIG_0_REG_INT_LINE_EN_0 |
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643
644 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
645 val, port, addr);
646
647 REG_WR(bp, addr, val);
648 if (REG_RD(bp, addr) != val)
649 BNX2X_ERR("BUG! proper val not read from IGU!\n");
650 }
651
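      /* Block interrupt handling in software via intr_sem, optionally mask
       * interrupts in the HW as well, then wait for all in-flight ISRs and
       * the slowpath work item to complete.
       */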
652 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
653 {
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655 int i;
656
657 /* disable interrupt handling */
658 atomic_inc(&bp->intr_sem);
659 if (disable_hw)
660 /* prevent the HW from sending interrupts */
661 bnx2x_int_disable(bp);
662
663 /* make sure all ISRs are done */
664 if (msix) {
665 for_each_queue(bp, i)
666 synchronize_irq(bp->msix_table[i].vector);
667
668 /* one more for the Slow Path IRQ */
669 synchronize_irq(bp->msix_table[i].vector);
670 } else
671 synchronize_irq(bp->pdev->irq);
672
673 /* make sure sp_task is not running */
674 cancel_work_sync(&bp->sp_task);
675 }
676
677 /* fast path */
678
679 /*
680 * General service functions
681 */
682
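      /* Acknowledge a status block index to the IGU: build an igu_ack_register
       * word (status block id, storm id, index update flag and interrupt mode)
       * and write it to the per-port HC interrupt-ack command register.
       */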
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684 u8 storm, u16 index, u8 op, u8 update)
685 {
686 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 COMMAND_REG_INT_ACK);
688 struct igu_ack_register igu_ack;
689
690 igu_ack.status_block_index = index;
691 igu_ack.sb_id_and_flags =
692 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696
697 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
700 }
701
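      /* Refresh the cached fastpath status block indices; the return value is
       * a bitmask: bit 0 - the CSTORM index changed, bit 1 - the USTORM index
       * changed.
       */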
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
703 {
704 struct host_status_block *fpsb = fp->status_blk;
705 u16 rc = 0;
706
707 barrier(); /* status block is written to by the chip */
708 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
710 rc |= 1;
711 }
712 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
714 rc |= 2;
715 }
716 return rc;
717 }
718
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
720 {
721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 COMMAND_REG_SIMD_MASK);
723 u32 result = REG_RD(bp, hc_addr);
724
725 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
726 result, hc_addr);
727
728 return result;
729 }
730
731
732 /*
733 * fast path service functions
734 */
735
736 /* free skb in the packet ring at pos idx
737 * return idx of last bd freed
738 */
739 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
740 u16 idx)
741 {
742 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
743 struct eth_tx_bd *tx_bd;
744 struct sk_buff *skb = tx_buf->skb;
745 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
746 int nbd;
747
748 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
749 idx, tx_buf, skb);
750
751 /* unmap first bd */
752 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
753 tx_bd = &fp->tx_desc_ring[bd_idx];
754 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
755 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
756
757 nbd = le16_to_cpu(tx_bd->nbd) - 1;
758 new_cons = nbd + tx_buf->first_bd;
759 #ifdef BNX2X_STOP_ON_ERROR
760 if (nbd > (MAX_SKB_FRAGS + 2)) {
761 BNX2X_ERR("BAD nbd!\n");
762 bnx2x_panic();
763 }
764 #endif
765
766 /* Skip a parse bd and the TSO split header bd
767 since they have no mapping */
768 if (nbd)
769 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
770
771 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
772 ETH_TX_BD_FLAGS_TCP_CSUM |
773 ETH_TX_BD_FLAGS_SW_LSO)) {
774 if (--nbd)
775 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
776 tx_bd = &fp->tx_desc_ring[bd_idx];
777 /* is this a TSO split header bd? */
778 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
779 if (--nbd)
780 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
781 }
782 }
783
784 /* now free frags */
785 while (nbd > 0) {
786
787 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
788 tx_bd = &fp->tx_desc_ring[bd_idx];
789 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
790 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
791 if (--nbd)
792 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
793 }
794
795 /* release skb */
796 WARN_ON(!skb);
797 dev_kfree_skb(skb);
798 tx_buf->first_bd = 0;
799 tx_buf->skb = NULL;
800
801 return new_cons;
802 }
803
804 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
805 {
806 s16 used;
807 u16 prod;
808 u16 cons;
809
810 barrier(); /* Tell compiler that prod and cons can change */
811 prod = fp->tx_bd_prod;
812 cons = fp->tx_bd_cons;
813
814 /* NUM_TX_RINGS = number of "next-page" entries
815 It will be used as a threshold */
816 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
817
818 #ifdef BNX2X_STOP_ON_ERROR
819 WARN_ON(used < 0);
820 WARN_ON(used > fp->bp->tx_ring_size);
821 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
822 #endif
823
824 return (s16)(fp->bp->tx_ring_size) - used;
825 }
826
827 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
828 {
829 struct bnx2x *bp = fp->bp;
830 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
831 int done = 0;
832
833 #ifdef BNX2X_STOP_ON_ERROR
834 if (unlikely(bp->panic))
835 return;
836 #endif
837
838 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
839 sw_cons = fp->tx_pkt_cons;
840
841 while (sw_cons != hw_cons) {
842 u16 pkt_cons;
843
844 pkt_cons = TX_BD(sw_cons);
845
846 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
847
848 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
849 hw_cons, sw_cons, pkt_cons);
850
851 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
852 rmb();
853 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
854 }
855 */
856 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
857 sw_cons++;
858 done++;
859
860 if (done == work)
861 break;
862 }
863
864 fp->tx_pkt_cons = sw_cons;
865 fp->tx_bd_cons = bd_cons;
866
867 /* Need to make the tx_cons update visible to start_xmit()
868 * before checking for netif_queue_stopped(). Without the
869 * memory barrier, there is a small possibility that start_xmit()
870 * will miss it and cause the queue to be stopped forever.
871 */
872 smp_mb();
873
874 /* TBD need a thresh? */
875 if (unlikely(netif_queue_stopped(bp->dev))) {
876
877 netif_tx_lock(bp->dev);
878
879 if (netif_queue_stopped(bp->dev) &&
880 (bp->state == BNX2X_STATE_OPEN) &&
881 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
882 netif_wake_queue(bp->dev);
883
884 netif_tx_unlock(bp->dev);
885 }
886 }
887
888
889 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
890 union eth_rx_cqe *rr_cqe)
891 {
892 struct bnx2x *bp = fp->bp;
893 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
894 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
895
896 DP(BNX2X_MSG_SP,
897 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
898 FP_IDX(fp), cid, command, bp->state,
899 rr_cqe->ramrod_cqe.ramrod_type);
900
901 bp->spq_left++;
902
903 if (FP_IDX(fp)) {
904 switch (command | fp->state) {
905 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
906 BNX2X_FP_STATE_OPENING):
907 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
908 cid);
909 fp->state = BNX2X_FP_STATE_OPEN;
910 break;
911
912 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
913 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
914 cid);
915 fp->state = BNX2X_FP_STATE_HALTED;
916 break;
917
918 default:
919 BNX2X_ERR("unexpected MC reply (%d) "
920 "fp->state is %x\n", command, fp->state);
921 break;
922 }
923 mb(); /* force bnx2x_wait_ramrod() to see the change */
924 return;
925 }
926
927 switch (command | bp->state) {
928 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
929 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
930 bp->state = BNX2X_STATE_OPEN;
931 break;
932
933 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
934 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
935 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
936 fp->state = BNX2X_FP_STATE_HALTED;
937 break;
938
939 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
940 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
941 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
942 break;
943
944
945 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
946 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
947 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
948 bp->set_mac_pending = 0;
949 break;
950
951 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
952 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
953 break;
954
955 default:
956 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
957 command, bp->state);
958 break;
959 }
960 mb(); /* force bnx2x_wait_ramrod() to see the change */
961 }
962
963 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
964 struct bnx2x_fastpath *fp, u16 index)
965 {
966 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
967 struct page *page = sw_buf->page;
968 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
969
970 /* Skip "next page" elements */
971 if (!page)
972 return;
973
974 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
975 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
976 __free_pages(page, PAGES_PER_SGE_SHIFT);
977
978 sw_buf->page = NULL;
979 sge->addr_hi = 0;
980 sge->addr_lo = 0;
981 }
982
983 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
984 struct bnx2x_fastpath *fp, int last)
985 {
986 int i;
987
988 for (i = 0; i < last; i++)
989 bnx2x_free_rx_sge(bp, fp, i);
990 }
991
992 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
993 struct bnx2x_fastpath *fp, u16 index)
994 {
995 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
996 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
997 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
998 dma_addr_t mapping;
999
1000 if (unlikely(page == NULL))
1001 return -ENOMEM;
1002
1003 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1004 PCI_DMA_FROMDEVICE);
1005 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1006 __free_pages(page, PAGES_PER_SGE_SHIFT);
1007 return -ENOMEM;
1008 }
1009
1010 sw_buf->page = page;
1011 pci_unmap_addr_set(sw_buf, mapping, mapping);
1012
1013 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1014 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1015
1016 return 0;
1017 }
1018
1019 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1020 struct bnx2x_fastpath *fp, u16 index)
1021 {
1022 struct sk_buff *skb;
1023 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1024 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1025 dma_addr_t mapping;
1026
1027 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1028 if (unlikely(skb == NULL))
1029 return -ENOMEM;
1030
1031 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1032 PCI_DMA_FROMDEVICE);
1033 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1034 dev_kfree_skb(skb);
1035 return -ENOMEM;
1036 }
1037
1038 rx_buf->skb = skb;
1039 pci_unmap_addr_set(rx_buf, mapping, mapping);
1040
1041 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1042 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1043
1044 return 0;
1045 }
1046
1047 /* note that we are not allocating a new skb,
1048 * we are just moving one from cons to prod
1049 * we are not creating a new mapping,
1050 * so there is no need to check for dma_mapping_error().
1051 */
1052 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1053 struct sk_buff *skb, u16 cons, u16 prod)
1054 {
1055 struct bnx2x *bp = fp->bp;
1056 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1057 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1058 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1059 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1060
1061 pci_dma_sync_single_for_device(bp->pdev,
1062 pci_unmap_addr(cons_rx_buf, mapping),
1063 bp->rx_offset + RX_COPY_THRESH,
1064 PCI_DMA_FROMDEVICE);
1065
1066 prod_rx_buf->skb = cons_rx_buf->skb;
1067 pci_unmap_addr_set(prod_rx_buf, mapping,
1068 pci_unmap_addr(cons_rx_buf, mapping));
1069 *prod_bd = *cons_bd;
1070 }
1071
1072 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1073 u16 idx)
1074 {
1075 u16 last_max = fp->last_max_sge;
1076
1077 if (SUB_S16(idx, last_max) > 0)
1078 fp->last_max_sge = idx;
1079 }
1080
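      /* Clear the mask bits of the two "next page" SGE entries at the end of
       * each ring page so they are never treated as outstanding buffers.
       */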
1081 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1082 {
1083 int i, j;
1084
1085 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1086 int idx = RX_SGE_CNT * i - 1;
1087
1088 for (j = 0; j < 2; j++) {
1089 SGE_MASK_CLEAR_BIT(fp, idx);
1090 idx--;
1091 }
1092 }
1093 }
1094
1095 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1096 struct eth_fast_path_rx_cqe *fp_cqe)
1097 {
1098 struct bnx2x *bp = fp->bp;
1099 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1100 le16_to_cpu(fp_cqe->len_on_bd)) >>
1101 BCM_PAGE_SHIFT;
1102 u16 last_max, last_elem, first_elem;
1103 u16 delta = 0;
1104 u16 i;
1105
1106 if (!sge_len)
1107 return;
1108
1109 /* First mark all used pages */
1110 for (i = 0; i < sge_len; i++)
1111 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1112
1113 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1114 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1115
1116 /* Here we assume that the last SGE index is the biggest */
1117 prefetch((void *)(fp->sge_mask));
1118 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1119
1120 last_max = RX_SGE(fp->last_max_sge);
1121 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1122 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1123
1124 /* If ring is not full */
1125 if (last_elem + 1 != first_elem)
1126 last_elem++;
1127
1128 /* Now update the prod */
1129 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1130 if (likely(fp->sge_mask[i]))
1131 break;
1132
1133 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1134 delta += RX_SGE_MASK_ELEM_SZ;
1135 }
1136
1137 if (delta > 0) {
1138 fp->rx_sge_prod += delta;
1139 /* clear page-end entries */
1140 bnx2x_clear_sge_mask_next_elems(fp);
1141 }
1142
1143 DP(NETIF_MSG_RX_STATUS,
1144 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1145 fp->last_max_sge, fp->rx_sge_prod);
1146 }
1147
1148 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1149 {
1150 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1151 memset(fp->sge_mask, 0xff,
1152 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1153
1154 /* Clear the two last indices in the page to 1:
1155 these are the indices that correspond to the "next" element,
1156 hence will never be indicated and should be removed from
1157 the calculations. */
1158 bnx2x_clear_sge_mask_next_elems(fp);
1159 }
1160
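      /* Start of a TPA (LRO) aggregation: map the spare skb from the per-queue
       * TPA pool into the producer slot and park the just-received skb in the
       * pool until the aggregation is stopped.
       */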
1161 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1162 struct sk_buff *skb, u16 cons, u16 prod)
1163 {
1164 struct bnx2x *bp = fp->bp;
1165 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1166 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1167 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1168 dma_addr_t mapping;
1169
1170 /* move empty skb from pool to prod and map it */
1171 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1172 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1173 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1174 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1175
1176 /* move partial skb from cons to pool (don't unmap yet) */
1177 fp->tpa_pool[queue] = *cons_rx_buf;
1178
1179 /* mark bin state as start - print error if current state != stop */
1180 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1181 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1182
1183 fp->tpa_state[queue] = BNX2X_TPA_START;
1184
1185 /* point prod_bd to new skb */
1186 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1187 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1188
1189 #ifdef BNX2X_STOP_ON_ERROR
1190 fp->tpa_queue_used |= (1 << queue);
1191 #ifdef __powerpc64__
1192 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1193 #else
1194 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1195 #endif
1196 fp->tpa_queue_used);
1197 #endif
1198 }
1199
1200 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1201 struct sk_buff *skb,
1202 struct eth_fast_path_rx_cqe *fp_cqe,
1203 u16 cqe_idx)
1204 {
1205 struct sw_rx_page *rx_pg, old_rx_pg;
1206 struct page *sge;
1207 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1208 u32 i, frag_len, frag_size, pages;
1209 int err;
1210 int j;
1211
1212 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1213 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1214
1215 /* This is needed in order to enable forwarding support */
1216 if (frag_size)
1217 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1218 max(frag_size, (u32)len_on_bd));
1219
1220 #ifdef BNX2X_STOP_ON_ERROR
1221 if (pages > 8*PAGES_PER_SGE) {
1222 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1223 pages, cqe_idx);
1224 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1225 fp_cqe->pkt_len, len_on_bd);
1226 bnx2x_panic();
1227 return -EINVAL;
1228 }
1229 #endif
1230
1231 /* Run through the SGL and compose the fragmented skb */
1232 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1233 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1234
1235 /* FW gives the indices of the SGE as if the ring is an array
1236 (meaning that "next" element will consume 2 indices) */
1237 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1238 rx_pg = &fp->rx_page_ring[sge_idx];
1239 sge = rx_pg->page;
1240 old_rx_pg = *rx_pg;
1241
1242 /* If we fail to allocate a substitute page, we simply stop
1243 where we are and drop the whole packet */
1244 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1245 if (unlikely(err)) {
1246 bp->eth_stats.rx_skb_alloc_failed++;
1247 return err;
1248 }
1249
1250 /* Unmap the page as we are going to pass it to the stack */
1251 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1252 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1253
1254 /* Add one frag and update the appropriate fields in the skb */
1255 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1256
1257 skb->data_len += frag_len;
1258 skb->truesize += frag_len;
1259 skb->len += frag_len;
1260
1261 frag_size -= frag_len;
1262 }
1263
1264 return 0;
1265 }
1266
1267 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1268 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1269 u16 cqe_idx)
1270 {
1271 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1272 struct sk_buff *skb = rx_buf->skb;
1273 /* alloc new skb */
1274 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1275
1276 /* Unmap skb in the pool anyway, as we are going to change
1277 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1278 fails. */
1279 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1280 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1281
1282 if (likely(new_skb)) {
1283 /* fix ip xsum and give it to the stack */
1284 /* (no need to map the new skb) */
1285
1286 prefetch(skb);
1287 prefetch(((char *)(skb)) + 128);
1288
1289 #ifdef BNX2X_STOP_ON_ERROR
1290 if (pad + len > bp->rx_buf_size) {
1291 BNX2X_ERR("skb_put is about to fail... "
1292 "pad %d len %d rx_buf_size %d\n",
1293 pad, len, bp->rx_buf_size);
1294 bnx2x_panic();
1295 return;
1296 }
1297 #endif
1298
1299 skb_reserve(skb, pad);
1300 skb_put(skb, len);
1301
1302 skb->protocol = eth_type_trans(skb, bp->dev);
1303 skb->ip_summed = CHECKSUM_UNNECESSARY;
1304
1305 {
1306 struct iphdr *iph;
1307
1308 iph = (struct iphdr *)skb->data;
1309 iph->check = 0;
1310 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1311 }
1312
1313 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1314 &cqe->fast_path_cqe, cqe_idx)) {
1315 #ifdef BCM_VLAN
1316 if ((bp->vlgrp != NULL) &&
1317 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1318 PARSING_FLAGS_VLAN))
1319 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1320 le16_to_cpu(cqe->fast_path_cqe.
1321 vlan_tag));
1322 else
1323 #endif
1324 netif_receive_skb(skb);
1325 } else {
1326 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1327 " - dropping packet!\n");
1328 dev_kfree_skb(skb);
1329 }
1330
1331 bp->dev->last_rx = jiffies;
1332
1333 /* put new skb in bin */
1334 fp->tpa_pool[queue].skb = new_skb;
1335
1336 } else {
1337 /* else drop the packet and keep the buffer in the bin */
1338 DP(NETIF_MSG_RX_STATUS,
1339 "Failed to allocate new skb - dropping packet!\n");
1340 bp->eth_stats.rx_skb_alloc_failed++;
1341 }
1342
1343 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1344 }
1345
1346 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1347 struct bnx2x_fastpath *fp,
1348 u16 bd_prod, u16 rx_comp_prod,
1349 u16 rx_sge_prod)
1350 {
1351 struct tstorm_eth_rx_producers rx_prods = {0};
1352 int i;
1353
1354 /* Update producers */
1355 rx_prods.bd_prod = bd_prod;
1356 rx_prods.cqe_prod = rx_comp_prod;
1357 rx_prods.sge_prod = rx_sge_prod;
1358
1359 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1360 REG_WR(bp, BAR_TSTRORM_INTMEM +
1361 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1362 ((u32 *)&rx_prods)[i]);
1363
1364 DP(NETIF_MSG_RX_STATUS,
1365 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1366 bd_prod, rx_comp_prod, rx_sge_prod);
1367 }
1368
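      /* Rx poll loop: consume up to 'budget' completions from the RCQ, routing
       * slowpath CQEs to bnx2x_sp_event(), TPA start/end CQEs to the TPA
       * helpers, and regular packets to the stack, then publish the new
       * producers to the TSTORM.
       */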
1369 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1370 {
1371 struct bnx2x *bp = fp->bp;
1372 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1373 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1374 int rx_pkt = 0;
1375
1376 #ifdef BNX2X_STOP_ON_ERROR
1377 if (unlikely(bp->panic))
1378 return 0;
1379 #endif
1380
1381 /* CQ "next element" is of the size of the regular element,
1382 that's why it's ok here */
1383 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1384 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1385 hw_comp_cons++;
1386
1387 bd_cons = fp->rx_bd_cons;
1388 bd_prod = fp->rx_bd_prod;
1389 bd_prod_fw = bd_prod;
1390 sw_comp_cons = fp->rx_comp_cons;
1391 sw_comp_prod = fp->rx_comp_prod;
1392
1393 /* Memory barrier necessary as speculative reads of the rx
1394 * buffer can be ahead of the index in the status block
1395 */
1396 rmb();
1397
1398 DP(NETIF_MSG_RX_STATUS,
1399 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1400 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1401
1402 while (sw_comp_cons != hw_comp_cons) {
1403 struct sw_rx_bd *rx_buf = NULL;
1404 struct sk_buff *skb;
1405 union eth_rx_cqe *cqe;
1406 u8 cqe_fp_flags;
1407 u16 len, pad;
1408
1409 comp_ring_cons = RCQ_BD(sw_comp_cons);
1410 bd_prod = RX_BD(bd_prod);
1411 bd_cons = RX_BD(bd_cons);
1412
1413 cqe = &fp->rx_comp_ring[comp_ring_cons];
1414 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1415
1416 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1417 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1418 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1419 cqe->fast_path_cqe.rss_hash_result,
1420 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1421 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1422
1423 /* is this a slowpath msg? */
1424 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1425 bnx2x_sp_event(fp, cqe);
1426 goto next_cqe;
1427
1428 /* this is an rx packet */
1429 } else {
1430 rx_buf = &fp->rx_buf_ring[bd_cons];
1431 skb = rx_buf->skb;
1432 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1433 pad = cqe->fast_path_cqe.placement_offset;
1434
1435 /* If CQE is marked both TPA_START and TPA_END
1436 it is a non-TPA CQE */
1437 if ((!fp->disable_tpa) &&
1438 (TPA_TYPE(cqe_fp_flags) !=
1439 (TPA_TYPE_START | TPA_TYPE_END))) {
1440 u16 queue = cqe->fast_path_cqe.queue_index;
1441
1442 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1443 DP(NETIF_MSG_RX_STATUS,
1444 "calling tpa_start on queue %d\n",
1445 queue);
1446
1447 bnx2x_tpa_start(fp, queue, skb,
1448 bd_cons, bd_prod);
1449 goto next_rx;
1450 }
1451
1452 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1453 DP(NETIF_MSG_RX_STATUS,
1454 "calling tpa_stop on queue %d\n",
1455 queue);
1456
1457 if (!BNX2X_RX_SUM_FIX(cqe))
1458 BNX2X_ERR("STOP on none TCP "
1459 "data\n");
1460
1461 /* This is the size of the linear data
1462 on this skb */
1463 len = le16_to_cpu(cqe->fast_path_cqe.
1464 len_on_bd);
1465 bnx2x_tpa_stop(bp, fp, queue, pad,
1466 len, cqe, comp_ring_cons);
1467 #ifdef BNX2X_STOP_ON_ERROR
1468 if (bp->panic)
1469 return -EINVAL;
1470 #endif
1471
1472 bnx2x_update_sge_prod(fp,
1473 &cqe->fast_path_cqe);
1474 goto next_cqe;
1475 }
1476 }
1477
1478 pci_dma_sync_single_for_device(bp->pdev,
1479 pci_unmap_addr(rx_buf, mapping),
1480 pad + RX_COPY_THRESH,
1481 PCI_DMA_FROMDEVICE);
1482 prefetch(skb);
1483 prefetch(((char *)(skb)) + 128);
1484
1485 /* is this an error packet? */
1486 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1487 DP(NETIF_MSG_RX_ERR,
1488 "ERROR flags %x rx packet %u\n",
1489 cqe_fp_flags, sw_comp_cons);
1490 bp->eth_stats.rx_err_discard_pkt++;
1491 goto reuse_rx;
1492 }
1493
1494 /* Since we don't have a jumbo ring
1495 * copy small packets if mtu > 1500
1496 */
1497 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1498 (len <= RX_COPY_THRESH)) {
1499 struct sk_buff *new_skb;
1500
1501 new_skb = netdev_alloc_skb(bp->dev,
1502 len + pad);
1503 if (new_skb == NULL) {
1504 DP(NETIF_MSG_RX_ERR,
1505 "ERROR packet dropped "
1506 "because of alloc failure\n");
1507 bp->eth_stats.rx_skb_alloc_failed++;
1508 goto reuse_rx;
1509 }
1510
1511 /* aligned copy */
1512 skb_copy_from_linear_data_offset(skb, pad,
1513 new_skb->data + pad, len);
1514 skb_reserve(new_skb, pad);
1515 skb_put(new_skb, len);
1516
1517 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1518
1519 skb = new_skb;
1520
1521 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1522 pci_unmap_single(bp->pdev,
1523 pci_unmap_addr(rx_buf, mapping),
1524 bp->rx_buf_size,
1525 PCI_DMA_FROMDEVICE);
1526 skb_reserve(skb, pad);
1527 skb_put(skb, len);
1528
1529 } else {
1530 DP(NETIF_MSG_RX_ERR,
1531 "ERROR packet dropped because "
1532 "of alloc failure\n");
1533 bp->eth_stats.rx_skb_alloc_failed++;
1534 reuse_rx:
1535 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1536 goto next_rx;
1537 }
1538
1539 skb->protocol = eth_type_trans(skb, bp->dev);
1540
1541 skb->ip_summed = CHECKSUM_NONE;
1542 if (bp->rx_csum) {
1543 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1544 skb->ip_summed = CHECKSUM_UNNECESSARY;
1545 else
1546 bp->eth_stats.hw_csum_err++;
1547 }
1548 }
1549
1550 #ifdef BCM_VLAN
1551 if ((bp->vlgrp != NULL) &&
1552 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1553 PARSING_FLAGS_VLAN))
1554 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1555 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1556 else
1557 #endif
1558 netif_receive_skb(skb);
1559
1560 bp->dev->last_rx = jiffies;
1561
1562 next_rx:
1563 rx_buf->skb = NULL;
1564
1565 bd_cons = NEXT_RX_IDX(bd_cons);
1566 bd_prod = NEXT_RX_IDX(bd_prod);
1567 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1568 rx_pkt++;
1569 next_cqe:
1570 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1571 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1572
1573 if (rx_pkt == budget)
1574 break;
1575 } /* while */
1576
1577 fp->rx_bd_cons = bd_cons;
1578 fp->rx_bd_prod = bd_prod_fw;
1579 fp->rx_comp_cons = sw_comp_cons;
1580 fp->rx_comp_prod = sw_comp_prod;
1581
1582 /* Update producers */
1583 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1584 fp->rx_sge_prod);
1585 mmiowb(); /* keep prod updates ordered */
1586
1587 fp->rx_pkt += rx_pkt;
1588 fp->rx_calls++;
1589
1590 return rx_pkt;
1591 }
1592
1593 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1594 {
1595 struct bnx2x_fastpath *fp = fp_cookie;
1596 struct bnx2x *bp = fp->bp;
1597 struct net_device *dev = bp->dev;
1598 int index = FP_IDX(fp);
1599
1600 /* Return here if interrupt is disabled */
1601 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1602 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1603 return IRQ_HANDLED;
1604 }
1605
1606 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1607 index, FP_SB_ID(fp));
1608 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1609
1610 #ifdef BNX2X_STOP_ON_ERROR
1611 if (unlikely(bp->panic))
1612 return IRQ_HANDLED;
1613 #endif
1614
1615 prefetch(fp->rx_cons_sb);
1616 prefetch(fp->tx_cons_sb);
1617 prefetch(&fp->status_blk->c_status_block.status_block_index);
1618 prefetch(&fp->status_blk->u_status_block.status_block_index);
1619
1620 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1621
1622 return IRQ_HANDLED;
1623 }
1624
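      /* INT#A / shared interrupt handler: ack the IGU to read the SIMD status
       * mask, schedule NAPI for fastpath 0 if its bit is set and the slowpath
       * work item if bit 0 is set.
       */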
1625 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1626 {
1627 struct net_device *dev = dev_instance;
1628 struct bnx2x *bp = netdev_priv(dev);
1629 u16 status = bnx2x_ack_int(bp);
1630 u16 mask;
1631
1632 /* Return here if interrupt is shared and it's not for us */
1633 if (unlikely(status == 0)) {
1634 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1635 return IRQ_NONE;
1636 }
1637 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1638
1639 /* Return here if interrupt is disabled */
1640 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1641 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1642 return IRQ_HANDLED;
1643 }
1644
1645 #ifdef BNX2X_STOP_ON_ERROR
1646 if (unlikely(bp->panic))
1647 return IRQ_HANDLED;
1648 #endif
1649
1650 mask = 0x2 << bp->fp[0].sb_id;
1651 if (status & mask) {
1652 struct bnx2x_fastpath *fp = &bp->fp[0];
1653
1654 prefetch(fp->rx_cons_sb);
1655 prefetch(fp->tx_cons_sb);
1656 prefetch(&fp->status_blk->c_status_block.status_block_index);
1657 prefetch(&fp->status_blk->u_status_block.status_block_index);
1658
1659 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1660
1661 status &= ~mask;
1662 }
1663
1664
1665 if (unlikely(status & 0x1)) {
1666 schedule_work(&bp->sp_task);
1667
1668 status &= ~0x1;
1669 if (!status)
1670 return IRQ_HANDLED;
1671 }
1672
1673 if (status)
1674 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1675 status);
1676
1677 return IRQ_HANDLED;
1678 }
1679
1680 /* end of fast path */
1681
1682 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1683
1684 /* Link */
1685
1686 /*
1687 * General service functions
1688 */
1689
1690 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1691 {
1692 u32 lock_status;
1693 u32 resource_bit = (1 << resource);
1694 int func = BP_FUNC(bp);
1695 u32 hw_lock_control_reg;
1696 int cnt;
1697
1698 /* Validating that the resource is within range */
1699 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1700 DP(NETIF_MSG_HW,
1701 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1702 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1703 return -EINVAL;
1704 }
1705
1706 if (func <= 5) {
1707 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1708 } else {
1709 hw_lock_control_reg =
1710 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1711 }
1712
1713 /* Validating that the resource is not already taken */
1714 lock_status = REG_RD(bp, hw_lock_control_reg);
1715 if (lock_status & resource_bit) {
1716 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1717 lock_status, resource_bit);
1718 return -EEXIST;
1719 }
1720
1721 /* Try for 5 seconds, polling every 5ms */
1722 for (cnt = 0; cnt < 1000; cnt++) {
1723 /* Try to acquire the lock */
1724 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1725 lock_status = REG_RD(bp, hw_lock_control_reg);
1726 if (lock_status & resource_bit)
1727 return 0;
1728
1729 msleep(5);
1730 }
1731 DP(NETIF_MSG_HW, "Timeout\n");
1732 return -EAGAIN;
1733 }
1734
1735 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1736 {
1737 u32 lock_status;
1738 u32 resource_bit = (1 << resource);
1739 int func = BP_FUNC(bp);
1740 u32 hw_lock_control_reg;
1741
1742 /* Validating that the resource is within range */
1743 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1744 DP(NETIF_MSG_HW,
1745 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1746 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1747 return -EINVAL;
1748 }
1749
1750 if (func <= 5) {
1751 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1752 } else {
1753 hw_lock_control_reg =
1754 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1755 }
1756
1757 /* Validating that the resource is currently taken */
1758 lock_status = REG_RD(bp, hw_lock_control_reg);
1759 if (!(lock_status & resource_bit)) {
1760 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1761 lock_status, resource_bit);
1762 return -EFAULT;
1763 }
1764
1765 REG_WR(bp, hw_lock_control_reg, resource_bit);
1766 return 0;
1767 }
1768
1769 /* HW Lock for shared dual port PHYs */
1770 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1771 {
1772 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1773
1774 mutex_lock(&bp->port.phy_mutex);
1775
1776 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1777 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1778 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1779 }
1780
1781 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1782 {
1783 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1784
1785 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1787 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1788
1789 mutex_unlock(&bp->port.phy_mutex);
1790 }
1791
1792 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1793 {
1794 /* The GPIO should be swapped if swap register is set and active */
1795 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1796 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1797 int gpio_shift = gpio_num +
1798 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1799 u32 gpio_mask = (1 << gpio_shift);
1800 u32 gpio_reg;
1801
1802 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1803 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1804 return -EINVAL;
1805 }
1806
1807 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1808 /* read GPIO and mask except the float bits */
1809 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1810
1811 switch (mode) {
1812 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1813 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1814 gpio_num, gpio_shift);
1815 /* clear FLOAT and set CLR */
1816 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1817 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1818 break;
1819
1820 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1821 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1822 gpio_num, gpio_shift);
1823 /* clear FLOAT and set SET */
1824 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1825 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1826 break;
1827
1828 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1829 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1830 gpio_num, gpio_shift);
1831 /* set FLOAT */
1832 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1833 break;
1834
1835 default:
1836 break;
1837 }
1838
1839 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1840 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1841
1842 return 0;
1843 }
1844
1845 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1846 {
1847 u32 spio_mask = (1 << spio_num);
1848 u32 spio_reg;
1849
1850 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1851 (spio_num > MISC_REGISTERS_SPIO_7)) {
1852 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1853 return -EINVAL;
1854 }
1855
1856 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1857 /* read SPIO and mask except the float bits */
1858 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1859
1860 switch (mode) {
1861 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1862 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1863 /* clear FLOAT and set CLR */
1864 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1865 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1866 break;
1867
1868 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1869 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1870 /* clear FLOAT and set SET */
1871 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1872 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1873 break;
1874
1875 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1876 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1877 /* set FLOAT */
1878 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1879 break;
1880
1881 default:
1882 break;
1883 }
1884
1885 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1886 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1887
1888 return 0;
1889 }
1890
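      /* Translate the IEEE flow-control advertisement (link_vars.ieee_fc) into
       * the ethtool ADVERTISED_Pause / ADVERTISED_Asym_Pause bits in
       * bp->port.advertising.
       */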
1891 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1892 {
1893 switch (bp->link_vars.ieee_fc) {
1894 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1895 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1896 ADVERTISED_Pause);
1897 break;
1898 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1899 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1900 ADVERTISED_Pause);
1901 break;
1902 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1903 bp->port.advertising |= ADVERTISED_Asym_Pause;
1904 break;
1905 default:
1906 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1907 ADVERTISED_Pause);
1908 break;
1909 }
1910 }
1911
1912 static void bnx2x_link_report(struct bnx2x *bp)
1913 {
1914 if (bp->link_vars.link_up) {
1915 if (bp->state == BNX2X_STATE_OPEN)
1916 netif_carrier_on(bp->dev);
1917 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1918
1919 printk("%d Mbps ", bp->link_vars.line_speed);
1920
1921 if (bp->link_vars.duplex == DUPLEX_FULL)
1922 printk("full duplex");
1923 else
1924 printk("half duplex");
1925
1926 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1927 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1928 printk(", receive ");
1929 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1930 printk("& transmit ");
1931 } else {
1932 printk(", transmit ");
1933 }
1934 printk("flow control ON");
1935 }
1936 printk("\n");
1937
1938 } else { /* link_down */
1939 netif_carrier_off(bp->dev);
1940 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1941 }
1942 }
1943
1944 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1945 {
1946 if (!BP_NOMCP(bp)) {
1947 u8 rc;
1948
1949 /* Initialize link parameters structure variables */
1950 /* It is recommended to turn off RX FC for jumbo frames
1951 for better performance */
1952 if (IS_E1HMF(bp))
1953 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1954 else if (bp->dev->mtu > 5000)
1955 bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1956 else
1957 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1958
1959 bnx2x_acquire_phy_lock(bp);
1960 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1961 bnx2x_release_phy_lock(bp);
1962
1963 if (bp->link_vars.link_up)
1964 bnx2x_link_report(bp);
1965
1966 bnx2x_calc_fc_adv(bp);
1967
1968 return rc;
1969 }
1970 	BNX2X_ERR("Bootcode is missing - not initializing link\n");
1971 return -EINVAL;
1972 }
1973
1974 static void bnx2x_link_set(struct bnx2x *bp)
1975 {
1976 if (!BP_NOMCP(bp)) {
1977 bnx2x_acquire_phy_lock(bp);
1978 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1979 bnx2x_release_phy_lock(bp);
1980
1981 bnx2x_calc_fc_adv(bp);
1982 } else
1983 		BNX2X_ERR("Bootcode is missing - not setting link\n");
1984 }
1985
1986 static void bnx2x__link_reset(struct bnx2x *bp)
1987 {
1988 if (!BP_NOMCP(bp)) {
1989 bnx2x_acquire_phy_lock(bp);
1990 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1991 bnx2x_release_phy_lock(bp);
1992 } else
1993 		BNX2X_ERR("Bootcode is missing - not resetting link\n");
1994 }
1995
1996 static u8 bnx2x_link_test(struct bnx2x *bp)
1997 {
1998 u8 rc;
1999
2000 bnx2x_acquire_phy_lock(bp);
2001 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2002 bnx2x_release_phy_lock(bp);
2003
2004 return rc;
2005 }
2006
2007 /* Calculates the sum of vn_min_rates.
2008 It's needed for further normalizing of the min_rates.
2009
2010 Returns:
2011 sum of vn_min_rates
2012 or
2013 0 - if all the min_rates are 0.
2014      In the latter case the fairness algorithm should be deactivated.
2015 If not all min_rates are zero then those that are zeroes will
2016 be set to 1.
2017 */
2018 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2019 {
2020 int i, port = BP_PORT(bp);
2021 u32 wsum = 0;
2022 int all_zero = 1;
2023
2024 for (i = 0; i < E1HVN_MAX; i++) {
2025 u32 vn_cfg =
2026 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2027 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2028 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2029 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2030 /* If min rate is zero - set it to 1 */
2031 if (!vn_min_rate)
2032 vn_min_rate = DEF_MIN_RATE;
2033 else
2034 all_zero = 0;
2035
2036 wsum += vn_min_rate;
2037 }
2038 }
2039
2040 /* ... only if all min rates are zeros - disable FAIRNESS */
2041 if (all_zero)
2042 return 0;
2043
2044 return wsum;
2045 }
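/* Illustrative example (not in the original source): with three visible vns
 * whose MIN_BW fields decode to 10, 20 and 0, the loop above accumulates
 * 10*100 + 20*100 + DEF_MIN_RATE and leaves fairness enabled; if every
 * MIN_BW field is 0, all_zero stays set and 0 is returned, which the caller
 * uses to disable the fairness algorithm.
 */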
2046
2047 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2048 int en_fness,
2049 u16 port_rate,
2050 struct cmng_struct_per_port *m_cmng_port)
2051 {
2052 u32 r_param = port_rate / 8;
2053 int port = BP_PORT(bp);
2054 int i;
2055
2056 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2057
2058 /* Enable minmax only if we are in e1hmf mode */
2059 if (IS_E1HMF(bp)) {
2060 u32 fair_periodic_timeout_usec;
2061 u32 t_fair;
2062
2063 /* Enable rate shaping and fairness */
2064 m_cmng_port->flags.cmng_vn_enable = 1;
2065 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2066 m_cmng_port->flags.rate_shaping_enable = 1;
2067
2068 if (!en_fness)
2069 			DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2070 			   " fairness will be disabled\n");
2071
2072 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2073 m_cmng_port->rs_vars.rs_periodic_timeout =
2074 RS_PERIODIC_TIMEOUT_USEC / 4;
2075
2076 		/* this is the threshold below which no timer arming will occur;
2077 		   the 1.25 coefficient makes the threshold a little bigger than
2078 		   the real time, to compensate for timer inaccuracy */
2079 m_cmng_port->rs_vars.rs_threshold =
2080 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
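		/* e.g. (illustrative, assuming RS_PERIODIC_TIMEOUT_USEC is the
		   100 usec mentioned above): for a 10000 Mbps port,
		   r_param = 1250 bytes/usec and
		   rs_threshold = 100 * 1250 * 5 / 4 = 156250 bytes */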
2081
2082 /* resolution of fairness timer */
2083 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2084 		/* for 10G it is 1000 usec, for 1G it is 10000 usec */
2085 t_fair = T_FAIR_COEF / port_rate;
2086
2087 /* this is the threshold below which we won't arm
2088 the timer anymore */
2089 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2090
2091 		/* we multiply by 1e3/8 to get bytes/msec.
2092 		   We don't want the credits to exceed
2093 		   T_FAIR*FAIR_MEM (the algorithm resolution) */
2094 m_cmng_port->fair_vars.upper_bound =
2095 r_param * t_fair * FAIR_MEM;
2096 /* since each tick is 4 usec */
2097 m_cmng_port->fair_vars.fairness_timeout =
2098 fair_periodic_timeout_usec / 4;
2099
2100 } else {
2101 /* Disable rate shaping and fairness */
2102 m_cmng_port->flags.cmng_vn_enable = 0;
2103 m_cmng_port->flags.fairness_enable = 0;
2104 m_cmng_port->flags.rate_shaping_enable = 0;
2105
2106 DP(NETIF_MSG_IFUP,
2107 "Single function mode minmax will be disabled\n");
2108 }
2109
2110 /* Store it to internal memory */
2111 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2112 REG_WR(bp, BAR_XSTRORM_INTMEM +
2113 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2114 ((u32 *)(m_cmng_port))[i]);
2115 }
2116
2117 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2118 u32 wsum, u16 port_rate,
2119 struct cmng_struct_per_port *m_cmng_port)
2120 {
2121 struct rate_shaping_vars_per_vn m_rs_vn;
2122 struct fairness_vars_per_vn m_fair_vn;
2123 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2124 u16 vn_min_rate, vn_max_rate;
2125 int i;
2126
2127 /* If function is hidden - set min and max to zeroes */
2128 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2129 vn_min_rate = 0;
2130 vn_max_rate = 0;
2131
2132 } else {
2133 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2134 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2135 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2136 if current min rate is zero - set it to 1.
2137 This is a requirement of the algorithm. */
2138 if ((vn_min_rate == 0) && wsum)
2139 vn_min_rate = DEF_MIN_RATE;
2140 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2141 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2142 }
2143
2144 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2145 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2146
2147 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2148 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2149
2150 /* global vn counter - maximal Mbps for this vn */
2151 m_rs_vn.vn_counter.rate = vn_max_rate;
2152
2153 /* quota - number of bytes transmitted in this period */
2154 m_rs_vn.vn_counter.quota =
2155 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2156
2157 #ifdef BNX2X_PER_PROT_QOS
2158 /* per protocol counter */
2159 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2160 /* maximal Mbps for this protocol */
2161 m_rs_vn.protocol_counters[protocol].rate =
2162 protocol_max_rate[protocol];
2163 /* the quota in each timer period -
2164 number of bytes transmitted in this period */
2165 m_rs_vn.protocol_counters[protocol].quota =
2166 (u32)(rs_periodic_timeout_usec *
2167 ((double)m_rs_vn.
2168 protocol_counters[protocol].rate/8));
2169 }
2170 #endif
2171
2172 if (wsum) {
2173 /* credit for each period of the fairness algorithm:
2174 		   number of bytes in T_FAIR (the vns share the port rate).
2175 		   wsum should not be larger than 10000, thus
2176 		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2177 m_fair_vn.vn_credit_delta =
2178 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2179 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2180 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2181 m_fair_vn.vn_credit_delta);
2182 }
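	/* Illustrative numbers (assuming T_FAIR_COEF of ~10^7, consistent
	 * with t_fair being 1000 usec at 10G as noted above): vn_min_rate =
	 * 1000 and wsum = 4000 give 1000 * (10^7 / 32000) ~= 312000 bytes,
	 * which the max() above then clamps to at least twice the fairness
	 * threshold. */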
2183
2184 #ifdef BNX2X_PER_PROT_QOS
2185 do {
2186 u32 protocolWeightSum = 0;
2187
2188 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2189 protocolWeightSum +=
2190 drvInit.protocol_min_rate[protocol];
2191 /* per protocol counter -
2192 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2193 if (protocolWeightSum > 0) {
2194 for (protocol = 0;
2195 protocol < NUM_OF_PROTOCOLS; protocol++)
2196 /* credit for each period of the
2197 fairness algorithm - number of bytes in
2198 T_FAIR (the protocol share the vn rate) */
2199 m_fair_vn.protocol_credit_delta[protocol] =
2200 (u32)((vn_min_rate / 8) * t_fair *
2201 protocol_min_rate / protocolWeightSum);
2202 }
2203 } while (0);
2204 #endif
2205
2206 /* Store it to internal memory */
2207 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2208 REG_WR(bp, BAR_XSTRORM_INTMEM +
2209 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2210 ((u32 *)(&m_rs_vn))[i]);
2211
2212 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2213 REG_WR(bp, BAR_XSTRORM_INTMEM +
2214 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2215 ((u32 *)(&m_fair_vn))[i]);
2216 }
2217
2218 /* This function is called upon link interrupt */
2219 static void bnx2x_link_attn(struct bnx2x *bp)
2220 {
2221 int vn;
2222
2223 /* Make sure that we are synced with the current statistics */
2224 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2225
2226 bnx2x_acquire_phy_lock(bp);
2227 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2228 bnx2x_release_phy_lock(bp);
2229
2230 if (bp->link_vars.link_up) {
2231
2232 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2233 struct host_port_stats *pstats;
2234
2235 pstats = bnx2x_sp(bp, port_stats);
2236 /* reset old bmac stats */
2237 memset(&(pstats->mac_stx[0]), 0,
2238 sizeof(struct mac_stx));
2239 }
2240 if ((bp->state == BNX2X_STATE_OPEN) ||
2241 (bp->state == BNX2X_STATE_DISABLED))
2242 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2243 }
2244
2245 /* indicate link status */
2246 bnx2x_link_report(bp);
2247
2248 if (IS_E1HMF(bp)) {
2249 int func;
2250
2251 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2252 if (vn == BP_E1HVN(bp))
2253 continue;
2254
2255 func = ((vn << 1) | BP_PORT(bp));
2256
2257 /* Set the attention towards other drivers
2258 on the same port */
2259 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2260 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2261 }
2262 }
2263
2264 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2265 struct cmng_struct_per_port m_cmng_port;
2266 u32 wsum;
2267 int port = BP_PORT(bp);
2268
2269 /* Init RATE SHAPING and FAIRNESS contexts */
2270 wsum = bnx2x_calc_vn_wsum(bp);
2271 bnx2x_init_port_minmax(bp, (int)wsum,
2272 bp->link_vars.line_speed,
2273 &m_cmng_port);
2274 if (IS_E1HMF(bp))
2275 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2276 bnx2x_init_vn_minmax(bp, 2*vn + port,
2277 wsum, bp->link_vars.line_speed,
2278 &m_cmng_port);
2279 }
2280 }
2281
2282 static void bnx2x__link_status_update(struct bnx2x *bp)
2283 {
2284 if (bp->state != BNX2X_STATE_OPEN)
2285 return;
2286
2287 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2288
2289 if (bp->link_vars.link_up)
2290 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2291 else
2292 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2293
2294 /* indicate link status */
2295 bnx2x_link_report(bp);
2296 }
2297
2298 static void bnx2x_pmf_update(struct bnx2x *bp)
2299 {
2300 int port = BP_PORT(bp);
2301 u32 val;
2302
2303 bp->port.pmf = 1;
2304 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2305
2306 /* enable nig attention */
2307 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2308 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2309 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2310
2311 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2312 }
2313
2314 /* end of Link */
2315
2316 /* slow path */
2317
2318 /*
2319 * General service functions
2320 */
2321
2322 /* the slow path queue is odd since completions arrive on the fastpath ring */
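/* Illustrative usage (taken from the statistics code further down): the
 * caller splits its payload into hi/lo dwords, e.g.
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 * and the corresponding completion later arrives on the fastpath ring as
 * noted above.
 */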
2323 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2324 u32 data_hi, u32 data_lo, int common)
2325 {
2326 int func = BP_FUNC(bp);
2327
2328 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2329 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2330 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2331 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2332 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2333
2334 #ifdef BNX2X_STOP_ON_ERROR
2335 if (unlikely(bp->panic))
2336 return -EIO;
2337 #endif
2338
2339 spin_lock_bh(&bp->spq_lock);
2340
2341 if (!bp->spq_left) {
2342 BNX2X_ERR("BUG! SPQ ring full!\n");
2343 spin_unlock_bh(&bp->spq_lock);
2344 bnx2x_panic();
2345 return -EBUSY;
2346 }
2347
2348 	/* CID needs port number to be encoded in it */
2349 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2350 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2351 HW_CID(bp, cid)));
2352 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2353 if (common)
2354 bp->spq_prod_bd->hdr.type |=
2355 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2356
2357 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2358 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2359
2360 bp->spq_left--;
2361
2362 if (bp->spq_prod_bd == bp->spq_last_bd) {
2363 bp->spq_prod_bd = bp->spq;
2364 bp->spq_prod_idx = 0;
2365 DP(NETIF_MSG_TIMER, "end of spq\n");
2366
2367 } else {
2368 bp->spq_prod_bd++;
2369 bp->spq_prod_idx++;
2370 }
2371
2372 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2373 bp->spq_prod_idx);
2374
2375 spin_unlock_bh(&bp->spq_lock);
2376 return 0;
2377 }
2378
2379 /* acquire split MCP access lock register */
2380 static int bnx2x_acquire_alr(struct bnx2x *bp)
2381 {
2382 u32 i, j, val;
2383 int rc = 0;
2384
2385 might_sleep();
2386 i = 100;
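	/* poll up to i*10 = 1000 times with a 5 ms sleep between attempts,
	   i.e. wait roughly five seconds for the lock bit to be granted */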
2387 for (j = 0; j < i*10; j++) {
2388 val = (1UL << 31);
2389 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2390 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2391 if (val & (1L << 31))
2392 break;
2393
2394 msleep(5);
2395 }
2396 if (!(val & (1L << 31))) {
2397 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2398 rc = -EBUSY;
2399 }
2400
2401 return rc;
2402 }
2403
2404 /* release split MCP access lock register */
2405 static void bnx2x_release_alr(struct bnx2x *bp)
2406 {
2407 u32 val = 0;
2408
2409 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2410 }
2411
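/* Returns a bitmask of the indices that changed: bit 0 - attention bits,
 * bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM
 * (matching the rc |= 1/2/4/8/16 updates below).
 */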
2412 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2413 {
2414 struct host_def_status_block *def_sb = bp->def_status_blk;
2415 u16 rc = 0;
2416
2417 barrier(); /* status block is written to by the chip */
2418 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2419 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2420 rc |= 1;
2421 }
2422 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2423 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2424 rc |= 2;
2425 }
2426 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2427 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2428 rc |= 4;
2429 }
2430 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2431 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2432 rc |= 8;
2433 }
2434 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2435 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2436 rc |= 16;
2437 }
2438 return rc;
2439 }
2440
2441 /*
2442 * slow path service functions
2443 */
2444
2445 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2446 {
2447 int port = BP_PORT(bp);
2448 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2449 COMMAND_REG_ATTN_BITS_SET);
2450 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2451 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2452 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2453 NIG_REG_MASK_INTERRUPT_PORT0;
2454 u32 aeu_mask;
2455
2456 if (bp->attn_state & asserted)
2457 BNX2X_ERR("IGU ERROR\n");
2458
2459 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2460 aeu_mask = REG_RD(bp, aeu_addr);
2461
2462 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2463 aeu_mask, asserted);
2464 aeu_mask &= ~(asserted & 0xff);
2465 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2466
2467 REG_WR(bp, aeu_addr, aeu_mask);
2468 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469
2470 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2471 bp->attn_state |= asserted;
2472 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2473
2474 if (asserted & ATTN_HARD_WIRED_MASK) {
2475 if (asserted & ATTN_NIG_FOR_FUNC) {
2476
2477 /* save nig interrupt mask */
2478 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2479 REG_WR(bp, nig_int_mask_addr, 0);
2480
2481 bnx2x_link_attn(bp);
2482
2483 /* handle unicore attn? */
2484 }
2485 if (asserted & ATTN_SW_TIMER_4_FUNC)
2486 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2487
2488 if (asserted & GPIO_2_FUNC)
2489 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2490
2491 if (asserted & GPIO_3_FUNC)
2492 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2493
2494 if (asserted & GPIO_4_FUNC)
2495 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2496
2497 if (port == 0) {
2498 if (asserted & ATTN_GENERAL_ATTN_1) {
2499 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2500 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2501 }
2502 if (asserted & ATTN_GENERAL_ATTN_2) {
2503 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2504 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2505 }
2506 if (asserted & ATTN_GENERAL_ATTN_3) {
2507 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2508 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2509 }
2510 } else {
2511 if (asserted & ATTN_GENERAL_ATTN_4) {
2512 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2513 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2514 }
2515 if (asserted & ATTN_GENERAL_ATTN_5) {
2516 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2517 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2518 }
2519 if (asserted & ATTN_GENERAL_ATTN_6) {
2520 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2521 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2522 }
2523 }
2524
2525 } /* if hardwired */
2526
2527 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2528 asserted, hc_addr);
2529 REG_WR(bp, hc_addr, asserted);
2530
2531 /* now set back the mask */
2532 if (asserted & ATTN_NIG_FOR_FUNC)
2533 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2534 }
2535
2536 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2537 {
2538 int port = BP_PORT(bp);
2539 int reg_offset;
2540 u32 val;
2541
2542 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2543 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2544
2545 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2546
2547 val = REG_RD(bp, reg_offset);
2548 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2549 REG_WR(bp, reg_offset, val);
2550
2551 BNX2X_ERR("SPIO5 hw attention\n");
2552
2553 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2554 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2555 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2556 /* Fan failure attention */
2557
2558 /* The PHY reset is controlled by GPIO 1 */
2559 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2560 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2561 /* Low power mode is controlled by GPIO 2 */
2562 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2563 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2564 /* mark the failure */
2565 bp->link_params.ext_phy_config &=
2566 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2567 bp->link_params.ext_phy_config |=
2568 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2569 SHMEM_WR(bp,
2570 dev_info.port_hw_config[port].
2571 external_phy_config,
2572 bp->link_params.ext_phy_config);
2573 /* log the failure */
2574 printk(KERN_ERR PFX "Fan Failure on Network"
2575 " Controller %s has caused the driver to"
2576 " shutdown the card to prevent permanent"
2577 " damage. Please contact Dell Support for"
2578 " assistance\n", bp->dev->name);
2579 break;
2580
2581 default:
2582 break;
2583 }
2584 }
2585
2586 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2587
2588 val = REG_RD(bp, reg_offset);
2589 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2590 REG_WR(bp, reg_offset, val);
2591
2592 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2593 (attn & HW_INTERRUT_ASSERT_SET_0));
2594 bnx2x_panic();
2595 }
2596 }
2597
2598 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2599 {
2600 u32 val;
2601
2602 if (attn & BNX2X_DOORQ_ASSERT) {
2603
2604 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2605 BNX2X_ERR("DB hw attention 0x%x\n", val);
2606 /* DORQ discard attention */
2607 if (val & 0x2)
2608 BNX2X_ERR("FATAL error from DORQ\n");
2609 }
2610
2611 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2612
2613 int port = BP_PORT(bp);
2614 int reg_offset;
2615
2616 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2617 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2618
2619 val = REG_RD(bp, reg_offset);
2620 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2621 REG_WR(bp, reg_offset, val);
2622
2623 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2624 (attn & HW_INTERRUT_ASSERT_SET_1));
2625 bnx2x_panic();
2626 }
2627 }
2628
2629 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2630 {
2631 u32 val;
2632
2633 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2634
2635 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2636 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2637 /* CFC error attention */
2638 if (val & 0x2)
2639 BNX2X_ERR("FATAL error from CFC\n");
2640 }
2641
2642 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2643
2644 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2645 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2646 /* RQ_USDMDP_FIFO_OVERFLOW */
2647 if (val & 0x18000)
2648 BNX2X_ERR("FATAL error from PXP\n");
2649 }
2650
2651 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2652
2653 int port = BP_PORT(bp);
2654 int reg_offset;
2655
2656 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2657 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2658
2659 val = REG_RD(bp, reg_offset);
2660 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2661 REG_WR(bp, reg_offset, val);
2662
2663 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2664 (attn & HW_INTERRUT_ASSERT_SET_2));
2665 bnx2x_panic();
2666 }
2667 }
2668
2669 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2670 {
2671 u32 val;
2672
2673 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2674
2675 if (attn & BNX2X_PMF_LINK_ASSERT) {
2676 int func = BP_FUNC(bp);
2677
2678 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2679 bnx2x__link_status_update(bp);
2680 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2681 DRV_STATUS_PMF)
2682 bnx2x_pmf_update(bp);
2683
2684 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2685
2686 BNX2X_ERR("MC assert!\n");
2687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2688 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2689 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2690 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2691 bnx2x_panic();
2692
2693 } else if (attn & BNX2X_MCP_ASSERT) {
2694
2695 BNX2X_ERR("MCP assert!\n");
2696 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2697 bnx2x_fw_dump(bp);
2698
2699 } else
2700 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2701 }
2702
2703 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2704 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2705 if (attn & BNX2X_GRC_TIMEOUT) {
2706 val = CHIP_IS_E1H(bp) ?
2707 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2708 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2709 }
2710 if (attn & BNX2X_GRC_RSV) {
2711 val = CHIP_IS_E1H(bp) ?
2712 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2713 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2714 }
2715 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2716 }
2717 }
2718
2719 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2720 {
2721 struct attn_route attn;
2722 struct attn_route group_mask;
2723 int port = BP_PORT(bp);
2724 int index;
2725 u32 reg_addr;
2726 u32 val;
2727 u32 aeu_mask;
2728
2729 /* need to take HW lock because MCP or other port might also
2730 try to handle this event */
2731 bnx2x_acquire_alr(bp);
2732
2733 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2734 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2735 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2736 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2737 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2738 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2739
2740 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2741 if (deasserted & (1 << index)) {
2742 group_mask = bp->attn_group[index];
2743
2744 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2745 index, group_mask.sig[0], group_mask.sig[1],
2746 group_mask.sig[2], group_mask.sig[3]);
2747
2748 bnx2x_attn_int_deasserted3(bp,
2749 attn.sig[3] & group_mask.sig[3]);
2750 bnx2x_attn_int_deasserted1(bp,
2751 attn.sig[1] & group_mask.sig[1]);
2752 bnx2x_attn_int_deasserted2(bp,
2753 attn.sig[2] & group_mask.sig[2]);
2754 bnx2x_attn_int_deasserted0(bp,
2755 attn.sig[0] & group_mask.sig[0]);
2756
2757 if ((attn.sig[0] & group_mask.sig[0] &
2758 HW_PRTY_ASSERT_SET_0) ||
2759 (attn.sig[1] & group_mask.sig[1] &
2760 HW_PRTY_ASSERT_SET_1) ||
2761 (attn.sig[2] & group_mask.sig[2] &
2762 HW_PRTY_ASSERT_SET_2))
2763 BNX2X_ERR("FATAL HW block parity attention\n");
2764 }
2765 }
2766
2767 bnx2x_release_alr(bp);
2768
2769 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2770
2771 val = ~deasserted;
2772 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2773 val, reg_addr);
2774 REG_WR(bp, reg_addr, val);
2775
2776 if (~bp->attn_state & deasserted)
2777 BNX2X_ERR("IGU ERROR\n");
2778
2779 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2780 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2781
2782 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2783 aeu_mask = REG_RD(bp, reg_addr);
2784
2785 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2786 aeu_mask, deasserted);
2787 aeu_mask |= (deasserted & 0xff);
2788 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2789
2790 REG_WR(bp, reg_addr, aeu_mask);
2791 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2792
2793 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2794 bp->attn_state &= ~deasserted;
2795 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2796 }
2797
2798 static void bnx2x_attn_int(struct bnx2x *bp)
2799 {
2800 /* read local copy of bits */
2801 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2802 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2803 u32 attn_state = bp->attn_state;
2804
2805 /* look for changed bits */
2806 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2807 u32 deasserted = ~attn_bits & attn_ack & attn_state;
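	/* Illustrative example: a bit that is set in attn_bits but clear in
	 * both attn_ack and attn_state shows up in 'asserted'; once the chip
	 * clears it in attn_bits while ack and state still carry it, the
	 * same bit shows up in 'deasserted'. */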
2808
2809 DP(NETIF_MSG_HW,
2810 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2811 attn_bits, attn_ack, asserted, deasserted);
2812
2813 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2814 BNX2X_ERR("BAD attention state\n");
2815
2816 /* handle bits that were raised */
2817 if (asserted)
2818 bnx2x_attn_int_asserted(bp, asserted);
2819
2820 if (deasserted)
2821 bnx2x_attn_int_deasserted(bp, deasserted);
2822 }
2823
2824 static void bnx2x_sp_task(struct work_struct *work)
2825 {
2826 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2827 u16 status;
2828
2829
2830 /* Return here if interrupt is disabled */
2831 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2832 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2833 return;
2834 }
2835
2836 status = bnx2x_update_dsb_idx(bp);
2837 /* if (status == 0) */
2838 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2839
2840 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2841
2842 /* HW attentions */
2843 if (status & 0x1)
2844 bnx2x_attn_int(bp);
2845
2846 /* CStorm events: query_stats, port delete ramrod */
2847 if (status & 0x2)
2848 bp->stats_pending = 0;
2849
2850 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2851 IGU_INT_NOP, 1);
2852 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2853 IGU_INT_NOP, 1);
2854 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2855 IGU_INT_NOP, 1);
2856 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2857 IGU_INT_NOP, 1);
2858 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2859 IGU_INT_ENABLE, 1);
2860
2861 }
2862
2863 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2864 {
2865 struct net_device *dev = dev_instance;
2866 struct bnx2x *bp = netdev_priv(dev);
2867
2868 /* Return here if interrupt is disabled */
2869 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2870 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2871 return IRQ_HANDLED;
2872 }
2873
2874 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2875
2876 #ifdef BNX2X_STOP_ON_ERROR
2877 if (unlikely(bp->panic))
2878 return IRQ_HANDLED;
2879 #endif
2880
2881 schedule_work(&bp->sp_task);
2882
2883 return IRQ_HANDLED;
2884 }
2885
2886 /* end of slow path */
2887
2888 /* Statistics */
2889
2890 /****************************************************************************
2891 * Macros
2892 ****************************************************************************/
2893
2894 /* sum[hi:lo] += add[hi:lo] */
2895 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2896 do { \
2897 s_lo += a_lo; \
2898 		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2899 } while (0)
2900
2901 /* difference = minuend - subtrahend */
2902 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2903 do { \
2904 if (m_lo < s_lo) { \
2905 /* underflow */ \
2906 d_hi = m_hi - s_hi; \
2907 if (d_hi > 0) { \
2908 /* we can 'loan' 1 */ \
2909 d_hi--; \
2910 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2911 } else { \
2912 /* m_hi <= s_hi */ \
2913 d_hi = 0; \
2914 d_lo = 0; \
2915 } \
2916 } else { \
2917 /* m_lo >= s_lo */ \
2918 if (m_hi < s_hi) { \
2919 d_hi = 0; \
2920 d_lo = 0; \
2921 } else { \
2922 /* m_hi >= s_hi */ \
2923 d_hi = m_hi - s_hi; \
2924 d_lo = m_lo - s_lo; \
2925 } \
2926 } \
2927 } while (0)
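/* Illustrative example (not in the original source): ADD_64(hi, 0, lo, 1)
 * with lo == UINT_MAX wraps lo to 0 and, because the new low dword is
 * smaller than the addend, carries 1 into hi; DIFF_64 performs the matching
 * borrow when the minuend's low dword is smaller than the subtrahend's.
 */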
2928
2929 #define UPDATE_STAT64(s, t) \
2930 do { \
2931 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2932 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2933 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2934 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2935 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2936 pstats->mac_stx[1].t##_lo, diff.lo); \
2937 } while (0)
2938
2939 #define UPDATE_STAT64_NIG(s, t) \
2940 do { \
2941 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2942 diff.lo, new->s##_lo, old->s##_lo); \
2943 ADD_64(estats->t##_hi, diff.hi, \
2944 estats->t##_lo, diff.lo); \
2945 } while (0)
2946
2947 /* sum[hi:lo] += add */
2948 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2949 do { \
2950 s_lo += a; \
2951 s_hi += (s_lo < a) ? 1 : 0; \
2952 } while (0)
2953
2954 #define UPDATE_EXTEND_STAT(s) \
2955 do { \
2956 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2957 pstats->mac_stx[1].s##_lo, \
2958 new->s); \
2959 } while (0)
2960
2961 #define UPDATE_EXTEND_TSTAT(s, t) \
2962 do { \
2963 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2964 old_tclient->s = le32_to_cpu(tclient->s); \
2965 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2966 } while (0)
2967
2968 #define UPDATE_EXTEND_XSTAT(s, t) \
2969 do { \
2970 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2971 old_xclient->s = le32_to_cpu(xclient->s); \
2972 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2973 } while (0)
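/* Note (illustrative): the subtraction in UPDATE_EXTEND_TSTAT/XSTAT is done
 * on u32 values, so a counter that wrapped (e.g. old 0xfffffff0, new 0x10)
 * still yields the correct delta of 0x20 before being folded into the
 * 64-bit fstats field.
 */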
2974
2975 /*
2976 * General service functions
2977 */
2978
2979 static inline long bnx2x_hilo(u32 *hiref)
2980 {
2981 u32 lo = *(hiref + 1);
2982 #if (BITS_PER_LONG == 64)
2983 u32 hi = *hiref;
2984
2985 return HILO_U64(hi, lo);
2986 #else
2987 return lo;
2988 #endif
2989 }
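/* e.g. (illustrative, assuming HILO_U64 composes the two 32-bit halves):
 * on a 64-bit kernel hi = 0x1, lo = 0x2 yields 0x100000002, while on a
 * 32-bit kernel only the low dword (0x2) is returned.
 */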
2990
2991 /*
2992 * Init service functions
2993 */
2994
2995 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2996 {
2997 if (!bp->stats_pending) {
2998 struct eth_query_ramrod_data ramrod_data = {0};
2999 int rc;
3000
3001 ramrod_data.drv_counter = bp->stats_counter++;
3002 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3003 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3004
3005 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3006 ((u32 *)&ramrod_data)[1],
3007 ((u32 *)&ramrod_data)[0], 0);
3008 if (rc == 0) {
3009 			/* stats ramrod has its own slot on the spq */
3010 bp->spq_left++;
3011 bp->stats_pending = 1;
3012 }
3013 }
3014 }
3015
3016 static void bnx2x_stats_init(struct bnx2x *bp)
3017 {
3018 int port = BP_PORT(bp);
3019
3020 bp->executer_idx = 0;
3021 bp->stats_counter = 0;
3022
3023 /* port stats */
3024 if (!BP_NOMCP(bp))
3025 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3026 else
3027 bp->port.port_stx = 0;
3028 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3029
3030 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3031 bp->port.old_nig_stats.brb_discard =
3032 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3033 bp->port.old_nig_stats.brb_truncate =
3034 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3035 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3036 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3037 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3038 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3039
3040 /* function stats */
3041 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3042 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3043 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3044 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3045
3046 bp->stats_state = STATS_STATE_DISABLED;
3047 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3048 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3049 }
3050
3051 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3052 {
3053 struct dmae_command *dmae = &bp->stats_dmae;
3054 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3055
3056 *stats_comp = DMAE_COMP_VAL;
3057
3058 /* loader */
3059 if (bp->executer_idx) {
3060 int loader_idx = PMF_DMAE_C(bp);
3061
3062 memset(dmae, 0, sizeof(struct dmae_command));
3063
3064 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3065 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3066 DMAE_CMD_DST_RESET |
3067 #ifdef __BIG_ENDIAN
3068 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3069 #else
3070 DMAE_CMD_ENDIANITY_DW_SWAP |
3071 #endif
3072 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3073 DMAE_CMD_PORT_0) |
3074 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3075 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3076 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3077 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3078 sizeof(struct dmae_command) *
3079 (loader_idx + 1)) >> 2;
3080 dmae->dst_addr_hi = 0;
3081 dmae->len = sizeof(struct dmae_command) >> 2;
3082 if (CHIP_IS_E1(bp))
3083 dmae->len--;
3084 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3085 dmae->comp_addr_hi = 0;
3086 dmae->comp_val = 1;
3087
3088 *stats_comp = 0;
3089 bnx2x_post_dmae(bp, dmae, loader_idx);
3090
3091 } else if (bp->func_stx) {
3092 *stats_comp = 0;
3093 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3094 }
3095 }
3096
3097 static int bnx2x_stats_comp(struct bnx2x *bp)
3098 {
3099 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3100 int cnt = 10;
3101
3102 might_sleep();
3103 while (*stats_comp != DMAE_COMP_VAL) {
3104 if (!cnt) {
3105 			BNX2X_ERR("timeout waiting for stats to finish\n");
3106 break;
3107 }
3108 cnt--;
3109 msleep(1);
3110 }
3111 return 1;
3112 }
3113
3114 /*
3115 * Statistics service functions
3116 */
3117
3118 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3119 {
3120 struct dmae_command *dmae;
3121 u32 opcode;
3122 int loader_idx = PMF_DMAE_C(bp);
3123 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3124
3125 /* sanity */
3126 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3127 BNX2X_ERR("BUG!\n");
3128 return;
3129 }
3130
3131 bp->executer_idx = 0;
3132
3133 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3134 DMAE_CMD_C_ENABLE |
3135 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3136 #ifdef __BIG_ENDIAN
3137 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3138 #else
3139 DMAE_CMD_ENDIANITY_DW_SWAP |
3140 #endif
3141 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3142 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3143
3144 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3145 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3146 dmae->src_addr_lo = bp->port.port_stx >> 2;
3147 dmae->src_addr_hi = 0;
3148 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3149 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3150 dmae->len = DMAE_LEN32_RD_MAX;
3151 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3152 dmae->comp_addr_hi = 0;
3153 dmae->comp_val = 1;
3154
3155 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3156 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3157 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3158 dmae->src_addr_hi = 0;
3159 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3160 DMAE_LEN32_RD_MAX * 4);
3161 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3162 DMAE_LEN32_RD_MAX * 4);
3163 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3164 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3165 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3166 dmae->comp_val = DMAE_COMP_VAL;
3167
3168 *stats_comp = 0;
3169 bnx2x_hw_stats_post(bp);
3170 bnx2x_stats_comp(bp);
3171 }
3172
3173 static void bnx2x_port_stats_init(struct bnx2x *bp)
3174 {
3175 struct dmae_command *dmae;
3176 int port = BP_PORT(bp);
3177 int vn = BP_E1HVN(bp);
3178 u32 opcode;
3179 int loader_idx = PMF_DMAE_C(bp);
3180 u32 mac_addr;
3181 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3182
3183 /* sanity */
3184 if (!bp->link_vars.link_up || !bp->port.pmf) {
3185 BNX2X_ERR("BUG!\n");
3186 return;
3187 }
3188
3189 bp->executer_idx = 0;
3190
3191 /* MCP */
3192 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3193 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3194 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3195 #ifdef __BIG_ENDIAN
3196 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3197 #else
3198 DMAE_CMD_ENDIANITY_DW_SWAP |
3199 #endif
3200 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3201 (vn << DMAE_CMD_E1HVN_SHIFT));
3202
3203 if (bp->port.port_stx) {
3204
3205 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3206 dmae->opcode = opcode;
3207 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3208 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3209 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3210 dmae->dst_addr_hi = 0;
3211 dmae->len = sizeof(struct host_port_stats) >> 2;
3212 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3213 dmae->comp_addr_hi = 0;
3214 dmae->comp_val = 1;
3215 }
3216
3217 if (bp->func_stx) {
3218
3219 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3220 dmae->opcode = opcode;
3221 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3222 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3223 dmae->dst_addr_lo = bp->func_stx >> 2;
3224 dmae->dst_addr_hi = 0;
3225 dmae->len = sizeof(struct host_func_stats) >> 2;
3226 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3227 dmae->comp_addr_hi = 0;
3228 dmae->comp_val = 1;
3229 }
3230
3231 /* MAC */
3232 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3233 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3234 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3235 #ifdef __BIG_ENDIAN
3236 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3237 #else
3238 DMAE_CMD_ENDIANITY_DW_SWAP |
3239 #endif
3240 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3241 (vn << DMAE_CMD_E1HVN_SHIFT));
3242
3243 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3244
3245 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3246 NIG_REG_INGRESS_BMAC0_MEM);
3247
3248 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3249 BIGMAC_REGISTER_TX_STAT_GTBYT */
3250 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3251 dmae->opcode = opcode;
3252 dmae->src_addr_lo = (mac_addr +
3253 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3254 dmae->src_addr_hi = 0;
3255 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3256 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3257 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3258 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3259 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3260 dmae->comp_addr_hi = 0;
3261 dmae->comp_val = 1;
3262
3263 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3264 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3265 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3266 dmae->opcode = opcode;
3267 dmae->src_addr_lo = (mac_addr +
3268 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3269 dmae->src_addr_hi = 0;
3270 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3271 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3272 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3273 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3274 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3275 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3276 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3277 dmae->comp_addr_hi = 0;
3278 dmae->comp_val = 1;
3279
3280 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3281
3282 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3283
3284 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3285 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3286 dmae->opcode = opcode;
3287 dmae->src_addr_lo = (mac_addr +
3288 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3289 dmae->src_addr_hi = 0;
3290 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3291 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3292 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3293 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3294 dmae->comp_addr_hi = 0;
3295 dmae->comp_val = 1;
3296
3297 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3298 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3299 dmae->opcode = opcode;
3300 dmae->src_addr_lo = (mac_addr +
3301 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3302 dmae->src_addr_hi = 0;
3303 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3304 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3305 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3306 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3307 dmae->len = 1;
3308 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3309 dmae->comp_addr_hi = 0;
3310 dmae->comp_val = 1;
3311
3312 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3313 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3314 dmae->opcode = opcode;
3315 dmae->src_addr_lo = (mac_addr +
3316 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3317 dmae->src_addr_hi = 0;
3318 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3319 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3320 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3321 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3322 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3323 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3324 dmae->comp_addr_hi = 0;
3325 dmae->comp_val = 1;
3326 }
3327
3328 /* NIG */
3329 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330 dmae->opcode = opcode;
3331 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3332 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3333 dmae->src_addr_hi = 0;
3334 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3335 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3336 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3337 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3338 dmae->comp_addr_hi = 0;
3339 dmae->comp_val = 1;
3340
3341 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3342 dmae->opcode = opcode;
3343 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3344 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3345 dmae->src_addr_hi = 0;
3346 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3347 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3348 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3349 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3350 dmae->len = (2*sizeof(u32)) >> 2;
3351 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3352 dmae->comp_addr_hi = 0;
3353 dmae->comp_val = 1;
3354
3355 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3356 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3357 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3358 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3359 #ifdef __BIG_ENDIAN
3360 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3361 #else
3362 DMAE_CMD_ENDIANITY_DW_SWAP |
3363 #endif
3364 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3365 (vn << DMAE_CMD_E1HVN_SHIFT));
3366 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3367 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3368 dmae->src_addr_hi = 0;
3369 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3370 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3371 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3372 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3373 dmae->len = (2*sizeof(u32)) >> 2;
3374 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3375 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3376 dmae->comp_val = DMAE_COMP_VAL;
3377
3378 *stats_comp = 0;
3379 }
3380
3381 static void bnx2x_func_stats_init(struct bnx2x *bp)
3382 {
3383 struct dmae_command *dmae = &bp->stats_dmae;
3384 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3385
3386 /* sanity */
3387 if (!bp->func_stx) {
3388 BNX2X_ERR("BUG!\n");
3389 return;
3390 }
3391
3392 bp->executer_idx = 0;
3393 memset(dmae, 0, sizeof(struct dmae_command));
3394
3395 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3396 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3397 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3398 #ifdef __BIG_ENDIAN
3399 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3400 #else
3401 DMAE_CMD_ENDIANITY_DW_SWAP |
3402 #endif
3403 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3404 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3405 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3406 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3407 dmae->dst_addr_lo = bp->func_stx >> 2;
3408 dmae->dst_addr_hi = 0;
3409 dmae->len = sizeof(struct host_func_stats) >> 2;
3410 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3411 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3412 dmae->comp_val = DMAE_COMP_VAL;
3413
3414 *stats_comp = 0;
3415 }
3416
3417 static void bnx2x_stats_start(struct bnx2x *bp)
3418 {
3419 if (bp->port.pmf)
3420 bnx2x_port_stats_init(bp);
3421
3422 else if (bp->func_stx)
3423 bnx2x_func_stats_init(bp);
3424
3425 bnx2x_hw_stats_post(bp);
3426 bnx2x_storm_stats_post(bp);
3427 }
3428
3429 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3430 {
3431 bnx2x_stats_comp(bp);
3432 bnx2x_stats_pmf_update(bp);
3433 bnx2x_stats_start(bp);
3434 }
3435
3436 static void bnx2x_stats_restart(struct bnx2x *bp)
3437 {
3438 bnx2x_stats_comp(bp);
3439 bnx2x_stats_start(bp);
3440 }
3441
3442 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3443 {
3444 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3445 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3446 struct regpair diff;
3447
3448 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3449 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3450 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3451 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3452 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3453 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3454 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3455 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3456 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3457 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3458 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3459 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3460 UPDATE_STAT64(tx_stat_gt127,
3461 tx_stat_etherstatspkts65octetsto127octets);
3462 UPDATE_STAT64(tx_stat_gt255,
3463 tx_stat_etherstatspkts128octetsto255octets);
3464 UPDATE_STAT64(tx_stat_gt511,
3465 tx_stat_etherstatspkts256octetsto511octets);
3466 UPDATE_STAT64(tx_stat_gt1023,
3467 tx_stat_etherstatspkts512octetsto1023octets);
3468 UPDATE_STAT64(tx_stat_gt1518,
3469 tx_stat_etherstatspkts1024octetsto1522octets);
3470 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3471 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3472 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3473 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3474 UPDATE_STAT64(tx_stat_gterr,
3475 tx_stat_dot3statsinternalmactransmiterrors);
3476 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3477 }
3478
3479 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3480 {
3481 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3482 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3483
3484 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3485 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3486 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3487 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3488 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3489 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3490 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3491 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3492 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3493 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3494 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3495 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3496 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3497 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3498 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3499 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3500 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3501 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3502 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3505 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3506 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3507 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3513 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3514 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3515 }
3516
3517 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3518 {
3519 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3520 struct nig_stats *old = &(bp->port.old_nig_stats);
3521 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3522 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3523 struct regpair diff;
3524
3525 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3526 bnx2x_bmac_stats_update(bp);
3527
3528 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3529 bnx2x_emac_stats_update(bp);
3530
3531 else { /* unreached */
3532 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3533 return -1;
3534 }
3535
3536 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3537 new->brb_discard - old->brb_discard);
3538 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3539 new->brb_truncate - old->brb_truncate);
3540
3541 UPDATE_STAT64_NIG(egress_mac_pkt0,
3542 etherstatspkts1024octetsto1522octets);
3543 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3544
3545 memcpy(old, new, sizeof(struct nig_stats));
3546
3547 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3548 sizeof(struct mac_stx));
3549 estats->brb_drop_hi = pstats->brb_drop_hi;
3550 estats->brb_drop_lo = pstats->brb_drop_lo;
3551
3552 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3553
3554 return 0;
3555 }
3556
3557 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3558 {
3559 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3560 int cl_id = BP_CL_ID(bp);
3561 struct tstorm_per_port_stats *tport =
3562 &stats->tstorm_common.port_statistics;
3563 struct tstorm_per_client_stats *tclient =
3564 &stats->tstorm_common.client_statistics[cl_id];
3565 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3566 struct xstorm_per_client_stats *xclient =
3567 &stats->xstorm_common.client_statistics[cl_id];
3568 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3569 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3570 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3571 u32 diff;
3572
3573 /* are storm stats valid? */
3574 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3575 bp->stats_counter) {
3576 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3577 " tstorm counter (%d) != stats_counter (%d)\n",
3578 tclient->stats_counter, bp->stats_counter);
3579 return -1;
3580 }
3581 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3582 bp->stats_counter) {
3583 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3584 " xstorm counter (%d) != stats_counter (%d)\n",
3585 xclient->stats_counter, bp->stats_counter);
3586 return -2;
3587 }
3588
3589 fstats->total_bytes_received_hi =
3590 fstats->valid_bytes_received_hi =
3591 le32_to_cpu(tclient->total_rcv_bytes.hi);
3592 fstats->total_bytes_received_lo =
3593 fstats->valid_bytes_received_lo =
3594 le32_to_cpu(tclient->total_rcv_bytes.lo);
3595
3596 estats->error_bytes_received_hi =
3597 le32_to_cpu(tclient->rcv_error_bytes.hi);
3598 estats->error_bytes_received_lo =
3599 le32_to_cpu(tclient->rcv_error_bytes.lo);
3600 ADD_64(estats->error_bytes_received_hi,
3601 estats->rx_stat_ifhcinbadoctets_hi,
3602 estats->error_bytes_received_lo,
3603 estats->rx_stat_ifhcinbadoctets_lo);
3604
3605 ADD_64(fstats->total_bytes_received_hi,
3606 estats->error_bytes_received_hi,
3607 fstats->total_bytes_received_lo,
3608 estats->error_bytes_received_lo);
3609
3610 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3611 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3612 total_multicast_packets_received);
3613 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3614 total_broadcast_packets_received);
3615
3616 fstats->total_bytes_transmitted_hi =
3617 le32_to_cpu(xclient->total_sent_bytes.hi);
3618 fstats->total_bytes_transmitted_lo =
3619 le32_to_cpu(xclient->total_sent_bytes.lo);
3620
3621 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3622 total_unicast_packets_transmitted);
3623 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3624 total_multicast_packets_transmitted);
3625 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3626 total_broadcast_packets_transmitted);
3627
3628 memcpy(estats, &(fstats->total_bytes_received_hi),
3629 sizeof(struct host_func_stats) - 2*sizeof(u32));
3630
3631 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3632 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3633 estats->brb_truncate_discard =
3634 le32_to_cpu(tport->brb_truncate_discard);
3635 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3636
3637 old_tclient->rcv_unicast_bytes.hi =
3638 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3639 old_tclient->rcv_unicast_bytes.lo =
3640 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3641 old_tclient->rcv_broadcast_bytes.hi =
3642 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3643 old_tclient->rcv_broadcast_bytes.lo =
3644 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3645 old_tclient->rcv_multicast_bytes.hi =
3646 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3647 old_tclient->rcv_multicast_bytes.lo =
3648 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3649 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3650
3651 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3652 old_tclient->packets_too_big_discard =
3653 le32_to_cpu(tclient->packets_too_big_discard);
3654 estats->no_buff_discard =
3655 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3656 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3657
3658 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3659 old_xclient->unicast_bytes_sent.hi =
3660 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3661 old_xclient->unicast_bytes_sent.lo =
3662 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3663 old_xclient->multicast_bytes_sent.hi =
3664 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3665 old_xclient->multicast_bytes_sent.lo =
3666 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3667 old_xclient->broadcast_bytes_sent.hi =
3668 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3669 old_xclient->broadcast_bytes_sent.lo =
3670 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3671
3672 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3673
3674 return 0;
3675 }
3676
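/* Fold the accumulated 64-bit (hi/lo) driver statistics into the netdev's
 * struct net_device_stats: packet and byte counters come from the storm
 * totals, the error counters from the MAC and storm discard counters.
 */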
3677 static void bnx2x_net_stats_update(struct bnx2x *bp)
3678 {
3679 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3680 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3681 struct net_device_stats *nstats = &bp->dev->stats;
3682
3683 nstats->rx_packets =
3684 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3685 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3686 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3687
3688 nstats->tx_packets =
3689 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3690 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3691 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3692
3693 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3694
3695 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3696
3697 nstats->rx_dropped = old_tclient->checksum_discard +
3698 estats->mac_discard;
3699 nstats->tx_dropped = 0;
3700
3701 nstats->multicast =
3702 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3703
3704 nstats->collisions =
3705 estats->tx_stat_dot3statssinglecollisionframes_lo +
3706 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3707 estats->tx_stat_dot3statslatecollisions_lo +
3708 estats->tx_stat_dot3statsexcessivecollisions_lo;
3709
3710 estats->jabber_packets_received =
3711 old_tclient->packets_too_big_discard +
3712 estats->rx_stat_dot3statsframestoolong_lo;
3713
3714 nstats->rx_length_errors =
3715 estats->rx_stat_etherstatsundersizepkts_lo +
3716 estats->jabber_packets_received;
3717 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3718 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3719 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3720 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3721 nstats->rx_missed_errors = estats->xxoverflow_discard;
3722
3723 nstats->rx_errors = nstats->rx_length_errors +
3724 nstats->rx_over_errors +
3725 nstats->rx_crc_errors +
3726 nstats->rx_frame_errors +
3727 nstats->rx_fifo_errors +
3728 nstats->rx_missed_errors;
3729
3730 nstats->tx_aborted_errors =
3731 estats->tx_stat_dot3statslatecollisions_lo +
3732 estats->tx_stat_dot3statsexcessivecollisions_lo;
3733 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3734 nstats->tx_fifo_errors = 0;
3735 nstats->tx_heartbeat_errors = 0;
3736 nstats->tx_window_errors = 0;
3737
3738 nstats->tx_errors = nstats->tx_aborted_errors +
3739 nstats->tx_carrier_errors;
3740 }
3741
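/* Handler for the UPDATE event while statistics are enabled: runs only
 * once the previous DMAE transfer has completed (*stats_comp), refreshes
 * the HW (PMF only) and storm statistics, derives the netdev counters,
 * and re-posts the HW and storm stats queries. If the storm statistics
 * fail to update three times in a row the driver panics.
 */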
3742 static void bnx2x_stats_update(struct bnx2x *bp)
3743 {
3744 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3745 int update = 0;
3746
3747 if (*stats_comp != DMAE_COMP_VAL)
3748 return;
3749
3750 if (bp->port.pmf)
3751 update = (bnx2x_hw_stats_update(bp) == 0);
3752
3753 update |= (bnx2x_storm_stats_update(bp) == 0);
3754
3755 if (update)
3756 bnx2x_net_stats_update(bp);
3757
3758 else {
3759 if (bp->stats_pending) {
3760 bp->stats_pending++;
3761 if (bp->stats_pending == 3) {
3762 BNX2X_ERR("storm stats were not updated for 3 times\n");
3763 bnx2x_panic();
3764 return;
3765 }
3766 }
3767 }
3768
3769 if (bp->msglevel & NETIF_MSG_TIMER) {
3770 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3771 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3772 struct net_device_stats *nstats = &bp->dev->stats;
3773 int i;
3774
3775 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3776 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3777 " tx pkt (%lx)\n",
3778 bnx2x_tx_avail(bp->fp),
3779 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3780 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3781 " rx pkt (%lx)\n",
3782 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3783 bp->fp->rx_comp_cons),
3784 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3785 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3786 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3787 estats->driver_xoff, estats->brb_drop_lo);
3788 printk(KERN_DEBUG "tstats: checksum_discard %u "
3789 "packets_too_big_discard %u no_buff_discard %u "
3790 "mac_discard %u mac_filter_discard %u "
3791 "xxovrflow_discard %u brb_truncate_discard %u "
3792 "ttl0_discard %u\n",
3793 old_tclient->checksum_discard,
3794 old_tclient->packets_too_big_discard,
3795 old_tclient->no_buff_discard, estats->mac_discard,
3796 estats->mac_filter_discard, estats->xxoverflow_discard,
3797 estats->brb_truncate_discard,
3798 old_tclient->ttl0_discard);
3799
3800 for_each_queue(bp, i) {
3801 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3802 bnx2x_fp(bp, i, tx_pkt),
3803 bnx2x_fp(bp, i, rx_pkt),
3804 bnx2x_fp(bp, i, rx_calls));
3805 }
3806 }
3807
3808 bnx2x_hw_stats_post(bp);
3809 bnx2x_storm_stats_post(bp);
3810 }
3811
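/* Build the DMAE commands that flush the final port and function
 * statistics from host memory back to their areas in the chip
 * (port_stx/func_stx). When both exist, the port command completes
 * into the GRC loader so the function command is chained after it;
 * otherwise completion is signalled directly through stats_comp.
 */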
3812 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3813 {
3814 struct dmae_command *dmae;
3815 u32 opcode;
3816 int loader_idx = PMF_DMAE_C(bp);
3817 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3818
3819 bp->executer_idx = 0;
3820
3821 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3822 DMAE_CMD_C_ENABLE |
3823 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3824 #ifdef __BIG_ENDIAN
3825 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3826 #else
3827 DMAE_CMD_ENDIANITY_DW_SWAP |
3828 #endif
3829 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3830 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3831
3832 if (bp->port.port_stx) {
3833
3834 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3835 if (bp->func_stx)
3836 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3837 else
3838 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3839 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3840 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3841 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3842 dmae->dst_addr_hi = 0;
3843 dmae->len = sizeof(struct host_port_stats) >> 2;
3844 if (bp->func_stx) {
3845 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3846 dmae->comp_addr_hi = 0;
3847 dmae->comp_val = 1;
3848 } else {
3849 dmae->comp_addr_lo =
3850 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3851 dmae->comp_addr_hi =
3852 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3853 dmae->comp_val = DMAE_COMP_VAL;
3854
3855 *stats_comp = 0;
3856 }
3857 }
3858
3859 if (bp->func_stx) {
3860
3861 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3862 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3863 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3864 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3865 dmae->dst_addr_lo = bp->func_stx >> 2;
3866 dmae->dst_addr_hi = 0;
3867 dmae->len = sizeof(struct host_func_stats) >> 2;
3868 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3869 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3870 dmae->comp_val = DMAE_COMP_VAL;
3871
3872 *stats_comp = 0;
3873 }
3874 }
3875
3876 static void bnx2x_stats_stop(struct bnx2x *bp)
3877 {
3878 int update = 0;
3879
3880 bnx2x_stats_comp(bp);
3881
3882 if (bp->port.pmf)
3883 update = (bnx2x_hw_stats_update(bp) == 0);
3884
3885 update |= (bnx2x_storm_stats_update(bp) == 0);
3886
3887 if (update) {
3888 bnx2x_net_stats_update(bp);
3889
3890 if (bp->port.pmf)
3891 bnx2x_port_stats_stop(bp);
3892
3893 bnx2x_hw_stats_post(bp);
3894 bnx2x_stats_comp(bp);
3895 }
3896 }
3897
3898 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3899 {
3900 }
3901
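/* Statistics state machine: indexed by the current state (DISABLED or
 * ENABLED) and the incoming event (PMF change, LINK_UP, UPDATE, STOP);
 * each entry gives the action to run and the next state to move to.
 */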
3902 static const struct {
3903 void (*action)(struct bnx2x *bp);
3904 enum bnx2x_stats_state next_state;
3905 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3906 /* state event */
3907 {
3908 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3909 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3910 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3911 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3912 },
3913 {
3914 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3915 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3916 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3917 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3918 }
3919 };
3920
3921 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3922 {
3923 enum bnx2x_stats_state state = bp->stats_state;
3924
3925 bnx2x_stats_stm[state][event].action(bp);
3926 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3927
3928 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3929 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3930 state, event, bp->stats_state);
3931 }
3932
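/* Periodic driver timer: in poll mode it services TX/RX of fastpath 0,
 * maintains the driver pulse in shared memory and compares it against
 * the MCP pulse (heartbeat check), and triggers a statistics update
 * while the device is up. The timer re-arms itself with
 * bp->current_interval.
 */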
3933 static void bnx2x_timer(unsigned long data)
3934 {
3935 struct bnx2x *bp = (struct bnx2x *) data;
3936
3937 if (!netif_running(bp->dev))
3938 return;
3939
3940 if (atomic_read(&bp->intr_sem) != 0)
3941 goto timer_restart;
3942
3943 if (poll) {
3944 struct bnx2x_fastpath *fp = &bp->fp[0];
3945 int rc;
3946
3947 bnx2x_tx_int(fp, 1000);
3948 rc = bnx2x_rx_int(fp, 1000);
3949 }
3950
3951 if (!BP_NOMCP(bp)) {
3952 int func = BP_FUNC(bp);
3953 u32 drv_pulse;
3954 u32 mcp_pulse;
3955
3956 ++bp->fw_drv_pulse_wr_seq;
3957 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3958 /* TBD - add SYSTEM_TIME */
3959 drv_pulse = bp->fw_drv_pulse_wr_seq;
3960 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3961
3962 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3963 MCP_PULSE_SEQ_MASK);
3964 /* The delta between driver pulse and mcp response
3965 * should be 1 (before mcp response) or 0 (after mcp response)
3966 */
3967 if ((drv_pulse != mcp_pulse) &&
3968 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3969 /* someone lost a heartbeat... */
3970 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3971 drv_pulse, mcp_pulse);
3972 }
3973 }
3974
3975 if ((bp->state == BNX2X_STATE_OPEN) ||
3976 (bp->state == BNX2X_STATE_DISABLED))
3977 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3978
3979 timer_restart:
3980 mod_timer(&bp->timer, jiffies + bp->current_interval);
3981 }
3982
3983 /* end of Statistics */
3984
3985 /* nic init */
3986
3987 /*
3988 * nic init service functions
3989 */
3990
3991 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3992 {
3993 int port = BP_PORT(bp);
3994
3995 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3996 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3997 sizeof(struct ustorm_status_block)/4);
3998 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3999 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4000 sizeof(struct cstorm_status_block)/4);
4001 }
4002
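/* Set up a per-fastpath status block: program the host address of its
 * USTORM and CSTORM sections into the storms' internal memory, record
 * the owning function, start with all HC indices disabled and ack the
 * status block so the IGU interrupt line is enabled.
 */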
4003 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4004 dma_addr_t mapping, int sb_id)
4005 {
4006 int port = BP_PORT(bp);
4007 int func = BP_FUNC(bp);
4008 int index;
4009 u64 section;
4010
4011 /* USTORM */
4012 section = ((u64)mapping) + offsetof(struct host_status_block,
4013 u_status_block);
4014 sb->u_status_block.status_block_id = sb_id;
4015
4016 REG_WR(bp, BAR_USTRORM_INTMEM +
4017 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4018 REG_WR(bp, BAR_USTRORM_INTMEM +
4019 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4020 U64_HI(section));
4021 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4022 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4023
4024 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4025 REG_WR16(bp, BAR_USTRORM_INTMEM +
4026 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4027
4028 /* CSTORM */
4029 section = ((u64)mapping) + offsetof(struct host_status_block,
4030 c_status_block);
4031 sb->c_status_block.status_block_id = sb_id;
4032
4033 REG_WR(bp, BAR_CSTRORM_INTMEM +
4034 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4035 REG_WR(bp, BAR_CSTRORM_INTMEM +
4036 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4037 U64_HI(section));
4038 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4039 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4040
4041 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4042 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4043 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4044
4045 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4046 }
4047
4048 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4049 {
4050 int func = BP_FUNC(bp);
4051
4052 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4053 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4054 sizeof(struct ustorm_def_status_block)/4);
4055 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4056 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4057 sizeof(struct cstorm_def_status_block)/4);
4058 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4059 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4060 sizeof(struct xstorm_def_status_block)/4);
4061 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4062 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4063 sizeof(struct tstorm_def_status_block)/4);
4064 }
4065
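/* Set up the default (slowpath) status block: read the AEU enable
 * signatures for every dynamic attention group, program the attention
 * message address and attention number for this port, then point each
 * storm's default SB section at the host buffer with its HC indices
 * disabled, and ack the block to enable the IGU line.
 */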
4066 static void bnx2x_init_def_sb(struct bnx2x *bp,
4067 struct host_def_status_block *def_sb,
4068 dma_addr_t mapping, int sb_id)
4069 {
4070 int port = BP_PORT(bp);
4071 int func = BP_FUNC(bp);
4072 int index, val, reg_offset;
4073 u64 section;
4074
4075 /* ATTN */
4076 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4077 atten_status_block);
4078 def_sb->atten_status_block.status_block_id = sb_id;
4079
4080 bp->attn_state = 0;
4081
4082 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4083 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4084
4085 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4086 bp->attn_group[index].sig[0] = REG_RD(bp,
4087 reg_offset + 0x10*index);
4088 bp->attn_group[index].sig[1] = REG_RD(bp,
4089 reg_offset + 0x4 + 0x10*index);
4090 bp->attn_group[index].sig[2] = REG_RD(bp,
4091 reg_offset + 0x8 + 0x10*index);
4092 bp->attn_group[index].sig[3] = REG_RD(bp,
4093 reg_offset + 0xc + 0x10*index);
4094 }
4095
4096 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4097 HC_REG_ATTN_MSG0_ADDR_L);
4098
4099 REG_WR(bp, reg_offset, U64_LO(section));
4100 REG_WR(bp, reg_offset + 4, U64_HI(section));
4101
4102 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4103
4104 val = REG_RD(bp, reg_offset);
4105 val |= sb_id;
4106 REG_WR(bp, reg_offset, val);
4107
4108 /* USTORM */
4109 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4110 u_def_status_block);
4111 def_sb->u_def_status_block.status_block_id = sb_id;
4112
4113 REG_WR(bp, BAR_USTRORM_INTMEM +
4114 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4115 REG_WR(bp, BAR_USTRORM_INTMEM +
4116 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4117 U64_HI(section));
4118 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4119 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4120
4121 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4122 REG_WR16(bp, BAR_USTRORM_INTMEM +
4123 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4124
4125 /* CSTORM */
4126 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4127 c_def_status_block);
4128 def_sb->c_def_status_block.status_block_id = sb_id;
4129
4130 REG_WR(bp, BAR_CSTRORM_INTMEM +
4131 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4132 REG_WR(bp, BAR_CSTRORM_INTMEM +
4133 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4134 U64_HI(section));
4135 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4136 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4137
4138 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4139 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4140 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4141
4142 /* TSTORM */
4143 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4144 t_def_status_block);
4145 def_sb->t_def_status_block.status_block_id = sb_id;
4146
4147 REG_WR(bp, BAR_TSTRORM_INTMEM +
4148 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4149 REG_WR(bp, BAR_TSTRORM_INTMEM +
4150 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4151 U64_HI(section));
4152 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4153 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4154
4155 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4156 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4157 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4158
4159 /* XSTORM */
4160 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4161 x_def_status_block);
4162 def_sb->x_def_status_block.status_block_id = sb_id;
4163
4164 REG_WR(bp, BAR_XSTRORM_INTMEM +
4165 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4166 REG_WR(bp, BAR_XSTRORM_INTMEM +
4167 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4168 U64_HI(section));
4169 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4170 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4171
4172 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4173 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4174 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4175
4176 bp->stats_pending = 0;
4177 bp->set_mac_pending = 0;
4178
4179 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4180 }
4181
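/* Program interrupt coalescing for every queue: the configured rx/tx
 * tick values are scaled by 1/12 into the HC timeout fields of the
 * RX CQ and TX CQ indices; a tick value of 0 disables coalescing on
 * the corresponding index instead.
 */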
4182 static void bnx2x_update_coalesce(struct bnx2x *bp)
4183 {
4184 int port = BP_PORT(bp);
4185 int i;
4186
4187 for_each_queue(bp, i) {
4188 int sb_id = bp->fp[i].sb_id;
4189
4190 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4191 REG_WR8(bp, BAR_USTRORM_INTMEM +
4192 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4193 U_SB_ETH_RX_CQ_INDEX),
4194 bp->rx_ticks/12);
4195 REG_WR16(bp, BAR_USTRORM_INTMEM +
4196 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4197 U_SB_ETH_RX_CQ_INDEX),
4198 bp->rx_ticks ? 0 : 1);
4199 REG_WR16(bp, BAR_USTRORM_INTMEM +
4200 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4201 U_SB_ETH_RX_BD_INDEX),
4202 bp->rx_ticks ? 0 : 1);
4203
4204 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4205 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4206 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4207 C_SB_ETH_TX_CQ_INDEX),
4208 bp->tx_ticks/12);
4209 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4210 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4211 C_SB_ETH_TX_CQ_INDEX),
4212 bp->tx_ticks ? 0 : 1);
4213 }
4214 }
4215
4216 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4217 struct bnx2x_fastpath *fp, int last)
4218 {
4219 int i;
4220
4221 for (i = 0; i < last; i++) {
4222 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4223 struct sk_buff *skb = rx_buf->skb;
4224
4225 if (skb == NULL) {
4226 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4227 continue;
4228 }
4229
4230 if (fp->tpa_state[i] == BNX2X_TPA_START)
4231 pci_unmap_single(bp->pdev,
4232 pci_unmap_addr(rx_buf, mapping),
4233 bp->rx_buf_size,
4234 PCI_DMA_FROMDEVICE);
4235
4236 dev_kfree_skb(skb);
4237 rx_buf->skb = NULL;
4238 }
4239 }
4240
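/* Initialize the RX rings: size the receive buffers from the MTU plus
 * offset, overhead and payload alignment, pre-allocate one skb per TPA
 * aggregation queue (TPA is disabled on a queue if this fails), chain
 * the "next page" elements of the SGE, BD and CQE pages, then fill the
 * rings with SGEs and skbs and publish the initial producers to the
 * chip.
 */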
4241 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4242 {
4243 int func = BP_FUNC(bp);
4244 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4245 ETH_MAX_AGGREGATION_QUEUES_E1H;
4246 u16 ring_prod, cqe_ring_prod;
4247 int i, j;
4248
4249 bp->rx_buf_size = bp->dev->mtu;
4250 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4251 BCM_RX_ETH_PAYLOAD_ALIGN;
4252
4253 if (bp->flags & TPA_ENABLE_FLAG) {
4254 DP(NETIF_MSG_IFUP,
4255 "rx_buf_size %d effective_mtu %d\n",
4256 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4257
4258 for_each_queue(bp, j) {
4259 struct bnx2x_fastpath *fp = &bp->fp[j];
4260
4261 for (i = 0; i < max_agg_queues; i++) {
4262 fp->tpa_pool[i].skb =
4263 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4264 if (!fp->tpa_pool[i].skb) {
4265 BNX2X_ERR("Failed to allocate TPA "
4266 "skb pool for queue[%d] - "
4267 "disabling TPA on this "
4268 "queue!\n", j);
4269 bnx2x_free_tpa_pool(bp, fp, i);
4270 fp->disable_tpa = 1;
4271 break;
4272 }
4273 pci_unmap_addr_set(&fp->tpa_pool[i],
4274 mapping, 0);
4276 fp->tpa_state[i] = BNX2X_TPA_STOP;
4277 }
4278 }
4279 }
4280
4281 for_each_queue(bp, j) {
4282 struct bnx2x_fastpath *fp = &bp->fp[j];
4283
4284 fp->rx_bd_cons = 0;
4285 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4286 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4287
4288 /* "next page" elements initialization */
4289 /* SGE ring */
4290 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4291 struct eth_rx_sge *sge;
4292
4293 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4294 sge->addr_hi =
4295 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4296 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4297 sge->addr_lo =
4298 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4299 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4300 }
4301
4302 bnx2x_init_sge_ring_bit_mask(fp);
4303
4304 /* RX BD ring */
4305 for (i = 1; i <= NUM_RX_RINGS; i++) {
4306 struct eth_rx_bd *rx_bd;
4307
4308 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4309 rx_bd->addr_hi =
4310 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4311 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4312 rx_bd->addr_lo =
4313 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4314 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4315 }
4316
4317 /* CQ ring */
4318 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4319 struct eth_rx_cqe_next_page *nextpg;
4320
4321 nextpg = (struct eth_rx_cqe_next_page *)
4322 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4323 nextpg->addr_hi =
4324 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4325 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4326 nextpg->addr_lo =
4327 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4328 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4329 }
4330
4331 /* Allocate SGEs and initialize the ring elements */
4332 for (i = 0, ring_prod = 0;
4333 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4334
4335 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4336 BNX2X_ERR("was only able to allocate "
4337 "%d rx sges\n", i);
4338 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4339 /* Cleanup already allocated elements */
4340 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4341 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4342 fp->disable_tpa = 1;
4343 ring_prod = 0;
4344 break;
4345 }
4346 ring_prod = NEXT_SGE_IDX(ring_prod);
4347 }
4348 fp->rx_sge_prod = ring_prod;
4349
4350 /* Allocate BDs and initialize BD ring */
4351 fp->rx_comp_cons = 0;
4352 cqe_ring_prod = ring_prod = 0;
4353 for (i = 0; i < bp->rx_ring_size; i++) {
4354 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4355 BNX2X_ERR("was only able to allocate "
4356 "%d rx skbs\n", i);
4357 bp->eth_stats.rx_skb_alloc_failed++;
4358 break;
4359 }
4360 ring_prod = NEXT_RX_IDX(ring_prod);
4361 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4362 WARN_ON(ring_prod <= i);
4363 }
4364
4365 fp->rx_bd_prod = ring_prod;
4366 /* must not have more available CQEs than BDs */
4367 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4368 cqe_ring_prod);
4369 fp->rx_pkt = fp->rx_calls = 0;
4370
4371 /* Warning!
4372 * This will generate an interrupt (to the TSTORM),
4373 * so it must only be done after the chip is initialized.
4374 */
4375 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4376 fp->rx_sge_prod);
4377 if (j != 0)
4378 continue;
4379
4380 REG_WR(bp, BAR_USTRORM_INTMEM +
4381 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4382 U64_LO(fp->rx_comp_mapping));
4383 REG_WR(bp, BAR_USTRORM_INTMEM +
4384 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4385 U64_HI(fp->rx_comp_mapping));
4386 }
4387 }
4388
4389 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4390 {
4391 int i, j;
4392
4393 for_each_queue(bp, j) {
4394 struct bnx2x_fastpath *fp = &bp->fp[j];
4395
4396 for (i = 1; i <= NUM_TX_RINGS; i++) {
4397 struct eth_tx_bd *tx_bd =
4398 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4399
4400 tx_bd->addr_hi =
4401 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4402 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4403 tx_bd->addr_lo =
4404 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4405 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4406 }
4407
4408 fp->tx_pkt_prod = 0;
4409 fp->tx_pkt_cons = 0;
4410 fp->tx_bd_prod = 0;
4411 fp->tx_bd_cons = 0;
4412 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4413 fp->tx_pkt = 0;
4414 }
4415 }
4416
4417 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4418 {
4419 int func = BP_FUNC(bp);
4420
4421 spin_lock_init(&bp->spq_lock);
4422
4423 bp->spq_left = MAX_SPQ_PENDING;
4424 bp->spq_prod_idx = 0;
4425 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4426 bp->spq_prod_bd = bp->spq;
4427 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4428
4429 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4430 U64_LO(bp->spq_mapping));
4431 REG_WR(bp,
4432 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4433 U64_HI(bp->spq_mapping));
4434
4435 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4436 bp->spq_prod_idx);
4437 }
4438
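/* Fill the per-connection ETH context in the slowpath area: the XSTORM
 * section gets the TX BD page base and doorbell data address, the
 * USTORM section the RX BD/SGE page bases, buffer sizes and client id,
 * the CSTORM section the TX CQ index and status block, plus the CDU
 * reserved/usage validation words.
 */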
4439 static void bnx2x_init_context(struct bnx2x *bp)
4440 {
4441 int i;
4442
4443 for_each_queue(bp, i) {
4444 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4445 struct bnx2x_fastpath *fp = &bp->fp[i];
4446 u8 sb_id = FP_SB_ID(fp);
4447
4448 context->xstorm_st_context.tx_bd_page_base_hi =
4449 U64_HI(fp->tx_desc_mapping);
4450 context->xstorm_st_context.tx_bd_page_base_lo =
4451 U64_LO(fp->tx_desc_mapping);
4452 context->xstorm_st_context.db_data_addr_hi =
4453 U64_HI(fp->tx_prods_mapping);
4454 context->xstorm_st_context.db_data_addr_lo =
4455 U64_LO(fp->tx_prods_mapping);
4456 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4457 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4458
4459 context->ustorm_st_context.common.sb_index_numbers =
4460 BNX2X_RX_SB_INDEX_NUM;
4461 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4462 context->ustorm_st_context.common.status_block_id = sb_id;
4463 context->ustorm_st_context.common.flags =
4464 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4465 context->ustorm_st_context.common.mc_alignment_size =
4466 BCM_RX_ETH_PAYLOAD_ALIGN;
4467 context->ustorm_st_context.common.bd_buff_size =
4468 bp->rx_buf_size;
4469 context->ustorm_st_context.common.bd_page_base_hi =
4470 U64_HI(fp->rx_desc_mapping);
4471 context->ustorm_st_context.common.bd_page_base_lo =
4472 U64_LO(fp->rx_desc_mapping);
4473 if (!fp->disable_tpa) {
4474 context->ustorm_st_context.common.flags |=
4475 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4476 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4477 context->ustorm_st_context.common.sge_buff_size =
4478 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4479 context->ustorm_st_context.common.sge_page_base_hi =
4480 U64_HI(fp->rx_sge_mapping);
4481 context->ustorm_st_context.common.sge_page_base_lo =
4482 U64_LO(fp->rx_sge_mapping);
4483 }
4484
4485 context->cstorm_st_context.sb_index_number =
4486 C_SB_ETH_TX_CQ_INDEX;
4487 context->cstorm_st_context.status_block_id = sb_id;
4488
4489 context->xstorm_ag_context.cdu_reserved =
4490 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4491 CDU_REGION_NUMBER_XCM_AG,
4492 ETH_CONNECTION_TYPE);
4493 context->ustorm_ag_context.cdu_usage =
4494 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4495 CDU_REGION_NUMBER_UCM_AG,
4496 ETH_CONNECTION_TYPE);
4497 }
4498 }
4499
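/* In multi-queue mode, fill the TSTORM RSS indirection table: each of
 * its entries is assigned a queue number in round-robin order.
 */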
4500 static void bnx2x_init_ind_table(struct bnx2x *bp)
4501 {
4502 int port = BP_PORT(bp);
4503 int i;
4504
4505 if (!is_multi(bp))
4506 return;
4507
4508 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4509 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4510 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4511 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4512 i % bp->num_queues);
4513
4514 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4515 }
4516
4517 static void bnx2x_set_client_config(struct bnx2x *bp)
4518 {
4519 struct tstorm_eth_client_config tstorm_client = {0};
4520 int port = BP_PORT(bp);
4521 int i;
4522
4523 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4524 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4525 tstorm_client.config_flags =
4526 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4527 #ifdef BCM_VLAN
4528 if (bp->rx_mode && bp->vlgrp) {
4529 tstorm_client.config_flags |=
4530 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4531 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4532 }
4533 #endif
4534
4535 if (bp->flags & TPA_ENABLE_FLAG) {
4536 tstorm_client.max_sges_for_packet =
4537 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4538 tstorm_client.max_sges_for_packet =
4539 ((tstorm_client.max_sges_for_packet +
4540 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4541 PAGES_PER_SGE_SHIFT;
4542
4543 tstorm_client.config_flags |=
4544 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4545 }
4546
4547 for_each_queue(bp, i) {
4548 REG_WR(bp, BAR_TSTRORM_INTMEM +
4549 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4550 ((u32 *)&tstorm_client)[0]);
4551 REG_WR(bp, BAR_TSTRORM_INTMEM +
4552 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4553 ((u32 *)&tstorm_client)[1]);
4554 }
4555
4556 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4557 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4558 }
4559
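/* Translate bp->rx_mode into the per-function TSTORM MAC filter
 * configuration (drop-all for no RX, accept-all per traffic class for
 * allmulti/promiscuous), write it to internal memory and, unless RX is
 * disabled, push the client configuration as well.
 */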
4560 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4561 {
4562 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4563 int mode = bp->rx_mode;
4564 int mask = (1 << BP_L_ID(bp));
4565 int func = BP_FUNC(bp);
4566 int i;
4567
4568 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4569
4570 switch (mode) {
4571 case BNX2X_RX_MODE_NONE: /* no Rx */
4572 tstorm_mac_filter.ucast_drop_all = mask;
4573 tstorm_mac_filter.mcast_drop_all = mask;
4574 tstorm_mac_filter.bcast_drop_all = mask;
4575 break;
4576 case BNX2X_RX_MODE_NORMAL:
4577 tstorm_mac_filter.bcast_accept_all = mask;
4578 break;
4579 case BNX2X_RX_MODE_ALLMULTI:
4580 tstorm_mac_filter.mcast_accept_all = mask;
4581 tstorm_mac_filter.bcast_accept_all = mask;
4582 break;
4583 case BNX2X_RX_MODE_PROMISC:
4584 tstorm_mac_filter.ucast_accept_all = mask;
4585 tstorm_mac_filter.mcast_accept_all = mask;
4586 tstorm_mac_filter.bcast_accept_all = mask;
4587 break;
4588 default:
4589 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4590 break;
4591 }
4592
4593 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4594 REG_WR(bp, BAR_TSTRORM_INTMEM +
4595 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4596 ((u32 *)&tstorm_mac_filter)[i]);
4597
4598 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4599 ((u32 *)&tstorm_mac_filter)[i]); */
4600 }
4601
4602 if (mode != BNX2X_RX_MODE_NONE)
4603 bnx2x_set_client_config(bp);
4604 }
4605
4606 static void bnx2x_init_internal_common(struct bnx2x *bp)
4607 {
4608 int i;
4609
4610 if (bp->flags & TPA_ENABLE_FLAG) {
4611 struct tstorm_eth_tpa_exist tpa = {0};
4612
4613 tpa.tpa_exist = 1;
4614
4615 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4616 ((u32 *)&tpa)[0]);
4617 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4618 ((u32 *)&tpa)[1]);
4619 }
4620
4621 /* Zero this manually as its initialization is
4622 currently missing in the initTool */
4623 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4624 REG_WR(bp, BAR_USTRORM_INTMEM +
4625 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4626 }
4627
4628 static void bnx2x_init_internal_port(struct bnx2x *bp)
4629 {
4630 int port = BP_PORT(bp);
4631
4632 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4634 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4635 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4636 }
4637
4638 static void bnx2x_init_internal_func(struct bnx2x *bp)
4639 {
4640 struct tstorm_eth_function_common_config tstorm_config = {0};
4641 struct stats_indication_flags stats_flags = {0};
4642 int port = BP_PORT(bp);
4643 int func = BP_FUNC(bp);
4644 int i;
4645 u16 max_agg_size;
4646
4647 if (is_multi(bp)) {
4648 tstorm_config.config_flags = MULTI_FLAGS;
4649 tstorm_config.rss_result_mask = MULTI_MASK;
4650 }
4651
4652 tstorm_config.leading_client_id = BP_L_ID(bp);
4653
4654 REG_WR(bp, BAR_TSTRORM_INTMEM +
4655 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4656 (*(u32 *)&tstorm_config));
4657
4658 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4659 bnx2x_set_storm_rx_mode(bp);
4660
4661 /* reset xstorm per client statistics */
4662 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4663 REG_WR(bp, BAR_XSTRORM_INTMEM +
4664 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4665 i*4, 0);
4666 }
4667 /* reset tstorm per client statistics */
4668 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4669 REG_WR(bp, BAR_TSTRORM_INTMEM +
4670 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4671 i*4, 0);
4672 }
4673
4674 /* Init statistics related context */
4675 stats_flags.collect_eth = 1;
4676
4677 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4678 ((u32 *)&stats_flags)[0]);
4679 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4680 ((u32 *)&stats_flags)[1]);
4681
4682 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4683 ((u32 *)&stats_flags)[0]);
4684 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4685 ((u32 *)&stats_flags)[1]);
4686
4687 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4688 ((u32 *)&stats_flags)[0]);
4689 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4690 ((u32 *)&stats_flags)[1]);
4691
4692 REG_WR(bp, BAR_XSTRORM_INTMEM +
4693 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4694 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4695 REG_WR(bp, BAR_XSTRORM_INTMEM +
4696 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4697 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4698
4699 REG_WR(bp, BAR_TSTRORM_INTMEM +
4700 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4701 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4702 REG_WR(bp, BAR_TSTRORM_INTMEM +
4703 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4704 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4705
4706 if (CHIP_IS_E1H(bp)) {
4707 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4708 IS_E1HMF(bp));
4709 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4710 IS_E1HMF(bp));
4711 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4712 IS_E1HMF(bp));
4713 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4714 IS_E1HMF(bp));
4715
4716 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4717 bp->e1hov);
4718 }
4719
4720 /* Init CQ ring mapping and aggregation size */
4721 max_agg_size = min((u32)(bp->rx_buf_size +
4722 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4723 (u32)0xffff);
4724 for_each_queue(bp, i) {
4725 struct bnx2x_fastpath *fp = &bp->fp[i];
4726
4727 REG_WR(bp, BAR_USTRORM_INTMEM +
4728 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4729 U64_LO(fp->rx_comp_mapping));
4730 REG_WR(bp, BAR_USTRORM_INTMEM +
4731 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4732 U64_HI(fp->rx_comp_mapping));
4733
4734 REG_WR16(bp, BAR_USTRORM_INTMEM +
4735 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4736 max_agg_size);
4737 }
4738 }
4739
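/* The internal memory init is hierarchical: a COMMON load also performs
 * the port and function parts, and a PORT load also performs the
 * function part, hence the deliberate switch fall-through below.
 */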
4740 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4741 {
4742 switch (load_code) {
4743 case FW_MSG_CODE_DRV_LOAD_COMMON:
4744 bnx2x_init_internal_common(bp);
4745 /* no break */
4746
4747 case FW_MSG_CODE_DRV_LOAD_PORT:
4748 bnx2x_init_internal_port(bp);
4749 /* no break */
4750
4751 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4752 bnx2x_init_internal_func(bp);
4753 break;
4754
4755 default:
4756 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4757 break;
4758 }
4759 }
4760
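/* Top-level NIC init: set up every fastpath and its status block, the
 * default status block, coalescing, the RX/TX/slowpath rings, the ETH
 * contexts and internal memories (according to load_code), the RSS
 * indirection table, and finally enable interrupts.
 */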
4761 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4762 {
4763 int i;
4764
4765 for_each_queue(bp, i) {
4766 struct bnx2x_fastpath *fp = &bp->fp[i];
4767
4768 fp->bp = bp;
4769 fp->state = BNX2X_FP_STATE_CLOSED;
4770 fp->index = i;
4771 fp->cl_id = BP_L_ID(bp) + i;
4772 fp->sb_id = fp->cl_id;
4773 DP(NETIF_MSG_IFUP,
4774 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4775 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4776 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4777 FP_SB_ID(fp));
4778 bnx2x_update_fpsb_idx(fp);
4779 }
4780
4781 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4782 DEF_SB_ID);
4783 bnx2x_update_dsb_idx(bp);
4784 bnx2x_update_coalesce(bp);
4785 bnx2x_init_rx_rings(bp);
4786 bnx2x_init_tx_ring(bp);
4787 bnx2x_init_sp_ring(bp);
4788 bnx2x_init_context(bp);
4789 bnx2x_init_internal(bp, load_code);
4790 bnx2x_init_ind_table(bp);
4791 bnx2x_int_enable(bp);
4792 }
4793
4794 /* end of nic init */
4795
4796 /*
4797 * gzip service functions
4798 */
4799
4800 static int bnx2x_gunzip_init(struct bnx2x *bp)
4801 {
4802 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4803 &bp->gunzip_mapping);
4804 if (bp->gunzip_buf == NULL)
4805 goto gunzip_nomem1;
4806
4807 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4808 if (bp->strm == NULL)
4809 goto gunzip_nomem2;
4810
4811 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4812 GFP_KERNEL);
4813 if (bp->strm->workspace == NULL)
4814 goto gunzip_nomem3;
4815
4816 return 0;
4817
4818 gunzip_nomem3:
4819 kfree(bp->strm);
4820 bp->strm = NULL;
4821
4822 gunzip_nomem2:
4823 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4824 bp->gunzip_mapping);
4825 bp->gunzip_buf = NULL;
4826
4827 gunzip_nomem1:
4828 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4829 " decompression\n", bp->dev->name);
4830 return -ENOMEM;
4831 }
4832
4833 static void bnx2x_gunzip_end(struct bnx2x *bp)
4834 {
4835 kfree(bp->strm->workspace);
4836
4837 kfree(bp->strm);
4838 bp->strm = NULL;
4839
4840 if (bp->gunzip_buf) {
4841 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4842 bp->gunzip_mapping);
4843 bp->gunzip_buf = NULL;
4844 }
4845 }
4846
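/* Inflate a gzip-wrapped firmware blob into bp->gunzip_buf: verify the
 * gzip magic, skip the 10-byte header (and the optional NUL-terminated
 * original file name when FNAME is set), run zlib inflate in raw mode
 * (-MAX_WBITS) and record the output length in dwords.
 */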
4847 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4848 {
4849 int n, rc;
4850
4851 /* check gzip header */
4852 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4853 return -EINVAL;
4854
4855 n = 10;
4856
4857 #define FNAME 0x8
4858
4859 if (zbuf[3] & FNAME)
4860 while ((zbuf[n++] != 0) && (n < len));
4861
4862 bp->strm->next_in = zbuf + n;
4863 bp->strm->avail_in = len - n;
4864 bp->strm->next_out = bp->gunzip_buf;
4865 bp->strm->avail_out = FW_BUF_SIZE;
4866
4867 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4868 if (rc != Z_OK)
4869 return rc;
4870
4871 rc = zlib_inflate(bp->strm, Z_FINISH);
4872 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4873 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4874 bp->dev->name, bp->strm->msg);
4875
4876 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4877 if (bp->gunzip_outlen & 0x3)
4878 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4879 " gunzip_outlen (%d) not aligned\n",
4880 bp->dev->name, bp->gunzip_outlen);
4881 bp->gunzip_outlen >>= 2;
4882
4883 zlib_inflateEnd(bp->strm);
4884
4885 if (rc == Z_STREAM_END)
4886 return 0;
4887
4888 return rc;
4889 }
4890
4891 /* nic load/unload */
4892
4893 /*
4894 * General service functions
4895 */
4896
4897 /* send a NIG loopback debug packet */
4898 static void bnx2x_lb_pckt(struct bnx2x *bp)
4899 {
4900 u32 wb_write[3];
4901
4902 /* Ethernet source and destination addresses */
4903 wb_write[0] = 0x55555555;
4904 wb_write[1] = 0x55555555;
4905 wb_write[2] = 0x20; /* SOP */
4906 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4907
4908 /* NON-IP protocol */
4909 wb_write[0] = 0x09000000;
4910 wb_write[1] = 0x55555555;
4911 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4912 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4913 }
4914
4915 /* Some of the internal memories are not directly readable
4916 * from the driver, so to test them we send debug packets
4917 * through the NIG loopback path.
4918 */
4919 static int bnx2x_int_mem_test(struct bnx2x *bp)
4920 {
4921 int factor;
4922 int count, i;
4923 u32 val = 0;
4924
4925 if (CHIP_REV_IS_FPGA(bp))
4926 factor = 120;
4927 else if (CHIP_REV_IS_EMUL(bp))
4928 factor = 200;
4929 else
4930 factor = 1;
4931
4932 DP(NETIF_MSG_HW, "start part1\n");
4933
4934 /* Disable inputs of parser neighbor blocks */
4935 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4936 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4937 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4938 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4939
4940 /* Write 0 to parser credits for CFC search request */
4941 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4942
4943 /* send Ethernet packet */
4944 bnx2x_lb_pckt(bp);
4945
4946 /* TODO: should the NIG statistics be reset here? */
4947 /* Wait until NIG register shows 1 packet of size 0x10 */
4948 count = 1000 * factor;
4949 while (count) {
4950
4951 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4952 val = *bnx2x_sp(bp, wb_data[0]);
4953 if (val == 0x10)
4954 break;
4955
4956 msleep(10);
4957 count--;
4958 }
4959 if (val != 0x10) {
4960 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4961 return -1;
4962 }
4963
4964 /* Wait until PRS register shows 1 packet */
4965 count = 1000 * factor;
4966 while (count) {
4967 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4968 if (val == 1)
4969 break;
4970
4971 msleep(10);
4972 count--;
4973 }
4974 if (val != 0x1) {
4975 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4976 return -2;
4977 }
4978
4979 /* Reset and init BRB, PRS */
4980 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4981 msleep(50);
4982 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4983 msleep(50);
4984 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4985 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4986
4987 DP(NETIF_MSG_HW, "part2\n");
4988
4989 /* Disable inputs of parser neighbor blocks */
4990 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4991 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4992 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4993 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4994
4995 /* Write 0 to parser credits for CFC search request */
4996 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4997
4998 /* send 10 Ethernet packets */
4999 for (i = 0; i < 10; i++)
5000 bnx2x_lb_pckt(bp);
5001
5002 /* Wait until NIG register shows 10 + 1
5003 packets of size 11*0x10 = 0xb0 */
5004 count = 1000 * factor;
5005 while (count) {
5006
5007 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5008 val = *bnx2x_sp(bp, wb_data[0]);
5009 if (val == 0xb0)
5010 break;
5011
5012 msleep(10);
5013 count--;
5014 }
5015 if (val != 0xb0) {
5016 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5017 return -3;
5018 }
5019
5020 /* Wait until PRS register shows 2 packets */
5021 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5022 if (val != 2)
5023 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5024
5025 /* Write 1 to parser credits for CFC search request */
5026 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5027
5028 /* Wait until PRS register shows 3 packets */
5029 msleep(10 * factor);
5030 /* Wait until NIG register shows 1 packet of size 0x10 */
5031 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5032 if (val != 3)
5033 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5034
5035 /* clear NIG EOP FIFO */
5036 for (i = 0; i < 11; i++)
5037 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5038 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5039 if (val != 1) {
5040 BNX2X_ERR("clear of NIG failed\n");
5041 return -4;
5042 }
5043
5044 /* Reset and init BRB, PRS, NIG */
5045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5046 msleep(50);
5047 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5048 msleep(50);
5049 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5050 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5051 #ifndef BCM_ISCSI
5052 /* set NIC mode */
5053 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5054 #endif
5055
5056 /* Enable inputs of parser neighbor blocks */
5057 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5058 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5059 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5060 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5061
5062 DP(NETIF_MSG_HW, "done\n");
5063
5064 return 0; /* OK */
5065 }
5066
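/* Unmask (write 0 to) the per-block interrupt mask registers so block
 * attentions can reach the AEU; PXP2 and PBF keep a few bits masked.
 */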
5067 static void enable_blocks_attention(struct bnx2x *bp)
5068 {
5069 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5070 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5071 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5072 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5073 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5074 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5075 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5076 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5077 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5078 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5079 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5080 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5081 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5082 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5083 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5084 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5085 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5086 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5087 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5088 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5089 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5090 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5091 if (CHIP_REV_IS_FPGA(bp))
5092 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5093 else
5094 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5095 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5096 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5097 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5098 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5099 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5100 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5101 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5102 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5103 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5104 }
5105
5106
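/* Chip-wide (COMMON) HW init, executed once by the first function to
 * load: take the blocks out of reset, run the init-tool sequences for
 * each block, zero the storm internal memories, run the internal memory
 * self test on a freshly powered-up E1, and set up the fan-failure SPIO
 * on boards that need it.
 */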
5107 static int bnx2x_init_common(struct bnx2x *bp)
5108 {
5109 u32 val, i;
5110
5111 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5112
5113 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5114 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5115
5116 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5117 if (CHIP_IS_E1H(bp))
5118 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5119
5120 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5121 msleep(30);
5122 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5123
5124 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5125 if (CHIP_IS_E1(bp)) {
5126 /* enable HW interrupt from PXP on USDM overflow
5127 bit 16 on INT_MASK_0 */
5128 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5129 }
5130
5131 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5132 bnx2x_init_pxp(bp);
5133
5134 #ifdef __BIG_ENDIAN
5135 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5136 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5137 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5138 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5139 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5140 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5141
5142 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5143 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5144 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5145 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5146 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5147 #endif
5148
5149 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5150 #ifdef BCM_ISCSI
5151 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5152 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5153 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5154 #endif
5155
5156 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5157 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5158
5159 /* let the HW do its magic ... */
5160 msleep(100);
5161 /* finish PXP init */
5162 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5163 if (val != 1) {
5164 BNX2X_ERR("PXP2 CFG failed\n");
5165 return -EBUSY;
5166 }
5167 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5168 if (val != 1) {
5169 BNX2X_ERR("PXP2 RD_INIT failed\n");
5170 return -EBUSY;
5171 }
5172
5173 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5174 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5175
5176 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5177
5178 /* clean the DMAE memory */
5179 bp->dmae_ready = 1;
5180 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5181
5182 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5183 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5184 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5185 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5186
5187 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5188 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5189 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5190 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5191
5192 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5193 /* soft reset pulse */
5194 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5195 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5196
5197 #ifdef BCM_ISCSI
5198 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5199 #endif
5200
5201 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5202 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5203 if (!CHIP_REV_IS_SLOW(bp)) {
5204 /* enable hw interrupt from doorbell Q */
5205 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5206 }
5207
5208 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5209 if (CHIP_REV_IS_SLOW(bp)) {
5210 /* fix for emulation and FPGA for no pause */
5211 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5212 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5213 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5214 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5215 }
5216
5217 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5218 /* set NIC mode */
5219 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5220 if (CHIP_IS_E1H(bp))
5221 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5222
5223 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5224 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5225 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5226 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5227
5228 if (CHIP_IS_E1H(bp)) {
5229 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5230 STORM_INTMEM_SIZE_E1H/2);
5231 bnx2x_init_fill(bp,
5232 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5233 0, STORM_INTMEM_SIZE_E1H/2);
5234 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5235 STORM_INTMEM_SIZE_E1H/2);
5236 bnx2x_init_fill(bp,
5237 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5238 0, STORM_INTMEM_SIZE_E1H/2);
5239 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5240 STORM_INTMEM_SIZE_E1H/2);
5241 bnx2x_init_fill(bp,
5242 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5243 0, STORM_INTMEM_SIZE_E1H/2);
5244 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1H/2);
5246 bnx2x_init_fill(bp,
5247 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5248 0, STORM_INTMEM_SIZE_E1H/2);
5249 } else { /* E1 */
5250 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5251 STORM_INTMEM_SIZE_E1);
5252 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5253 STORM_INTMEM_SIZE_E1);
5254 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5255 STORM_INTMEM_SIZE_E1);
5256 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5257 STORM_INTMEM_SIZE_E1);
5258 }
5259
5260 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5261 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5262 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5263 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5264
5265 /* sync semi rtc */
5266 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5267 0x80000000);
5268 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5269 0x80000000);
5270
5271 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5272 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5273 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5274
5275 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5276 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5277 REG_WR(bp, i, 0xc0cac01a);
5278 /* TODO: replace with something meaningful */
5279 }
5280 if (CHIP_IS_E1H(bp))
5281 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5282 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5283
5284 if (sizeof(union cdu_context) != 1024)
5285 /* we currently assume that a context is 1024 bytes */
5286 printk(KERN_ALERT PFX "please adjust the size of"
5287 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5288
5289 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5290 val = (4 << 24) + (0 << 12) + 1024;
5291 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5292 if (CHIP_IS_E1(bp)) {
5293 /* !!! fix pxp client credit until excel update */
5294 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5295 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5296 }
5297
5298 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5299 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5300
5301 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5302 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5303
5304 /* PXPCS COMMON comes here */
5305 /* Reset PCIE errors for debug */
5306 REG_WR(bp, 0x2814, 0xffffffff);
5307 REG_WR(bp, 0x3820, 0xffffffff);
5308
5309 /* EMAC0 COMMON comes here */
5310 /* EMAC1 COMMON comes here */
5311 /* DBU COMMON comes here */
5312 /* DBG COMMON comes here */
5313
5314 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5315 if (CHIP_IS_E1H(bp)) {
5316 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5317 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5318 }
5319
5320 if (CHIP_REV_IS_SLOW(bp))
5321 msleep(200);
5322
5323 /* finish CFC init */
5324 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5325 if (val != 1) {
5326 BNX2X_ERR("CFC LL_INIT failed\n");
5327 return -EBUSY;
5328 }
5329 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5330 if (val != 1) {
5331 BNX2X_ERR("CFC AC_INIT failed\n");
5332 return -EBUSY;
5333 }
5334 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5335 if (val != 1) {
5336 BNX2X_ERR("CFC CAM_INIT failed\n");
5337 return -EBUSY;
5338 }
5339 REG_WR(bp, CFC_REG_DEBUG0, 0);
5340
5341 /* read the NIG statistic
5342 to see if this is our first bring-up since power-up */
5343 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5344 val = *bnx2x_sp(bp, wb_data[0]);
5345
5346 /* do internal memory self test */
5347 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5348 BNX2X_ERR("internal mem self test failed\n");
5349 return -EBUSY;
5350 }
5351
5352 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5353 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5354 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5355 /* Fan failure is indicated by SPIO 5 */
5356 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5357 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5358
5359 /* set to active low mode */
5360 val = REG_RD(bp, MISC_REG_SPIO_INT);
5361 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5362 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5363 REG_WR(bp, MISC_REG_SPIO_INT, val);
5364
5365 /* enable interrupt to signal the IGU */
5366 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5367 val |= (1 << MISC_REGISTERS_SPIO_5);
5368 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5369 break;
5370
5371 default:
5372 break;
5373 }
5374
5375 /* clear PXP2 attentions */
5376 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5377
5378 enable_blocks_attention(bp);
5379
5380 if (!BP_NOMCP(bp)) {
5381 bnx2x_acquire_phy_lock(bp);
5382 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5383 bnx2x_release_phy_lock(bp);
5384 } else
5385 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5386
5387 return 0;
5388 }
5389
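/* Per-port HW init: run the PORT init-tool sections for the SEMs, PBF
 * (configured for no PAUSE with mtu-9000 thresholds), HC and NIG, and
 * program the AEU attention mask according to SF/MF mode.
 */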
5390 static int bnx2x_init_port(struct bnx2x *bp)
5391 {
5392 int port = BP_PORT(bp);
5393 u32 val;
5394
5395 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5396
5397 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5398
5399 /* Port PXP comes here */
5400 /* Port PXP2 comes here */
5401 #ifdef BCM_ISCSI
5402 /* Port0 1
5403 * Port1 385 */
5404 i++;
5405 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5406 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5407 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5408 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5409
5410 /* Port0 2
5411 * Port1 386 */
5412 i++;
5413 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5414 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5415 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5416 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5417
5418 /* Port0 3
5419 * Port1 387 */
5420 i++;
5421 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5422 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5423 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5424 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5425 #endif
5426 /* Port CMs come here */
5427
5428 /* Port QM comes here */
5429 #ifdef BCM_ISCSI
5430 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5431 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5432
5433 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5434 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5435 #endif
5436 /* Port DQ comes here */
5437 /* Port BRB1 comes here */
5438 /* Port PRS comes here */
5439 /* Port TSDM comes here */
5440 /* Port CSDM comes here */
5441 /* Port USDM comes here */
5442 /* Port XSDM comes here */
5443 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5444 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5445 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5446 port ? USEM_PORT1_END : USEM_PORT0_END);
5447 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5448 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5449 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5450 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5451 /* Port UPB comes here */
5452 /* Port XPB comes here */
5453
5454 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5455 port ? PBF_PORT1_END : PBF_PORT0_END);
5456
5457 	/* configure PBF to work without PAUSE, MTU 9000 */
5458 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5459
5460 /* update threshold */
5461 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5462 /* update init credit */
5463 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5464
5465 /* probe changes */
5466 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5467 msleep(5);
5468 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5469
5470 #ifdef BCM_ISCSI
5471 /* tell the searcher where the T2 table is */
5472 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5473
5474 wb_write[0] = U64_LO(bp->t2_mapping);
5475 wb_write[1] = U64_HI(bp->t2_mapping);
5476 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5477 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5478 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5479 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5480
5481 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5482 /* Port SRCH comes here */
5483 #endif
5484 /* Port CDU comes here */
5485 /* Port CFC comes here */
5486
5487 if (CHIP_IS_E1(bp)) {
5488 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5489 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5490 }
5491 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5492 port ? HC_PORT1_END : HC_PORT0_END);
5493
5494 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5495 MISC_AEU_PORT0_START,
5496 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5497 /* init aeu_mask_attn_func_0/1:
5498 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5499 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5500 * bits 4-7 are used for "per vn group attention" */
5501 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5502 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5503
5504 /* Port PXPCS comes here */
5505 /* Port EMAC0 comes here */
5506 /* Port EMAC1 comes here */
5507 /* Port DBU comes here */
5508 /* Port DBG comes here */
5509 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5510 port ? NIG_PORT1_END : NIG_PORT0_END);
5511
5512 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5513
5514 if (CHIP_IS_E1H(bp)) {
5515 u32 wsum;
5516 struct cmng_struct_per_port m_cmng_port;
5517 int vn;
5518
5519 /* 0x2 disable e1hov, 0x1 enable */
5520 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5521 (IS_E1HMF(bp) ? 0x1 : 0x2));
5522
5523 		/* Init RATE SHAPING and FAIRNESS contexts.
5524 		   Initialize as if there is a 10G link. */
5525 wsum = bnx2x_calc_vn_wsum(bp);
5526 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5527 if (IS_E1HMF(bp))
5528 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5529 bnx2x_init_vn_minmax(bp, 2*vn + port,
5530 wsum, 10000, &m_cmng_port);
5531 }
5532
5533 /* Port MCP comes here */
5534 /* Port DMAE comes here */
5535
5536 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5537 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5538 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5539 /* add SPIO 5 to group 0 */
5540 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5541 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5542 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5543 break;
5544
5545 default:
5546 break;
5547 }
5548
5549 bnx2x__link_reset(bp);
5550
5551 return 0;
5552 }
5553
5554 #define ILT_PER_FUNC (768/2)
5555 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5556 /* the phys address is shifted right 12 bits and a 1=valid bit
5557    is added at the 53rd bit;
5558    then, since this is a wide register(TM),
5559    we split it into two 32 bit writes
5560  */
5561 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5562 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5563 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5564 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
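/* Illustrative example (hypothetical address, not from the source): for a
 * page-aligned DMA address 0x0000001234567000, ONCHIP_ADDR1() yields
 * 0x01234567 (address bits 43:12) and ONCHIP_ADDR2() yields 0x00100000 -
 * only the valid bit at bit 20 of the upper dword, i.e. the 53rd bit of
 * the 64-bit value, since this example address has no bits above 43.
 * PXP_ONE_ILT(5) packs 0x1405, i.e. first == last == ILT line 5, and
 * PXP_ILT_RANGE(f, l) packs a [f, l] range the same way.
 */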
5565
5566 #define CNIC_ILT_LINES 0
5567
5568 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5569 {
5570 int reg;
5571
5572 if (CHIP_IS_E1H(bp))
5573 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5574 else /* E1 */
5575 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5576
5577 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5578 }
5579
5580 static int bnx2x_init_func(struct bnx2x *bp)
5581 {
5582 int port = BP_PORT(bp);
5583 int func = BP_FUNC(bp);
5584 int i;
5585
5586 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5587
5588 i = FUNC_ILT_BASE(func);
5589
5590 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5591 if (CHIP_IS_E1H(bp)) {
5592 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5593 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5594 } else /* E1 */
5595 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5596 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5597
5598
5599 if (CHIP_IS_E1H(bp)) {
5600 for (i = 0; i < 9; i++)
5601 bnx2x_init_block(bp,
5602 cm_start[func][i], cm_end[func][i]);
5603
5604 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5605 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5606 }
5607
5608 /* HC init per function */
5609 if (CHIP_IS_E1H(bp)) {
5610 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5611
5612 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5613 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5614 }
5615 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5616
5617 if (CHIP_IS_E1H(bp))
5618 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5619
5620 /* Reset PCIE errors for debug */
5621 REG_WR(bp, 0x2114, 0xffffffff);
5622 REG_WR(bp, 0x2120, 0xffffffff);
5623
5624 return 0;
5625 }
5626
5627 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5628 {
5629 int i, rc = 0;
5630
5631 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5632 BP_FUNC(bp), load_code);
5633
5634 bp->dmae_ready = 0;
5635 mutex_init(&bp->dmae_mutex);
5636 bnx2x_gunzip_init(bp);
5637
5638 switch (load_code) {
5639 case FW_MSG_CODE_DRV_LOAD_COMMON:
5640 rc = bnx2x_init_common(bp);
5641 if (rc)
5642 goto init_hw_err;
5643 /* no break */
5644
5645 case FW_MSG_CODE_DRV_LOAD_PORT:
5646 bp->dmae_ready = 1;
5647 rc = bnx2x_init_port(bp);
5648 if (rc)
5649 goto init_hw_err;
5650 /* no break */
5651
5652 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5653 bp->dmae_ready = 1;
5654 rc = bnx2x_init_func(bp);
5655 if (rc)
5656 goto init_hw_err;
5657 break;
5658
5659 default:
5660 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5661 break;
5662 }
5663
5664 if (!BP_NOMCP(bp)) {
5665 int func = BP_FUNC(bp);
5666
5667 bp->fw_drv_pulse_wr_seq =
5668 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5669 DRV_PULSE_SEQ_MASK);
5670 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5671 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5672 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5673 } else
5674 bp->func_stx = 0;
5675
5676 /* this needs to be done before gunzip end */
5677 bnx2x_zero_def_sb(bp);
5678 for_each_queue(bp, i)
5679 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5680
5681 init_hw_err:
5682 bnx2x_gunzip_end(bp);
5683
5684 return rc;
5685 }
5686
5687 /* send the MCP a request, block until there is a reply */
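/* The driver writes (command | seq) into its driver mailbox header and then
 * polls the firmware mailbox header until the FW echoes the same sequence
 * number in the FW_MSG_SEQ_NUMBER_MASK bits; the remaining FW_MSG_CODE_MASK
 * bits carry the FW response code that is returned (0 is returned if the FW
 * never answers).
 */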
5688 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5689 {
5690 int func = BP_FUNC(bp);
5691 u32 seq = ++bp->fw_seq;
5692 u32 rc = 0;
5693 u32 cnt = 1;
5694 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5695
5696 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5697 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5698
5699 do {
5700 		/* let the FW do its magic ... */
5701 msleep(delay);
5702
5703 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5704
5705 		/* Give the FW up to 2 seconds (200*10ms) */
5706 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5707
5708 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5709 cnt*delay, rc, seq);
5710
5711 /* is this a reply to our command? */
5712 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5713 rc &= FW_MSG_CODE_MASK;
5714
5715 } else {
5716 /* FW BUG! */
5717 BNX2X_ERR("FW failed to respond!\n");
5718 bnx2x_fw_dump(bp);
5719 rc = 0;
5720 }
5721
5722 return rc;
5723 }
5724
5725 static void bnx2x_free_mem(struct bnx2x *bp)
5726 {
5727
5728 #define BNX2X_PCI_FREE(x, y, size) \
5729 do { \
5730 if (x) { \
5731 pci_free_consistent(bp->pdev, size, x, y); \
5732 x = NULL; \
5733 y = 0; \
5734 } \
5735 } while (0)
5736
5737 #define BNX2X_FREE(x) \
5738 do { \
5739 if (x) { \
5740 vfree(x); \
5741 x = NULL; \
5742 } \
5743 } while (0)
5744
5745 int i;
5746
5747 /* fastpath */
5748 for_each_queue(bp, i) {
5749
5750 /* Status blocks */
5751 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5752 bnx2x_fp(bp, i, status_blk_mapping),
5753 sizeof(struct host_status_block) +
5754 sizeof(struct eth_tx_db_data));
5755
5756 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5757 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5758 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5759 bnx2x_fp(bp, i, tx_desc_mapping),
5760 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5761
5762 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5763 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5764 bnx2x_fp(bp, i, rx_desc_mapping),
5765 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5766
5767 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5768 bnx2x_fp(bp, i, rx_comp_mapping),
5769 sizeof(struct eth_fast_path_rx_cqe) *
5770 NUM_RCQ_BD);
5771
5772 /* SGE ring */
5773 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5774 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5775 bnx2x_fp(bp, i, rx_sge_mapping),
5776 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5777 }
5778 /* end of fastpath */
5779
5780 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5781 sizeof(struct host_def_status_block));
5782
5783 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5784 sizeof(struct bnx2x_slowpath));
5785
5786 #ifdef BCM_ISCSI
5787 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5788 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5789 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5790 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5791 #endif
5792 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5793
5794 #undef BNX2X_PCI_FREE
5795 #undef BNX2X_FREE
5796 }
5797
5798 static int bnx2x_alloc_mem(struct bnx2x *bp)
5799 {
5800
5801 #define BNX2X_PCI_ALLOC(x, y, size) \
5802 do { \
5803 x = pci_alloc_consistent(bp->pdev, size, y); \
5804 if (x == NULL) \
5805 goto alloc_mem_err; \
5806 memset(x, 0, size); \
5807 } while (0)
5808
5809 #define BNX2X_ALLOC(x, size) \
5810 do { \
5811 x = vmalloc(size); \
5812 if (x == NULL) \
5813 goto alloc_mem_err; \
5814 memset(x, 0, size); \
5815 } while (0)
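/* BNX2X_PCI_ALLOC returns zeroed DMA-coherent memory for rings that the
 * chip accesses directly, while BNX2X_ALLOC uses vmalloc for host-only
 * shadow rings; any failure jumps to alloc_mem_err, which frees whatever
 * was already allocated via bnx2x_free_mem().
 */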
5816
5817 int i;
5818
5819 /* fastpath */
5820 for_each_queue(bp, i) {
5821 bnx2x_fp(bp, i, bp) = bp;
5822
5823 /* Status blocks */
5824 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5825 &bnx2x_fp(bp, i, status_blk_mapping),
5826 sizeof(struct host_status_block) +
5827 sizeof(struct eth_tx_db_data));
5828
5829 bnx2x_fp(bp, i, hw_tx_prods) =
5830 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5831
5832 bnx2x_fp(bp, i, tx_prods_mapping) =
5833 bnx2x_fp(bp, i, status_blk_mapping) +
5834 sizeof(struct host_status_block);
5835
5836 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5837 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5838 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5839 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5840 &bnx2x_fp(bp, i, tx_desc_mapping),
5841 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5842
5843 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5844 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5845 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5846 &bnx2x_fp(bp, i, rx_desc_mapping),
5847 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5848
5849 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5850 &bnx2x_fp(bp, i, rx_comp_mapping),
5851 sizeof(struct eth_fast_path_rx_cqe) *
5852 NUM_RCQ_BD);
5853
5854 /* SGE ring */
5855 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5856 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5857 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5858 &bnx2x_fp(bp, i, rx_sge_mapping),
5859 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5860 }
5861 /* end of fastpath */
5862
5863 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5864 sizeof(struct host_def_status_block));
5865
5866 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5867 sizeof(struct bnx2x_slowpath));
5868
5869 #ifdef BCM_ISCSI
5870 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5871
5872 /* Initialize T1 */
5873 for (i = 0; i < 64*1024; i += 64) {
5874 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5875 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5876 }
5877
5878 /* allocate searcher T2 table
5879 we allocate 1/4 of alloc num for T2
5880 (which is not entered into the ILT) */
5881 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5882
5883 /* Initialize T2 */
5884 for (i = 0; i < 16*1024; i += 64)
5885 		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5886
5887 /* now fixup the last line in the block to point to the next block */
5888 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5889
5890 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5891 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5892
5893 /* QM queues (128*MAX_CONN) */
5894 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5895 #endif
5896
5897 /* Slow path ring */
5898 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5899
5900 return 0;
5901
5902 alloc_mem_err:
5903 bnx2x_free_mem(bp);
5904 return -ENOMEM;
5905
5906 #undef BNX2X_PCI_ALLOC
5907 #undef BNX2X_ALLOC
5908 }
5909
5910 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5911 {
5912 int i;
5913
5914 for_each_queue(bp, i) {
5915 struct bnx2x_fastpath *fp = &bp->fp[i];
5916
5917 u16 bd_cons = fp->tx_bd_cons;
5918 u16 sw_prod = fp->tx_pkt_prod;
5919 u16 sw_cons = fp->tx_pkt_cons;
5920
5921 while (sw_cons != sw_prod) {
5922 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5923 sw_cons++;
5924 }
5925 }
5926 }
5927
5928 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5929 {
5930 int i, j;
5931
5932 for_each_queue(bp, j) {
5933 struct bnx2x_fastpath *fp = &bp->fp[j];
5934
5935 for (i = 0; i < NUM_RX_BD; i++) {
5936 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5937 struct sk_buff *skb = rx_buf->skb;
5938
5939 if (skb == NULL)
5940 continue;
5941
5942 pci_unmap_single(bp->pdev,
5943 pci_unmap_addr(rx_buf, mapping),
5944 bp->rx_buf_size,
5945 PCI_DMA_FROMDEVICE);
5946
5947 rx_buf->skb = NULL;
5948 dev_kfree_skb(skb);
5949 }
5950 if (!fp->disable_tpa)
5951 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5952 ETH_MAX_AGGREGATION_QUEUES_E1 :
5953 ETH_MAX_AGGREGATION_QUEUES_E1H);
5954 }
5955 }
5956
5957 static void bnx2x_free_skbs(struct bnx2x *bp)
5958 {
5959 bnx2x_free_tx_skbs(bp);
5960 bnx2x_free_rx_skbs(bp);
5961 }
5962
5963 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5964 {
5965 int i, offset = 1;
5966
5967 free_irq(bp->msix_table[0].vector, bp->dev);
5968 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5969 bp->msix_table[0].vector);
5970
5971 for_each_queue(bp, i) {
5972 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5973 "state %x\n", i, bp->msix_table[i + offset].vector,
5974 bnx2x_fp(bp, i, state));
5975
5976 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5977 BNX2X_ERR("IRQ of fp #%d being freed while "
5978 "state != closed\n", i);
5979
5980 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5981 }
5982 }
5983
5984 static void bnx2x_free_irq(struct bnx2x *bp)
5985 {
5986 if (bp->flags & USING_MSIX_FLAG) {
5987 bnx2x_free_msix_irqs(bp);
5988 pci_disable_msix(bp->pdev);
5989 bp->flags &= ~USING_MSIX_FLAG;
5990
5991 } else
5992 free_irq(bp->pdev->irq, bp->dev);
5993 }
5994
5995 static int bnx2x_enable_msix(struct bnx2x *bp)
5996 {
5997 int i, rc, offset;
5998
5999 bp->msix_table[0].entry = 0;
6000 offset = 1;
6001 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6002
6003 for_each_queue(bp, i) {
6004 int igu_vec = offset + i + BP_L_ID(bp);
6005
6006 bp->msix_table[i + offset].entry = igu_vec;
6007 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6008 "(fastpath #%u)\n", i + offset, igu_vec, i);
6009 }
6010
6011 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6012 bp->num_queues + offset);
6013 if (rc) {
6014 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6015 return -1;
6016 }
6017 bp->flags |= USING_MSIX_FLAG;
6018
6019 return 0;
6020 }
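/* Note on the MSI-X vector layout used above and in bnx2x_req_msix_irqs()
 * below: entry 0 is the slowpath/default status block interrupt and
 * entries 1..num_queues serve one fastpath queue each, with the IGU vector
 * offset by the function's base ID (BP_L_ID).
 */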
6021
6022 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6023 {
6024 int i, rc, offset = 1;
6025
6026 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6027 bp->dev->name, bp->dev);
6028 if (rc) {
6029 BNX2X_ERR("request sp irq failed\n");
6030 return -EBUSY;
6031 }
6032
6033 for_each_queue(bp, i) {
6034 rc = request_irq(bp->msix_table[i + offset].vector,
6035 bnx2x_msix_fp_int, 0,
6036 bp->dev->name, &bp->fp[i]);
6037 if (rc) {
6038 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6039 i + offset, -rc);
6040 bnx2x_free_msix_irqs(bp);
6041 return -EBUSY;
6042 }
6043
6044 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6045 }
6046
6047 return 0;
6048 }
6049
6050 static int bnx2x_req_irq(struct bnx2x *bp)
6051 {
6052 int rc;
6053
6054 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6055 bp->dev->name, bp->dev);
6056 if (!rc)
6057 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6058
6059 return rc;
6060 }
6061
6062 static void bnx2x_napi_enable(struct bnx2x *bp)
6063 {
6064 int i;
6065
6066 for_each_queue(bp, i)
6067 napi_enable(&bnx2x_fp(bp, i, napi));
6068 }
6069
6070 static void bnx2x_napi_disable(struct bnx2x *bp)
6071 {
6072 int i;
6073
6074 for_each_queue(bp, i)
6075 napi_disable(&bnx2x_fp(bp, i, napi));
6076 }
6077
6078 static void bnx2x_netif_start(struct bnx2x *bp)
6079 {
6080 if (atomic_dec_and_test(&bp->intr_sem)) {
6081 if (netif_running(bp->dev)) {
6082 if (bp->state == BNX2X_STATE_OPEN)
6083 netif_wake_queue(bp->dev);
6084 bnx2x_napi_enable(bp);
6085 bnx2x_int_enable(bp);
6086 }
6087 }
6088 }
6089
6090 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6091 {
6092 bnx2x_int_disable_sync(bp, disable_hw);
6093 if (netif_running(bp->dev)) {
6094 bnx2x_napi_disable(bp);
6095 netif_tx_disable(bp->dev);
6096 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6097 }
6098 }
6099
6100 /*
6101 * Init service functions
6102 */
6103
6104 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6105 {
6106 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6107 int port = BP_PORT(bp);
6108
6109 /* CAM allocation
6110 * unicasts 0-31:port0 32-63:port1
6111 * multicast 64-127:port0 128-191:port1
6112 */
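	/* Two CAM entries are programmed below: config_table[0] holds the
	 * device's unicast MAC and config_table[1] the broadcast address;
	 * when 'set' is 0 both entries are invalidated instead.
	 */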
6113 config->hdr.length_6b = 2;
6114 config->hdr.offset = port ? 31 : 0;
6115 config->hdr.client_id = BP_CL_ID(bp);
6116 config->hdr.reserved1 = 0;
6117
6118 /* primary MAC */
6119 config->config_table[0].cam_entry.msb_mac_addr =
6120 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6121 config->config_table[0].cam_entry.middle_mac_addr =
6122 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6123 config->config_table[0].cam_entry.lsb_mac_addr =
6124 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6125 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6126 if (set)
6127 config->config_table[0].target_table_entry.flags = 0;
6128 else
6129 CAM_INVALIDATE(config->config_table[0]);
6130 config->config_table[0].target_table_entry.client_id = 0;
6131 config->config_table[0].target_table_entry.vlan_id = 0;
6132
6133 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6134 (set ? "setting" : "clearing"),
6135 config->config_table[0].cam_entry.msb_mac_addr,
6136 config->config_table[0].cam_entry.middle_mac_addr,
6137 config->config_table[0].cam_entry.lsb_mac_addr);
6138
6139 /* broadcast */
6140 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6141 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6142 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6143 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6144 if (set)
6145 config->config_table[1].target_table_entry.flags =
6146 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6147 else
6148 CAM_INVALIDATE(config->config_table[1]);
6149 config->config_table[1].target_table_entry.client_id = 0;
6150 config->config_table[1].target_table_entry.vlan_id = 0;
6151
6152 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6153 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6154 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6155 }
6156
6157 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6158 {
6159 struct mac_configuration_cmd_e1h *config =
6160 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6161
6162 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6163 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6164 return;
6165 }
6166
6167 /* CAM allocation for E1H
6168 * unicasts: by func number
6169 * multicast: 20+FUNC*20, 20 each
6170 */
6171 config->hdr.length_6b = 1;
6172 config->hdr.offset = BP_FUNC(bp);
6173 config->hdr.client_id = BP_CL_ID(bp);
6174 config->hdr.reserved1 = 0;
6175
6176 /* primary MAC */
6177 config->config_table[0].msb_mac_addr =
6178 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6179 config->config_table[0].middle_mac_addr =
6180 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6181 config->config_table[0].lsb_mac_addr =
6182 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6183 config->config_table[0].client_id = BP_L_ID(bp);
6184 config->config_table[0].vlan_id = 0;
6185 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6186 if (set)
6187 config->config_table[0].flags = BP_PORT(bp);
6188 else
6189 config->config_table[0].flags =
6190 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6191
6192 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6193 (set ? "setting" : "clearing"),
6194 config->config_table[0].msb_mac_addr,
6195 config->config_table[0].middle_mac_addr,
6196 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6197
6198 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6199 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6200 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6201 }
6202
6203 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6204 int *state_p, int poll)
6205 {
6206 /* can take a while if any port is running */
6207 int cnt = 500;
6208
6209 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6210 poll ? "polling" : "waiting", state, idx);
6211
6212 might_sleep();
6213 while (cnt--) {
6214 if (poll) {
6215 bnx2x_rx_int(bp->fp, 10);
6216 /* if index is different from 0
6217 * the reply for some commands will
6218 * be on the non default queue
6219 */
6220 if (idx)
6221 bnx2x_rx_int(&bp->fp[idx], 10);
6222 }
6223
6224 mb(); /* state is changed by bnx2x_sp_event() */
6225 if (*state_p == state)
6226 return 0;
6227
6228 msleep(1);
6229 }
6230
6231 /* timeout! */
6232 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6233 poll ? "polling" : "waiting", state, idx);
6234 #ifdef BNX2X_STOP_ON_ERROR
6235 bnx2x_panic();
6236 #endif
6237
6238 return -EBUSY;
6239 }
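/* Note: the state pointed to by state_p is updated from the slowpath
 * completion path (bnx2x_sp_event()), which is why the loop above only
 * needs a memory barrier and a 1 ms sleep per iteration; in polling mode
 * the function drives the Rx completion processing itself.
 */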
6240
6241 static int bnx2x_setup_leading(struct bnx2x *bp)
6242 {
6243 int rc;
6244
6245 /* reset IGU state */
6246 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6247
6248 /* SETUP ramrod */
6249 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6250
6251 /* Wait for completion */
6252 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6253
6254 return rc;
6255 }
6256
6257 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6258 {
6259 /* reset IGU state */
6260 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6261
6262 /* SETUP ramrod */
6263 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6264 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6265
6266 /* Wait for completion */
6267 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6268 &(bp->fp[index].state), 0);
6269 }
6270
6271 static int bnx2x_poll(struct napi_struct *napi, int budget);
6272 static void bnx2x_set_rx_mode(struct net_device *dev);
6273
6274 /* must be called with rtnl_lock */
6275 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6276 {
6277 u32 load_code;
6278 int i, rc;
6279 #ifdef BNX2X_STOP_ON_ERROR
6280 if (unlikely(bp->panic))
6281 return -EPERM;
6282 #endif
6283
6284 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6285
6286 	/* Send LOAD_REQUEST command to MCP.
6287 	   The MCP returns the type of LOAD command:
6288 	   if this is the first port to be initialized,
6289 	   the common blocks should be initialized as well, otherwise not.
6290 	*/
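	/* When there is no MCP, the else branch below emulates the same
	 * decision with the driver-global load_count[] array: the first load
	 * overall picks COMMON, the first load on this port picks PORT, and
	 * any later load picks FUNCTION.
	 */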
6291 if (!BP_NOMCP(bp)) {
6292 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6293 if (!load_code) {
6294 BNX2X_ERR("MCP response failure, aborting\n");
6295 return -EBUSY;
6296 }
6297 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6298 return -EBUSY; /* other port in diagnostic mode */
6299
6300 } else {
6301 int port = BP_PORT(bp);
6302
6303 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6304 load_count[0], load_count[1], load_count[2]);
6305 load_count[0]++;
6306 load_count[1 + port]++;
6307 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6308 load_count[0], load_count[1], load_count[2]);
6309 if (load_count[0] == 1)
6310 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6311 else if (load_count[1 + port] == 1)
6312 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6313 else
6314 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6315 }
6316
6317 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6318 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6319 bp->port.pmf = 1;
6320 else
6321 bp->port.pmf = 0;
6322 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6323
6324 /* if we can't use MSI-X we only need one fp,
6325 * so try to enable MSI-X with the requested number of fp's
6326 	 * and fall back to INT#A with one fp
6327 */
6328 if (use_inta) {
6329 bp->num_queues = 1;
6330
6331 } else {
6332 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6333 /* user requested number */
6334 bp->num_queues = use_multi;
6335
6336 else if (use_multi)
6337 bp->num_queues = min_t(u32, num_online_cpus(),
6338 BP_MAX_QUEUES(bp));
6339 else
6340 bp->num_queues = 1;
6341
6342 if (bnx2x_enable_msix(bp)) {
6343 /* failed to enable MSI-X */
6344 bp->num_queues = 1;
6345 if (use_multi)
6346 BNX2X_ERR("Multi requested but failed"
6347 " to enable MSI-X\n");
6348 }
6349 }
6350 DP(NETIF_MSG_IFUP,
6351 "set number of queues to %d\n", bp->num_queues);
6352
6353 if (bnx2x_alloc_mem(bp))
6354 return -ENOMEM;
6355
6356 for_each_queue(bp, i)
6357 bnx2x_fp(bp, i, disable_tpa) =
6358 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6359
6360 if (bp->flags & USING_MSIX_FLAG) {
6361 rc = bnx2x_req_msix_irqs(bp);
6362 if (rc) {
6363 pci_disable_msix(bp->pdev);
6364 goto load_error;
6365 }
6366 } else {
6367 bnx2x_ack_int(bp);
6368 rc = bnx2x_req_irq(bp);
6369 if (rc) {
6370 BNX2X_ERR("IRQ request failed, aborting\n");
6371 goto load_error;
6372 }
6373 }
6374
6375 for_each_queue(bp, i)
6376 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6377 bnx2x_poll, 128);
6378
6379 /* Initialize HW */
6380 rc = bnx2x_init_hw(bp, load_code);
6381 if (rc) {
6382 BNX2X_ERR("HW init failed, aborting\n");
6383 goto load_int_disable;
6384 }
6385
6386 /* Setup NIC internals and enable interrupts */
6387 bnx2x_nic_init(bp, load_code);
6388
6389 /* Send LOAD_DONE command to MCP */
6390 if (!BP_NOMCP(bp)) {
6391 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6392 if (!load_code) {
6393 BNX2X_ERR("MCP response failure, aborting\n");
6394 rc = -EBUSY;
6395 goto load_rings_free;
6396 }
6397 }
6398
6399 bnx2x_stats_init(bp);
6400
6401 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6402
6403 /* Enable Rx interrupt handling before sending the ramrod
6404 as it's completed on Rx FP queue */
6405 bnx2x_napi_enable(bp);
6406
6407 /* Enable interrupt handling */
6408 atomic_set(&bp->intr_sem, 0);
6409
6410 rc = bnx2x_setup_leading(bp);
6411 if (rc) {
6412 BNX2X_ERR("Setup leading failed!\n");
6413 goto load_netif_stop;
6414 }
6415
6416 if (CHIP_IS_E1H(bp))
6417 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6418 BNX2X_ERR("!!! mf_cfg function disabled\n");
6419 bp->state = BNX2X_STATE_DISABLED;
6420 }
6421
6422 if (bp->state == BNX2X_STATE_OPEN)
6423 for_each_nondefault_queue(bp, i) {
6424 rc = bnx2x_setup_multi(bp, i);
6425 if (rc)
6426 goto load_netif_stop;
6427 }
6428
6429 if (CHIP_IS_E1(bp))
6430 bnx2x_set_mac_addr_e1(bp, 1);
6431 else
6432 bnx2x_set_mac_addr_e1h(bp, 1);
6433
6434 if (bp->port.pmf)
6435 bnx2x_initial_phy_init(bp);
6436
6437 /* Start fast path */
6438 switch (load_mode) {
6439 case LOAD_NORMAL:
6440 		/* Tx queue should only be re-enabled */
6441 netif_wake_queue(bp->dev);
6442 bnx2x_set_rx_mode(bp->dev);
6443 break;
6444
6445 case LOAD_OPEN:
6446 netif_start_queue(bp->dev);
6447 bnx2x_set_rx_mode(bp->dev);
6448 if (bp->flags & USING_MSIX_FLAG)
6449 printk(KERN_INFO PFX "%s: using MSI-X\n",
6450 bp->dev->name);
6451 break;
6452
6453 case LOAD_DIAG:
6454 bnx2x_set_rx_mode(bp->dev);
6455 bp->state = BNX2X_STATE_DIAG;
6456 break;
6457
6458 default:
6459 break;
6460 }
6461
6462 if (!bp->port.pmf)
6463 bnx2x__link_status_update(bp);
6464
6465 /* start the timer */
6466 mod_timer(&bp->timer, jiffies + bp->current_interval);
6467
6468
6469 return 0;
6470
6471 load_netif_stop:
6472 bnx2x_napi_disable(bp);
6473 load_rings_free:
6474 /* Free SKBs, SGEs, TPA pool and driver internals */
6475 bnx2x_free_skbs(bp);
6476 for_each_queue(bp, i)
6477 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6478 load_int_disable:
6479 bnx2x_int_disable_sync(bp, 1);
6480 /* Release IRQs */
6481 bnx2x_free_irq(bp);
6482 load_error:
6483 bnx2x_free_mem(bp);
6484
6485 /* TBD we really need to reset the chip
6486 if we want to recover from this */
6487 return rc;
6488 }
6489
6490 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6491 {
6492 int rc;
6493
6494 /* halt the connection */
6495 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6496 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6497
6498 /* Wait for completion */
6499 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6500 &(bp->fp[index].state), 1);
6501 if (rc) /* timeout */
6502 return rc;
6503
6504 /* delete cfc entry */
6505 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6506
6507 /* Wait for completion */
6508 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6509 &(bp->fp[index].state), 1);
6510 return rc;
6511 }
6512
6513 static int bnx2x_stop_leading(struct bnx2x *bp)
6514 {
6515 u16 dsb_sp_prod_idx;
6516 /* if the other port is handling traffic,
6517 this can take a lot of time */
6518 int cnt = 500;
6519 int rc;
6520
6521 might_sleep();
6522
6523 /* Send HALT ramrod */
6524 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6525 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6526
6527 /* Wait for completion */
6528 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6529 &(bp->fp[0].state), 1);
6530 if (rc) /* timeout */
6531 return rc;
6532
6533 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6534
6535 /* Send PORT_DELETE ramrod */
6536 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6537
6538 /* Wait for completion to arrive on default status block
6539 we are going to reset the chip anyway
6540 so there is not much to do if this times out
6541 */
6542 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6543 if (!cnt) {
6544 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6545 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6546 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6547 #ifdef BNX2X_STOP_ON_ERROR
6548 bnx2x_panic();
6549 #else
6550 rc = -EBUSY;
6551 #endif
6552 break;
6553 }
6554 cnt--;
6555 msleep(1);
6556 }
6557 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6558 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6559
6560 return rc;
6561 }
6562
6563 static void bnx2x_reset_func(struct bnx2x *bp)
6564 {
6565 int port = BP_PORT(bp);
6566 int func = BP_FUNC(bp);
6567 int base, i;
6568
6569 /* Configure IGU */
6570 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6571 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6572
6573 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6574
6575 /* Clear ILT */
6576 base = FUNC_ILT_BASE(func);
6577 for (i = base; i < base + ILT_PER_FUNC; i++)
6578 bnx2x_ilt_wr(bp, i, 0);
6579 }
6580
6581 static void bnx2x_reset_port(struct bnx2x *bp)
6582 {
6583 int port = BP_PORT(bp);
6584 u32 val;
6585
6586 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6587
6588 /* Do not rcv packets to BRB */
6589 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6590 /* Do not direct rcv packets that are not for MCP to the BRB */
6591 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6592 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6593
6594 /* Configure AEU */
6595 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6596
6597 msleep(100);
6598 /* Check for BRB port occupancy */
6599 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6600 if (val)
6601 DP(NETIF_MSG_IFDOWN,
6602 "BRB1 is not empty %d blocks are occupied\n", val);
6603
6604 /* TODO: Close Doorbell port? */
6605 }
6606
6607 static void bnx2x_reset_common(struct bnx2x *bp)
6608 {
6609 /* reset_common */
6610 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6611 0xd3ffff7f);
6612 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6613 }
6614
6615 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6616 {
6617 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6618 BP_FUNC(bp), reset_code);
6619
6620 switch (reset_code) {
6621 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6622 bnx2x_reset_port(bp);
6623 bnx2x_reset_func(bp);
6624 bnx2x_reset_common(bp);
6625 break;
6626
6627 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6628 bnx2x_reset_port(bp);
6629 bnx2x_reset_func(bp);
6630 break;
6631
6632 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6633 bnx2x_reset_func(bp);
6634 break;
6635
6636 default:
6637 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6638 break;
6639 }
6640 }
6641
6642 /* must be called with rtnl_lock */
6643 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6644 {
6645 int port = BP_PORT(bp);
6646 u32 reset_code = 0;
6647 int i, cnt, rc;
6648
6649 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6650
6651 bp->rx_mode = BNX2X_RX_MODE_NONE;
6652 bnx2x_set_storm_rx_mode(bp);
6653
6654 bnx2x_netif_stop(bp, 1);
6655 if (!netif_running(bp->dev))
6656 bnx2x_napi_disable(bp);
6657 del_timer_sync(&bp->timer);
6658 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6659 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6660 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6661
6662 /* Wait until tx fast path tasks complete */
6663 for_each_queue(bp, i) {
6664 struct bnx2x_fastpath *fp = &bp->fp[i];
6665
6666 cnt = 1000;
6667 smp_rmb();
6668 while (BNX2X_HAS_TX_WORK(fp)) {
6669
6670 bnx2x_tx_int(fp, 1000);
6671 if (!cnt) {
6672 BNX2X_ERR("timeout waiting for queue[%d]\n",
6673 i);
6674 #ifdef BNX2X_STOP_ON_ERROR
6675 bnx2x_panic();
6676 return -EBUSY;
6677 #else
6678 break;
6679 #endif
6680 }
6681 cnt--;
6682 msleep(1);
6683 smp_rmb();
6684 }
6685 }
6686 /* Give HW time to discard old tx messages */
6687 msleep(1);
6688
6689 /* Release IRQs */
6690 bnx2x_free_irq(bp);
6691
6692 if (CHIP_IS_E1(bp)) {
6693 struct mac_configuration_cmd *config =
6694 bnx2x_sp(bp, mcast_config);
6695
6696 bnx2x_set_mac_addr_e1(bp, 0);
6697
6698 for (i = 0; i < config->hdr.length_6b; i++)
6699 CAM_INVALIDATE(config->config_table[i]);
6700
6701 config->hdr.length_6b = i;
6702 if (CHIP_REV_IS_SLOW(bp))
6703 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6704 else
6705 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6706 config->hdr.client_id = BP_CL_ID(bp);
6707 config->hdr.reserved1 = 0;
6708
6709 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6710 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6711 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6712
6713 } else { /* E1H */
6714 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6715
6716 bnx2x_set_mac_addr_e1h(bp, 0);
6717
6718 for (i = 0; i < MC_HASH_SIZE; i++)
6719 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6720 }
6721
6722 if (unload_mode == UNLOAD_NORMAL)
6723 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6724
6725 else if (bp->flags & NO_WOL_FLAG) {
6726 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6727 if (CHIP_IS_E1H(bp))
6728 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6729
6730 } else if (bp->wol) {
6731 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6732 u8 *mac_addr = bp->dev->dev_addr;
6733 u32 val;
6734 /* The mac address is written to entries 1-4 to
6735 preserve entry 0 which is used by the PMF */
6736 u8 entry = (BP_E1HVN(bp) + 1)*8;
6737
6738 val = (mac_addr[0] << 8) | mac_addr[1];
6739 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6740
6741 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6742 (mac_addr[4] << 8) | mac_addr[5];
6743 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6744
6745 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6746
6747 } else
6748 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6749
6750 /* Close multi and leading connections
6751 Completions for ramrods are collected in a synchronous way */
6752 for_each_nondefault_queue(bp, i)
6753 if (bnx2x_stop_multi(bp, i))
6754 goto unload_error;
6755
6756 rc = bnx2x_stop_leading(bp);
6757 if (rc) {
6758 BNX2X_ERR("Stop leading failed!\n");
6759 #ifdef BNX2X_STOP_ON_ERROR
6760 return -EBUSY;
6761 #else
6762 goto unload_error;
6763 #endif
6764 }
6765
6766 unload_error:
6767 if (!BP_NOMCP(bp))
6768 reset_code = bnx2x_fw_command(bp, reset_code);
6769 else {
6770 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6771 load_count[0], load_count[1], load_count[2]);
6772 load_count[0]--;
6773 load_count[1 + port]--;
6774 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6775 load_count[0], load_count[1], load_count[2]);
6776 if (load_count[0] == 0)
6777 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6778 else if (load_count[1 + port] == 0)
6779 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6780 else
6781 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6782 }
6783
6784 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6785 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6786 bnx2x__link_reset(bp);
6787
6788 /* Reset the chip */
6789 bnx2x_reset_chip(bp, reset_code);
6790
6791 /* Report UNLOAD_DONE to MCP */
6792 if (!BP_NOMCP(bp))
6793 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6794
6795 /* Free SKBs, SGEs, TPA pool and driver internals */
6796 bnx2x_free_skbs(bp);
6797 for_each_queue(bp, i)
6798 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6799 bnx2x_free_mem(bp);
6800
6801 bp->state = BNX2X_STATE_CLOSED;
6802
6803 netif_carrier_off(bp->dev);
6804
6805 return 0;
6806 }
6807
6808 static void bnx2x_reset_task(struct work_struct *work)
6809 {
6810 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6811
6812 #ifdef BNX2X_STOP_ON_ERROR
6813 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6814 " so reset not done to allow debug dump,\n"
6815 KERN_ERR " you will need to reboot when done\n");
6816 return;
6817 #endif
6818
6819 rtnl_lock();
6820
6821 if (!netif_running(bp->dev))
6822 goto reset_task_exit;
6823
6824 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6825 bnx2x_nic_load(bp, LOAD_NORMAL);
6826
6827 reset_task_exit:
6828 rtnl_unlock();
6829 }
6830
6831 /* end of nic load/unload */
6832
6833 /* ethtool_ops */
6834
6835 /*
6836 * Init service functions
6837 */
6838
6839 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6840 {
6841 u32 val;
6842
6843 /* Check if there is any driver already loaded */
6844 val = REG_RD(bp, MISC_REG_UNPREPARED);
6845 if (val == 0x1) {
6846 /* Check if it is the UNDI driver
6847 		 * UNDI driver initializes CID offset for the normal doorbell to 0x7
6848 */
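		/* If UNDI was indeed active, the code below issues unload
		 * requests to the MCP for both ports if needed, blocks BRB
		 * input traffic, resets the device while preserving the NIG
		 * port-swap strap values, and then restores this function's
		 * fw_seq.
		 */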
6849 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6850 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6851 if (val == 0x7)
6852 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6853 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6854
6855 if (val == 0x7) {
6856 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6857 /* save our func */
6858 int func = BP_FUNC(bp);
6859 u32 swap_en;
6860 u32 swap_val;
6861
6862 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6863
6864 /* try unload UNDI on port 0 */
6865 bp->func = 0;
6866 bp->fw_seq =
6867 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6868 DRV_MSG_SEQ_NUMBER_MASK);
6869 reset_code = bnx2x_fw_command(bp, reset_code);
6870
6871 /* if UNDI is loaded on the other port */
6872 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6873
6874 /* send "DONE" for previous unload */
6875 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6876
6877 /* unload UNDI on port 1 */
6878 bp->func = 1;
6879 bp->fw_seq =
6880 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6881 DRV_MSG_SEQ_NUMBER_MASK);
6882 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6883
6884 bnx2x_fw_command(bp, reset_code);
6885 }
6886
6887 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6888 HC_REG_CONFIG_0), 0x1000);
6889
6890 /* close input traffic and wait for it */
6891 /* Do not rcv packets to BRB */
6892 REG_WR(bp,
6893 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6894 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6895 /* Do not direct rcv packets that are not for MCP to
6896 * the BRB */
6897 REG_WR(bp,
6898 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6899 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6900 /* clear AEU */
6901 REG_WR(bp,
6902 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6903 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6904 msleep(10);
6905
6906 /* save NIG port swap info */
6907 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6908 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6909 /* reset device */
6910 REG_WR(bp,
6911 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6912 0xd3ffffff);
6913 REG_WR(bp,
6914 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6915 0x1403);
6916 /* take the NIG out of reset and restore swap values */
6917 REG_WR(bp,
6918 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6919 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6920 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6921 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6922
6923 /* send unload done to the MCP */
6924 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6925
6926 /* restore our func and fw_seq */
6927 bp->func = func;
6928 bp->fw_seq =
6929 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6930 DRV_MSG_SEQ_NUMBER_MASK);
6931 }
6932 }
6933 }
6934
6935 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6936 {
6937 u32 val, val2, val3, val4, id;
6938 u16 pmc;
6939
6940 /* Get the chip revision id and number. */
6941 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6942 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6943 id = ((val & 0xffff) << 16);
6944 val = REG_RD(bp, MISC_REG_CHIP_REV);
6945 id |= ((val & 0xf) << 12);
6946 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6947 id |= ((val & 0xff) << 4);
6948 	val = REG_RD(bp, MISC_REG_BOND_ID);
6949 id |= (val & 0xf);
6950 bp->common.chip_id = id;
6951 bp->link_params.chip_id = bp->common.chip_id;
6952 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6953
6954 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6955 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6956 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6957 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6958 bp->common.flash_size, bp->common.flash_size);
6959
6960 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6961 bp->link_params.shmem_base = bp->common.shmem_base;
6962 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6963
6964 if (!bp->common.shmem_base ||
6965 (bp->common.shmem_base < 0xA0000) ||
6966 (bp->common.shmem_base >= 0xC0000)) {
6967 BNX2X_DEV_INFO("MCP not active\n");
6968 bp->flags |= NO_MCP_FLAG;
6969 return;
6970 }
6971
6972 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6973 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6974 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6975 BNX2X_ERR("BAD MCP validity signature\n");
6976
6977 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6978 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6979
6980 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6981 bp->common.hw_config, bp->common.board);
6982
6983 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6984 SHARED_HW_CFG_LED_MODE_MASK) >>
6985 SHARED_HW_CFG_LED_MODE_SHIFT);
6986
6987 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6988 bp->common.bc_ver = val;
6989 BNX2X_DEV_INFO("bc_ver %X\n", val);
6990 if (val < BNX2X_BC_VER) {
6991 /* for now only warn
6992 * later we might need to enforce this */
6993 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6994 " please upgrade BC\n", BNX2X_BC_VER, val);
6995 }
6996
6997 if (BP_E1HVN(bp) == 0) {
6998 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6999 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7000 } else {
7001 /* no WOL capability for E1HVN != 0 */
7002 bp->flags |= NO_WOL_FLAG;
7003 }
7004 BNX2X_DEV_INFO("%sWoL capable\n",
7005 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7006
7007 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7008 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7009 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7010 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7011
7012 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7013 val, val2, val3, val4);
7014 }
7015
7016 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7017 u32 switch_cfg)
7018 {
7019 int port = BP_PORT(bp);
7020 u32 ext_phy_type;
7021
7022 switch (switch_cfg) {
7023 case SWITCH_CFG_1G:
7024 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7025
7026 ext_phy_type =
7027 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7028 switch (ext_phy_type) {
7029 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7030 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7031 ext_phy_type);
7032
7033 bp->port.supported |= (SUPPORTED_10baseT_Half |
7034 SUPPORTED_10baseT_Full |
7035 SUPPORTED_100baseT_Half |
7036 SUPPORTED_100baseT_Full |
7037 SUPPORTED_1000baseT_Full |
7038 SUPPORTED_2500baseX_Full |
7039 SUPPORTED_TP |
7040 SUPPORTED_FIBRE |
7041 SUPPORTED_Autoneg |
7042 SUPPORTED_Pause |
7043 SUPPORTED_Asym_Pause);
7044 break;
7045
7046 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7047 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7048 ext_phy_type);
7049
7050 bp->port.supported |= (SUPPORTED_10baseT_Half |
7051 SUPPORTED_10baseT_Full |
7052 SUPPORTED_100baseT_Half |
7053 SUPPORTED_100baseT_Full |
7054 SUPPORTED_1000baseT_Full |
7055 SUPPORTED_TP |
7056 SUPPORTED_FIBRE |
7057 SUPPORTED_Autoneg |
7058 SUPPORTED_Pause |
7059 SUPPORTED_Asym_Pause);
7060 break;
7061
7062 default:
7063 BNX2X_ERR("NVRAM config error. "
7064 "BAD SerDes ext_phy_config 0x%x\n",
7065 bp->link_params.ext_phy_config);
7066 return;
7067 }
7068
7069 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7070 port*0x10);
7071 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7072 break;
7073
7074 case SWITCH_CFG_10G:
7075 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7076
7077 ext_phy_type =
7078 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7079 switch (ext_phy_type) {
7080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7081 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7082 ext_phy_type);
7083
7084 bp->port.supported |= (SUPPORTED_10baseT_Half |
7085 SUPPORTED_10baseT_Full |
7086 SUPPORTED_100baseT_Half |
7087 SUPPORTED_100baseT_Full |
7088 SUPPORTED_1000baseT_Full |
7089 SUPPORTED_2500baseX_Full |
7090 SUPPORTED_10000baseT_Full |
7091 SUPPORTED_TP |
7092 SUPPORTED_FIBRE |
7093 SUPPORTED_Autoneg |
7094 SUPPORTED_Pause |
7095 SUPPORTED_Asym_Pause);
7096 break;
7097
7098 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7099 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7100 ext_phy_type);
7101
7102 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7103 SUPPORTED_FIBRE |
7104 SUPPORTED_Pause |
7105 SUPPORTED_Asym_Pause);
7106 break;
7107
7108 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7109 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7110 ext_phy_type);
7111
7112 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7113 SUPPORTED_1000baseT_Full |
7114 SUPPORTED_FIBRE |
7115 SUPPORTED_Pause |
7116 SUPPORTED_Asym_Pause);
7117 break;
7118
7119 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7120 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7121 ext_phy_type);
7122
7123 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7124 SUPPORTED_1000baseT_Full |
7125 SUPPORTED_FIBRE |
7126 SUPPORTED_Autoneg |
7127 SUPPORTED_Pause |
7128 SUPPORTED_Asym_Pause);
7129 break;
7130
7131 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7132 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7133 ext_phy_type);
7134
7135 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7136 SUPPORTED_2500baseX_Full |
7137 SUPPORTED_1000baseT_Full |
7138 SUPPORTED_FIBRE |
7139 SUPPORTED_Autoneg |
7140 SUPPORTED_Pause |
7141 SUPPORTED_Asym_Pause);
7142 break;
7143
7144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7145 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7146 ext_phy_type);
7147
7148 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7149 SUPPORTED_TP |
7150 SUPPORTED_Autoneg |
7151 SUPPORTED_Pause |
7152 SUPPORTED_Asym_Pause);
7153 break;
7154
7155 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7156 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7157 bp->link_params.ext_phy_config);
7158 break;
7159
7160 default:
7161 BNX2X_ERR("NVRAM config error. "
7162 "BAD XGXS ext_phy_config 0x%x\n",
7163 bp->link_params.ext_phy_config);
7164 return;
7165 }
7166
7167 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7168 port*0x18);
7169 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7170
7171 break;
7172
7173 default:
7174 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7175 bp->port.link_config);
7176 return;
7177 }
7178 bp->link_params.phy_addr = bp->port.phy_addr;
7179
7180 /* mask what we support according to speed_cap_mask */
7181 if (!(bp->link_params.speed_cap_mask &
7182 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7183 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7184
7185 if (!(bp->link_params.speed_cap_mask &
7186 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7187 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7188
7189 if (!(bp->link_params.speed_cap_mask &
7190 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7191 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7192
7193 if (!(bp->link_params.speed_cap_mask &
7194 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7195 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7196
7197 if (!(bp->link_params.speed_cap_mask &
7198 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7199 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7200 SUPPORTED_1000baseT_Full);
7201
7202 if (!(bp->link_params.speed_cap_mask &
7203 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7204 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7205
7206 if (!(bp->link_params.speed_cap_mask &
7207 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7208 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7209
7210 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7211 }
7212
7213 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7214 {
7215 bp->link_params.req_duplex = DUPLEX_FULL;
7216
7217 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7218 case PORT_FEATURE_LINK_SPEED_AUTO:
7219 if (bp->port.supported & SUPPORTED_Autoneg) {
7220 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7221 bp->port.advertising = bp->port.supported;
7222 } else {
7223 u32 ext_phy_type =
7224 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7225
7226 if ((ext_phy_type ==
7227 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7228 (ext_phy_type ==
7229 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7230 /* force 10G, no AN */
7231 bp->link_params.req_line_speed = SPEED_10000;
7232 bp->port.advertising =
7233 (ADVERTISED_10000baseT_Full |
7234 ADVERTISED_FIBRE);
7235 break;
7236 }
7237 BNX2X_ERR("NVRAM config error. "
7238 "Invalid link_config 0x%x"
7239 " Autoneg not supported\n",
7240 bp->port.link_config);
7241 return;
7242 }
7243 break;
7244
7245 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7246 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7247 bp->link_params.req_line_speed = SPEED_10;
7248 bp->port.advertising = (ADVERTISED_10baseT_Full |
7249 ADVERTISED_TP);
7250 } else {
7251 BNX2X_ERR("NVRAM config error. "
7252 "Invalid link_config 0x%x"
7253 " speed_cap_mask 0x%x\n",
7254 bp->port.link_config,
7255 bp->link_params.speed_cap_mask);
7256 return;
7257 }
7258 break;
7259
7260 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7261 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7262 bp->link_params.req_line_speed = SPEED_10;
7263 bp->link_params.req_duplex = DUPLEX_HALF;
7264 bp->port.advertising = (ADVERTISED_10baseT_Half |
7265 ADVERTISED_TP);
7266 } else {
7267 BNX2X_ERR("NVRAM config error. "
7268 "Invalid link_config 0x%x"
7269 " speed_cap_mask 0x%x\n",
7270 bp->port.link_config,
7271 bp->link_params.speed_cap_mask);
7272 return;
7273 }
7274 break;
7275
7276 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7277 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7278 bp->link_params.req_line_speed = SPEED_100;
7279 bp->port.advertising = (ADVERTISED_100baseT_Full |
7280 ADVERTISED_TP);
7281 } else {
7282 BNX2X_ERR("NVRAM config error. "
7283 "Invalid link_config 0x%x"
7284 " speed_cap_mask 0x%x\n",
7285 bp->port.link_config,
7286 bp->link_params.speed_cap_mask);
7287 return;
7288 }
7289 break;
7290
7291 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7292 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7293 bp->link_params.req_line_speed = SPEED_100;
7294 bp->link_params.req_duplex = DUPLEX_HALF;
7295 bp->port.advertising = (ADVERTISED_100baseT_Half |
7296 ADVERTISED_TP);
7297 } else {
7298 BNX2X_ERR("NVRAM config error. "
7299 "Invalid link_config 0x%x"
7300 " speed_cap_mask 0x%x\n",
7301 bp->port.link_config,
7302 bp->link_params.speed_cap_mask);
7303 return;
7304 }
7305 break;
7306
7307 case PORT_FEATURE_LINK_SPEED_1G:
7308 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7309 bp->link_params.req_line_speed = SPEED_1000;
7310 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7311 ADVERTISED_TP);
7312 } else {
7313 BNX2X_ERR("NVRAM config error. "
7314 "Invalid link_config 0x%x"
7315 " speed_cap_mask 0x%x\n",
7316 bp->port.link_config,
7317 bp->link_params.speed_cap_mask);
7318 return;
7319 }
7320 break;
7321
7322 case PORT_FEATURE_LINK_SPEED_2_5G:
7323 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7324 bp->link_params.req_line_speed = SPEED_2500;
7325 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7326 ADVERTISED_TP);
7327 } else {
7328 BNX2X_ERR("NVRAM config error. "
7329 "Invalid link_config 0x%x"
7330 " speed_cap_mask 0x%x\n",
7331 bp->port.link_config,
7332 bp->link_params.speed_cap_mask);
7333 return;
7334 }
7335 break;
7336
7337 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7338 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7339 case PORT_FEATURE_LINK_SPEED_10G_KR:
7340 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7341 bp->link_params.req_line_speed = SPEED_10000;
7342 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7343 ADVERTISED_FIBRE);
7344 } else {
7345 BNX2X_ERR("NVRAM config error. "
7346 "Invalid link_config 0x%x"
7347 " speed_cap_mask 0x%x\n",
7348 bp->port.link_config,
7349 bp->link_params.speed_cap_mask);
7350 return;
7351 }
7352 break;
7353
7354 default:
7355 BNX2X_ERR("NVRAM config error. "
7356 "BAD link speed link_config 0x%x\n",
7357 bp->port.link_config);
7358 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7359 bp->port.advertising = bp->port.supported;
7360 break;
7361 }
7362
7363 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7364 PORT_FEATURE_FLOW_CONTROL_MASK);
7365 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7366 !(bp->port.supported & SUPPORTED_Autoneg))
7367 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7368
7369 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7370 " advertising 0x%x\n",
7371 bp->link_params.req_line_speed,
7372 bp->link_params.req_duplex,
7373 bp->link_params.req_flow_ctrl, bp->port.advertising);
7374 }
7375
7376 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7377 {
7378 int port = BP_PORT(bp);
7379 u32 val, val2;
7380
7381 bp->link_params.bp = bp;
7382 bp->link_params.port = port;
7383
7384 bp->link_params.serdes_config =
7385 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7386 bp->link_params.lane_config =
7387 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7388 bp->link_params.ext_phy_config =
7389 SHMEM_RD(bp,
7390 dev_info.port_hw_config[port].external_phy_config);
7391 bp->link_params.speed_cap_mask =
7392 SHMEM_RD(bp,
7393 dev_info.port_hw_config[port].speed_capability_mask);
7394
7395 bp->port.link_config =
7396 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7397
7398 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7399 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7400 " link_config 0x%08x\n",
7401 bp->link_params.serdes_config,
7402 bp->link_params.lane_config,
7403 bp->link_params.ext_phy_config,
7404 bp->link_params.speed_cap_mask, bp->port.link_config);
7405
7406 bp->link_params.switch_cfg = (bp->port.link_config &
7407 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7408 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7409
7410 bnx2x_link_settings_requested(bp);
7411
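/* The port MAC address is stored in shmem as two words: mac_upper holds
 * bytes 0-1 and mac_lower holds bytes 2-5, so dev_addr is assembled
 * byte by byte from the two reads below.
 */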
7412 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7413 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7414 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7415 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7416 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7417 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7418 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7419 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7420 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7421 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7422 }
7423
7424 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7425 {
7426 int func = BP_FUNC(bp);
7427 u32 val, val2;
7428 int rc = 0;
7429
7430 bnx2x_get_common_hwinfo(bp);
7431
7432 bp->e1hov = 0;
7433 bp->e1hmf = 0;
7434 if (CHIP_IS_E1H(bp)) {
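/* On E1H, a valid outer-VLAN (E1HOV) tag in the function's MF
 * configuration indicates multi-function mode; in single function
 * mode only vn0 is a valid function.
 */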
7435 bp->mf_config =
7436 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7437
7438 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7439 FUNC_MF_CFG_E1HOV_TAG_MASK);
7440 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7441
7442 bp->e1hov = val;
7443 bp->e1hmf = 1;
7444 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7445 "(0x%04x)\n",
7446 func, bp->e1hov, bp->e1hov);
7447 } else {
7448 BNX2X_DEV_INFO("Single function mode\n");
7449 if (BP_E1HVN(bp)) {
7450 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7451 " aborting\n", func);
7452 rc = -EPERM;
7453 }
7454 }
7455 }
7456
7457 if (!BP_NOMCP(bp)) {
7458 bnx2x_get_port_hwinfo(bp);
7459
7460 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7461 DRV_MSG_SEQ_NUMBER_MASK);
7462 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7463 }
7464
7465 if (IS_E1HMF(bp)) {
7466 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7467 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7468 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7469 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7470 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7471 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7472 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7473 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7474 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7475 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7476 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7477 ETH_ALEN);
7478 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7479 ETH_ALEN);
7480 }
7481
7482 return rc;
7483 }
7484
7485 if (BP_NOMCP(bp)) {
7486 /* only supposed to happen on emulation/FPGA */
7487 BNX2X_ERR("warning random MAC workaround active\n");
7488 random_ether_addr(bp->dev->dev_addr);
7489 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7490 }
7491
7492 return rc;
7493 }
7494
7495 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7496 {
7497 int func = BP_FUNC(bp);
7498 int rc;
7499
7500 /* Disable interrupt handling until HW is initialized */
7501 atomic_set(&bp->intr_sem, 1);
7502
7503 mutex_init(&bp->port.phy_mutex);
7504
7505 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7506 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7507
7508 rc = bnx2x_get_hwinfo(bp);
7509
7510 /* need to reset chip if undi was active */
7511 if (!BP_NOMCP(bp))
7512 bnx2x_undi_unload(bp);
7513
7514 if (CHIP_REV_IS_FPGA(bp))
7515 printk(KERN_ERR PFX "FPGA detected\n");
7516
7517 if (BP_NOMCP(bp) && (func == 0))
7518 printk(KERN_ERR PFX
7519 "MCP disabled, must load devices in order!\n");
7520
7521 /* Set TPA flags */
7522 if (disable_tpa) {
7523 bp->flags &= ~TPA_ENABLE_FLAG;
7524 bp->dev->features &= ~NETIF_F_LRO;
7525 } else {
7526 bp->flags |= TPA_ENABLE_FLAG;
7527 bp->dev->features |= NETIF_F_LRO;
7528 }
7529
7530
7531 bp->tx_ring_size = MAX_TX_AVAIL;
7532 bp->rx_ring_size = MAX_RX_AVAIL;
7533
7534 bp->rx_csum = 1;
7535 bp->rx_offset = 0;
7536
7537 bp->tx_ticks = 50;
7538 bp->rx_ticks = 25;
7539
7540 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7541 bp->current_interval = (poll ? poll : bp->timer_interval);
7542
7543 init_timer(&bp->timer);
7544 bp->timer.expires = jiffies + bp->current_interval;
7545 bp->timer.data = (unsigned long) bp;
7546 bp->timer.function = bnx2x_timer;
7547
7548 return rc;
7549 }
7550
7551 /*
7552 * ethtool service functions
7553 */
7554
7555 /* All ethtool functions called with rtnl_lock */
7556
7557 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7558 {
7559 struct bnx2x *bp = netdev_priv(dev);
7560
7561 cmd->supported = bp->port.supported;
7562 cmd->advertising = bp->port.advertising;
7563
7564 if (netif_carrier_ok(dev)) {
7565 cmd->speed = bp->link_vars.line_speed;
7566 cmd->duplex = bp->link_vars.duplex;
7567 } else {
7568 cmd->speed = bp->link_params.req_line_speed;
7569 cmd->duplex = bp->link_params.req_duplex;
7570 }
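/* In E1H multi-function mode the function's bandwidth limit taken from
 * the MF configuration (in units of 100 Mbps) caps the reported speed.
 */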
7571 if (IS_E1HMF(bp)) {
7572 u16 vn_max_rate;
7573
7574 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7575 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7576 if (vn_max_rate < cmd->speed)
7577 cmd->speed = vn_max_rate;
7578 }
7579
7580 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7581 u32 ext_phy_type =
7582 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7583
7584 switch (ext_phy_type) {
7585 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7586 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7587 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7588 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7589 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7590 cmd->port = PORT_FIBRE;
7591 break;
7592
7593 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7594 cmd->port = PORT_TP;
7595 break;
7596
7597 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7598 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7599 bp->link_params.ext_phy_config);
7600 break;
7601
7602 default:
7603 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7604 bp->link_params.ext_phy_config);
7605 break;
7606 }
7607 } else
7608 cmd->port = PORT_TP;
7609
7610 cmd->phy_address = bp->port.phy_addr;
7611 cmd->transceiver = XCVR_INTERNAL;
7612
7613 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7614 cmd->autoneg = AUTONEG_ENABLE;
7615 else
7616 cmd->autoneg = AUTONEG_DISABLE;
7617
7618 cmd->maxtxpkt = 0;
7619 cmd->maxrxpkt = 0;
7620
7621 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7622 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7623 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7624 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7625 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7626 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7627 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7628
7629 return 0;
7630 }
7631
7632 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7633 {
7634 struct bnx2x *bp = netdev_priv(dev);
7635 u32 advertising;
7636
7637 if (IS_E1HMF(bp))
7638 return 0;
7639
7640 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7641 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7642 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7643 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7644 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7645 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7646 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7647
7648 if (cmd->autoneg == AUTONEG_ENABLE) {
7649 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7650 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7651 return -EINVAL;
7652 }
7653
7654 /* limit the advertised modes to those supported by the port */
7655 cmd->advertising &= bp->port.supported;
7656
7657 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7658 bp->link_params.req_duplex = DUPLEX_FULL;
7659 bp->port.advertising |= (ADVERTISED_Autoneg |
7660 cmd->advertising);
7661
7662 } else { /* forced speed */
7663 /* advertise the requested speed and duplex if supported */
7664 switch (cmd->speed) {
7665 case SPEED_10:
7666 if (cmd->duplex == DUPLEX_FULL) {
7667 if (!(bp->port.supported &
7668 SUPPORTED_10baseT_Full)) {
7669 DP(NETIF_MSG_LINK,
7670 "10M full not supported\n");
7671 return -EINVAL;
7672 }
7673
7674 advertising = (ADVERTISED_10baseT_Full |
7675 ADVERTISED_TP);
7676 } else {
7677 if (!(bp->port.supported &
7678 SUPPORTED_10baseT_Half)) {
7679 DP(NETIF_MSG_LINK,
7680 "10M half not supported\n");
7681 return -EINVAL;
7682 }
7683
7684 advertising = (ADVERTISED_10baseT_Half |
7685 ADVERTISED_TP);
7686 }
7687 break;
7688
7689 case SPEED_100:
7690 if (cmd->duplex == DUPLEX_FULL) {
7691 if (!(bp->port.supported &
7692 SUPPORTED_100baseT_Full)) {
7693 DP(NETIF_MSG_LINK,
7694 "100M full not supported\n");
7695 return -EINVAL;
7696 }
7697
7698 advertising = (ADVERTISED_100baseT_Full |
7699 ADVERTISED_TP);
7700 } else {
7701 if (!(bp->port.supported &
7702 SUPPORTED_100baseT_Half)) {
7703 DP(NETIF_MSG_LINK,
7704 "100M half not supported\n");
7705 return -EINVAL;
7706 }
7707
7708 advertising = (ADVERTISED_100baseT_Half |
7709 ADVERTISED_TP);
7710 }
7711 break;
7712
7713 case SPEED_1000:
7714 if (cmd->duplex != DUPLEX_FULL) {
7715 DP(NETIF_MSG_LINK, "1G half not supported\n");
7716 return -EINVAL;
7717 }
7718
7719 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7720 DP(NETIF_MSG_LINK, "1G full not supported\n");
7721 return -EINVAL;
7722 }
7723
7724 advertising = (ADVERTISED_1000baseT_Full |
7725 ADVERTISED_TP);
7726 break;
7727
7728 case SPEED_2500:
7729 if (cmd->duplex != DUPLEX_FULL) {
7730 DP(NETIF_MSG_LINK,
7731 "2.5G half not supported\n");
7732 return -EINVAL;
7733 }
7734
7735 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7736 DP(NETIF_MSG_LINK,
7737 "2.5G full not supported\n");
7738 return -EINVAL;
7739 }
7740
7741 advertising = (ADVERTISED_2500baseX_Full |
7742 ADVERTISED_TP);
7743 break;
7744
7745 case SPEED_10000:
7746 if (cmd->duplex != DUPLEX_FULL) {
7747 DP(NETIF_MSG_LINK, "10G half not supported\n");
7748 return -EINVAL;
7749 }
7750
7751 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7752 DP(NETIF_MSG_LINK, "10G full not supported\n");
7753 return -EINVAL;
7754 }
7755
7756 advertising = (ADVERTISED_10000baseT_Full |
7757 ADVERTISED_FIBRE);
7758 break;
7759
7760 default:
7761 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7762 return -EINVAL;
7763 }
7764
7765 bp->link_params.req_line_speed = cmd->speed;
7766 bp->link_params.req_duplex = cmd->duplex;
7767 bp->port.advertising = advertising;
7768 }
7769
7770 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7771 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7772 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7773 bp->port.advertising);
7774
7775 if (netif_running(dev)) {
7776 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7777 bnx2x_link_set(bp);
7778 }
7779
7780 return 0;
7781 }
7782
7783 #define PHY_FW_VER_LEN 10
7784
7785 static void bnx2x_get_drvinfo(struct net_device *dev,
7786 struct ethtool_drvinfo *info)
7787 {
7788 struct bnx2x *bp = netdev_priv(dev);
7789 u8 phy_fw_ver[PHY_FW_VER_LEN];
7790
7791 strcpy(info->driver, DRV_MODULE_NAME);
7792 strcpy(info->version, DRV_MODULE_VERSION);
7793
7794 phy_fw_ver[0] = '\0';
7795 if (bp->port.pmf) {
7796 bnx2x_acquire_phy_lock(bp);
7797 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7798 (bp->state != BNX2X_STATE_CLOSED),
7799 phy_fw_ver, PHY_FW_VER_LEN);
7800 bnx2x_release_phy_lock(bp);
7801 }
7802
7803 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7804 (bp->common.bc_ver & 0xff0000) >> 16,
7805 (bp->common.bc_ver & 0xff00) >> 8,
7806 (bp->common.bc_ver & 0xff),
7807 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7808 strcpy(info->bus_info, pci_name(bp->pdev));
7809 info->n_stats = BNX2X_NUM_STATS;
7810 info->testinfo_len = BNX2X_NUM_TESTS;
7811 info->eedump_len = bp->common.flash_size;
7812 info->regdump_len = 0;
7813 }
7814
7815 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7816 {
7817 struct bnx2x *bp = netdev_priv(dev);
7818
7819 if (bp->flags & NO_WOL_FLAG) {
7820 wol->supported = 0;
7821 wol->wolopts = 0;
7822 } else {
7823 wol->supported = WAKE_MAGIC;
7824 if (bp->wol)
7825 wol->wolopts = WAKE_MAGIC;
7826 else
7827 wol->wolopts = 0;
7828 }
7829 memset(&wol->sopass, 0, sizeof(wol->sopass));
7830 }
7831
7832 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7833 {
7834 struct bnx2x *bp = netdev_priv(dev);
7835
7836 if (wol->wolopts & ~WAKE_MAGIC)
7837 return -EINVAL;
7838
7839 if (wol->wolopts & WAKE_MAGIC) {
7840 if (bp->flags & NO_WOL_FLAG)
7841 return -EINVAL;
7842
7843 bp->wol = 1;
7844 } else
7845 bp->wol = 0;
7846
7847 return 0;
7848 }
7849
7850 static u32 bnx2x_get_msglevel(struct net_device *dev)
7851 {
7852 struct bnx2x *bp = netdev_priv(dev);
7853
7854 return bp->msglevel;
7855 }
7856
7857 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7858 {
7859 struct bnx2x *bp = netdev_priv(dev);
7860
7861 if (capable(CAP_NET_ADMIN))
7862 bp->msglevel = level;
7863 }
7864
7865 static int bnx2x_nway_reset(struct net_device *dev)
7866 {
7867 struct bnx2x *bp = netdev_priv(dev);
7868
7869 if (!bp->port.pmf)
7870 return 0;
7871
7872 if (netif_running(dev)) {
7873 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7874 bnx2x_link_set(bp);
7875 }
7876
7877 return 0;
7878 }
7879
7880 static int bnx2x_get_eeprom_len(struct net_device *dev)
7881 {
7882 struct bnx2x *bp = netdev_priv(dev);
7883
7884 return bp->common.flash_size;
7885 }
7886
7887 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7888 {
7889 int port = BP_PORT(bp);
7890 int count, i;
7891 u32 val = 0;
7892
7893 /* adjust timeout for emulation/FPGA */
7894 count = NVRAM_TIMEOUT_COUNT;
7895 if (CHIP_REV_IS_SLOW(bp))
7896 count *= 100;
7897
7898 /* request access to nvram interface */
7899 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7900 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7901
7902 for (i = 0; i < count*10; i++) {
7903 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7904 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7905 break;
7906
7907 udelay(5);
7908 }
7909
7910 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7911 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7912 return -EBUSY;
7913 }
7914
7915 return 0;
7916 }
7917
7918 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7919 {
7920 int port = BP_PORT(bp);
7921 int count, i;
7922 u32 val = 0;
7923
7924 /* adjust timeout for emulation/FPGA */
7925 count = NVRAM_TIMEOUT_COUNT;
7926 if (CHIP_REV_IS_SLOW(bp))
7927 count *= 100;
7928
7929 /* relinquish nvram interface */
7930 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7931 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7932
7933 for (i = 0; i < count*10; i++) {
7934 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7935 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7936 break;
7937
7938 udelay(5);
7939 }
7940
7941 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7942 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7943 return -EBUSY;
7944 }
7945
7946 return 0;
7947 }
7948
7949 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7950 {
7951 u32 val;
7952
7953 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7954
7955 /* enable both bits, even on read */
7956 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7957 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7958 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7959 }
7960
7961 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7962 {
7963 u32 val;
7964
7965 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7966
7967 /* disable both bits, even after read */
7968 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7969 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7970 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7971 }
7972
7973 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7974 u32 cmd_flags)
7975 {
7976 int count, i, rc;
7977 u32 val;
7978
7979 /* build the command word */
7980 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7981
7982 /* need to clear DONE bit separately */
7983 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7984
7985 /* address of the NVRAM to read from */
7986 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7987 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7988
7989 /* issue a read command */
7990 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7991
7992 /* adjust timeout for emulation/FPGA */
7993 count = NVRAM_TIMEOUT_COUNT;
7994 if (CHIP_REV_IS_SLOW(bp))
7995 count *= 100;
7996
7997 /* wait for completion */
7998 *ret_val = 0;
7999 rc = -EBUSY;
8000 for (i = 0; i < count; i++) {
8001 udelay(5);
8002 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8003
8004 if (val & MCPR_NVM_COMMAND_DONE) {
8005 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8006 /* we read nvram data in cpu order,
8007 * but ethtool sees it as an array of bytes;
8008 * converting to big-endian will do the work */
8009 val = cpu_to_be32(val);
8010 *ret_val = val;
8011 rc = 0;
8012 break;
8013 }
8014 }
8015
8016 return rc;
8017 }
8018
8019 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8020 int buf_size)
8021 {
8022 int rc;
8023 u32 cmd_flags;
8024 u32 val;
8025
8026 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8027 DP(BNX2X_MSG_NVM,
8028 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8029 offset, buf_size);
8030 return -EINVAL;
8031 }
8032
8033 if (offset + buf_size > bp->common.flash_size) {
8034 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8035 " buf_size (0x%x) > flash_size (0x%x)\n",
8036 offset, buf_size, bp->common.flash_size);
8037 return -EINVAL;
8038 }
8039
8040 /* request access to nvram interface */
8041 rc = bnx2x_acquire_nvram_lock(bp);
8042 if (rc)
8043 return rc;
8044
8045 /* enable access to nvram interface */
8046 bnx2x_enable_nvram_access(bp);
8047
8048 /* read the first word(s) */
8049 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8050 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8051 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8052 memcpy(ret_buf, &val, 4);
8053
8054 /* advance to the next dword */
8055 offset += sizeof(u32);
8056 ret_buf += sizeof(u32);
8057 buf_size -= sizeof(u32);
8058 cmd_flags = 0;
8059 }
8060
8061 if (rc == 0) {
8062 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8063 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8064 memcpy(ret_buf, &val, 4);
8065 }
8066
8067 /* disable access to nvram interface */
8068 bnx2x_disable_nvram_access(bp);
8069 bnx2x_release_nvram_lock(bp);
8070
8071 return rc;
8072 }
8073
8074 static int bnx2x_get_eeprom(struct net_device *dev,
8075 struct ethtool_eeprom *eeprom, u8 *eebuf)
8076 {
8077 struct bnx2x *bp = netdev_priv(dev);
8078 int rc;
8079
8080 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8081 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8082 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8083 eeprom->len, eeprom->len);
8084
8085 /* parameters already validated in ethtool_get_eeprom */
8086
8087 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8088
8089 return rc;
8090 }
8091
8092 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8093 u32 cmd_flags)
8094 {
8095 int count, i, rc;
8096
8097 /* build the command word */
8098 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8099
8100 /* need to clear DONE bit separately */
8101 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8102
8103 /* write the data */
8104 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8105
8106 /* address of the NVRAM to write to */
8107 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8108 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8109
8110 /* issue the write command */
8111 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8112
8113 /* adjust timeout for emulation/FPGA */
8114 count = NVRAM_TIMEOUT_COUNT;
8115 if (CHIP_REV_IS_SLOW(bp))
8116 count *= 100;
8117
8118 /* wait for completion */
8119 rc = -EBUSY;
8120 for (i = 0; i < count; i++) {
8121 udelay(5);
8122 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8123 if (val & MCPR_NVM_COMMAND_DONE) {
8124 rc = 0;
8125 break;
8126 }
8127 }
8128
8129 return rc;
8130 }
8131
8132 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8133
8134 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8135 int buf_size)
8136 {
8137 int rc;
8138 u32 cmd_flags;
8139 u32 align_offset;
8140 u32 val;
8141
8142 if (offset + buf_size > bp->common.flash_size) {
8143 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8144 " buf_size (0x%x) > flash_size (0x%x)\n",
8145 offset, buf_size, bp->common.flash_size);
8146 return -EINVAL;
8147 }
8148
8149 /* request access to nvram interface */
8150 rc = bnx2x_acquire_nvram_lock(bp);
8151 if (rc)
8152 return rc;
8153
8154 /* enable access to nvram interface */
8155 bnx2x_enable_nvram_access(bp);
8156
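/* A single-byte update is done as a read-modify-write of the aligned
 * dword that contains the byte.
 */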
8157 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8158 align_offset = (offset & ~0x03);
8159 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8160
8161 if (rc == 0) {
8162 val &= ~(0xff << BYTE_OFFSET(offset));
8163 val |= (*data_buf << BYTE_OFFSET(offset));
8164
8165 /* nvram data is returned as an array of bytes;
8166 * convert it back to cpu order */
8167 val = be32_to_cpu(val);
8168
8169 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8170 cmd_flags);
8171 }
8172
8173 /* disable access to nvram interface */
8174 bnx2x_disable_nvram_access(bp);
8175 bnx2x_release_nvram_lock(bp);
8176
8177 return rc;
8178 }
8179
8180 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8181 int buf_size)
8182 {
8183 int rc;
8184 u32 cmd_flags;
8185 u32 val;
8186 u32 written_so_far;
8187
8188 if (buf_size == 1) /* ethtool */
8189 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8190
8191 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8192 DP(BNX2X_MSG_NVM,
8193 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8194 offset, buf_size);
8195 return -EINVAL;
8196 }
8197
8198 if (offset + buf_size > bp->common.flash_size) {
8199 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8200 " buf_size (0x%x) > flash_size (0x%x)\n",
8201 offset, buf_size, bp->common.flash_size);
8202 return -EINVAL;
8203 }
8204
8205 /* request access to nvram interface */
8206 rc = bnx2x_acquire_nvram_lock(bp);
8207 if (rc)
8208 return rc;
8209
8210 /* enable access to nvram interface */
8211 bnx2x_enable_nvram_access(bp);
8212
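/* NVRAM is written one dword at a time; the FIRST/LAST command flags
 * must bracket both the whole buffer and each flash page, so they are
 * raised again whenever a page boundary is crossed.
 */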
8213 written_so_far = 0;
8214 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8215 while ((written_so_far < buf_size) && (rc == 0)) {
8216 if (written_so_far == (buf_size - sizeof(u32)))
8217 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8218 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8219 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8220 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8221 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8222
8223 memcpy(&val, data_buf, 4);
8224
8225 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8226
8227 /* advance to the next dword */
8228 offset += sizeof(u32);
8229 data_buf += sizeof(u32);
8230 written_so_far += sizeof(u32);
8231 cmd_flags = 0;
8232 }
8233
8234 /* disable access to nvram interface */
8235 bnx2x_disable_nvram_access(bp);
8236 bnx2x_release_nvram_lock(bp);
8237
8238 return rc;
8239 }
8240
8241 static int bnx2x_set_eeprom(struct net_device *dev,
8242 struct ethtool_eeprom *eeprom, u8 *eebuf)
8243 {
8244 struct bnx2x *bp = netdev_priv(dev);
8245 int rc;
8246
8247 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8248 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8249 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8250 eeprom->len, eeprom->len);
8251
8252 /* parameters already validated in ethtool_set_eeprom */
8253
8254 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8255 if (eeprom->magic == 0x00504859)
8256 if (bp->port.pmf) {
8257
8258 bnx2x_acquire_phy_lock(bp);
8259 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8260 bp->link_params.ext_phy_config,
8261 (bp->state != BNX2X_STATE_CLOSED),
8262 eebuf, eeprom->len);
8263 if ((bp->state == BNX2X_STATE_OPEN) ||
8264 (bp->state == BNX2X_STATE_DISABLED)) {
8265 rc |= bnx2x_link_reset(&bp->link_params,
8266 &bp->link_vars);
8267 rc |= bnx2x_phy_init(&bp->link_params,
8268 &bp->link_vars);
8269 }
8270 bnx2x_release_phy_lock(bp);
8271
8272 } else /* Only the PMF can access the PHY */
8273 return -EINVAL;
8274 else
8275 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8276
8277 return rc;
8278 }
8279
8280 static int bnx2x_get_coalesce(struct net_device *dev,
8281 struct ethtool_coalesce *coal)
8282 {
8283 struct bnx2x *bp = netdev_priv(dev);
8284
8285 memset(coal, 0, sizeof(struct ethtool_coalesce));
8286
8287 coal->rx_coalesce_usecs = bp->rx_ticks;
8288 coal->tx_coalesce_usecs = bp->tx_ticks;
8289
8290 return 0;
8291 }
8292
8293 static int bnx2x_set_coalesce(struct net_device *dev,
8294 struct ethtool_coalesce *coal)
8295 {
8296 struct bnx2x *bp = netdev_priv(dev);
8297
8298 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8299 if (bp->rx_ticks > 3000)
8300 bp->rx_ticks = 3000;
8301
8302 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8303 if (bp->tx_ticks > 0x3000)
8304 bp->tx_ticks = 0x3000;
8305
8306 if (netif_running(dev))
8307 bnx2x_update_coalesce(bp);
8308
8309 return 0;
8310 }
8311
8312 static void bnx2x_get_ringparam(struct net_device *dev,
8313 struct ethtool_ringparam *ering)
8314 {
8315 struct bnx2x *bp = netdev_priv(dev);
8316
8317 ering->rx_max_pending = MAX_RX_AVAIL;
8318 ering->rx_mini_max_pending = 0;
8319 ering->rx_jumbo_max_pending = 0;
8320
8321 ering->rx_pending = bp->rx_ring_size;
8322 ering->rx_mini_pending = 0;
8323 ering->rx_jumbo_pending = 0;
8324
8325 ering->tx_max_pending = MAX_TX_AVAIL;
8326 ering->tx_pending = bp->tx_ring_size;
8327 }
8328
8329 static int bnx2x_set_ringparam(struct net_device *dev,
8330 struct ethtool_ringparam *ering)
8331 {
8332 struct bnx2x *bp = netdev_priv(dev);
8333 int rc = 0;
8334
8335 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8336 (ering->tx_pending > MAX_TX_AVAIL) ||
8337 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8338 return -EINVAL;
8339
8340 bp->rx_ring_size = ering->rx_pending;
8341 bp->tx_ring_size = ering->tx_pending;
8342
8343 if (netif_running(dev)) {
8344 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8345 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8346 }
8347
8348 return rc;
8349 }
8350
8351 static void bnx2x_get_pauseparam(struct net_device *dev,
8352 struct ethtool_pauseparam *epause)
8353 {
8354 struct bnx2x *bp = netdev_priv(dev);
8355
8356 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8357 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8358
8359 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8360 FLOW_CTRL_RX);
8361 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8362 FLOW_CTRL_TX);
8363
8364 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8365 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8366 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8367 }
8368
8369 static int bnx2x_set_pauseparam(struct net_device *dev,
8370 struct ethtool_pauseparam *epause)
8371 {
8372 struct bnx2x *bp = netdev_priv(dev);
8373
8374 if (IS_E1HMF(bp))
8375 return 0;
8376
8377 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8378 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8379 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8380
8381 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8382
8383 if (epause->rx_pause)
8384 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8385
8386 if (epause->tx_pause)
8387 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8388
8389 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8390 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8391
8392 if (epause->autoneg) {
8393 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8394 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8395 return -EINVAL;
8396 }
8397
8398 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8399 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8400 }
8401
8402 DP(NETIF_MSG_LINK,
8403 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8404
8405 if (netif_running(dev)) {
8406 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8407 bnx2x_link_set(bp);
8408 }
8409
8410 return 0;
8411 }
8412
8413 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8414 {
8415 struct bnx2x *bp = netdev_priv(dev);
8416 int changed = 0;
8417 int rc = 0;
8418
8419 /* TPA requires Rx CSUM offloading */
8420 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8421 if (!(dev->features & NETIF_F_LRO)) {
8422 dev->features |= NETIF_F_LRO;
8423 bp->flags |= TPA_ENABLE_FLAG;
8424 changed = 1;
8425 }
8426
8427 } else if (dev->features & NETIF_F_LRO) {
8428 dev->features &= ~NETIF_F_LRO;
8429 bp->flags &= ~TPA_ENABLE_FLAG;
8430 changed = 1;
8431 }
8432
8433 if (changed && netif_running(dev)) {
8434 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8435 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8436 }
8437
8438 return rc;
8439 }
8440
8441 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8442 {
8443 struct bnx2x *bp = netdev_priv(dev);
8444
8445 return bp->rx_csum;
8446 }
8447
8448 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8449 {
8450 struct bnx2x *bp = netdev_priv(dev);
8451 int rc = 0;
8452
8453 bp->rx_csum = data;
8454
8455 /* Disable TPA when Rx CSUM is disabled; otherwise all
8456 TPA'ed packets will be discarded due to a wrong TCP CSUM */
8457 if (!data) {
8458 u32 flags = ethtool_op_get_flags(dev);
8459
8460 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8461 }
8462
8463 return rc;
8464 }
8465
8466 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8467 {
8468 if (data) {
8469 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8470 dev->features |= NETIF_F_TSO6;
8471 } else {
8472 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8473 dev->features &= ~NETIF_F_TSO6;
8474 }
8475
8476 return 0;
8477 }
8478
8479 static const struct {
8480 char string[ETH_GSTRING_LEN];
8481 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8482 { "register_test (offline)" },
8483 { "memory_test (offline)" },
8484 { "loopback_test (offline)" },
8485 { "nvram_test (online)" },
8486 { "interrupt_test (online)" },
8487 { "link_test (online)" },
8488 { "idle check (online)" },
8489 { "MC errors (online)" }
8490 };
8491
8492 static int bnx2x_self_test_count(struct net_device *dev)
8493 {
8494 return BNX2X_NUM_TESTS;
8495 }
8496
8497 static int bnx2x_test_registers(struct bnx2x *bp)
8498 {
8499 int idx, i, rc = -ENODEV;
8500 u32 wr_val = 0;
8501 int port = BP_PORT(bp);
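/* Register test table: offset0 is the port-0 register address, offset1
 * is the stride to the next port's copy of the register, and mask
 * covers the read/write bits that are compared after each write.
 */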
8502 static const struct {
8503 u32 offset0;
8504 u32 offset1;
8505 u32 mask;
8506 } reg_tbl[] = {
8507 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8508 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8509 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8510 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8511 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8512 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8513 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8514 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8515 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8516 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8517 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8518 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8519 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8520 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8521 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8522 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8523 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8524 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8525 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8526 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8527 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8528 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8529 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8530 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8531 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8532 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8533 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8534 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8535 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8536 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8537 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8538 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8539 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8540 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8541 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8542 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8543 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8544 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8545
8546 { 0xffffffff, 0, 0x00000000 }
8547 };
8548
8549 if (!netif_running(bp->dev))
8550 return rc;
8551
8552 /* Repeat the test twice:
8553 First by writing 0x00000000, second by writing 0xffffffff */
8554 for (idx = 0; idx < 2; idx++) {
8555
8556 switch (idx) {
8557 case 0:
8558 wr_val = 0;
8559 break;
8560 case 1:
8561 wr_val = 0xffffffff;
8562 break;
8563 }
8564
8565 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8566 u32 offset, mask, save_val, val;
8567
8568 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8569 mask = reg_tbl[i].mask;
8570
8571 save_val = REG_RD(bp, offset);
8572
8573 REG_WR(bp, offset, wr_val);
8574 val = REG_RD(bp, offset);
8575
8576 /* Restore the original register's value */
8577 REG_WR(bp, offset, save_val);
8578
8579 /* verify that the value read back matches the expected value */
8580 if ((val & mask) != (wr_val & mask))
8581 goto test_reg_exit;
8582 }
8583 }
8584
8585 rc = 0;
8586
8587 test_reg_exit:
8588 return rc;
8589 }
8590
8591 static int bnx2x_test_memory(struct bnx2x *bp)
8592 {
8593 int i, j, rc = -ENODEV;
8594 u32 val;
8595 static const struct {
8596 u32 offset;
8597 int size;
8598 } mem_tbl[] = {
8599 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8600 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8601 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8602 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8603 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8604 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8605 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8606
8607 { 0xffffffff, 0 }
8608 };
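/* Parity status registers with the bits that are masked out on E1/E1H;
 * any other parity bit being set fails the memory test.
 */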
8609 static const struct {
8610 char *name;
8611 u32 offset;
8612 u32 e1_mask;
8613 u32 e1h_mask;
8614 } prty_tbl[] = {
8615 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8616 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8617 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8618 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8619 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8620 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8621
8622 { NULL, 0xffffffff, 0, 0 }
8623 };
8624
8625 if (!netif_running(bp->dev))
8626 return rc;
8627
8628 /* Go through all the memories */
8629 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8630 for (j = 0; j < mem_tbl[i].size; j++)
8631 REG_RD(bp, mem_tbl[i].offset + j*4);
8632
8633 /* Check the parity status */
8634 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8635 val = REG_RD(bp, prty_tbl[i].offset);
8636 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8637 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8638 DP(NETIF_MSG_HW,
8639 "%s is 0x%x\n", prty_tbl[i].name, val);
8640 goto test_mem_exit;
8641 }
8642 }
8643
8644 rc = 0;
8645
8646 test_mem_exit:
8647 return rc;
8648 }
8649
8650 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8651 {
8652 int cnt = 1000;
8653
8654 if (link_up)
8655 while (bnx2x_link_test(bp) && cnt--)
8656 msleep(10);
8657 }
8658
8659 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8660 {
8661 unsigned int pkt_size, num_pkts, i;
8662 struct sk_buff *skb;
8663 unsigned char *packet;
8664 struct bnx2x_fastpath *fp = &bp->fp[0];
8665 u16 tx_start_idx, tx_idx;
8666 u16 rx_start_idx, rx_idx;
8667 u16 pkt_prod;
8668 struct sw_tx_bd *tx_buf;
8669 struct eth_tx_bd *tx_bd;
8670 dma_addr_t mapping;
8671 union eth_rx_cqe *cqe;
8672 u8 cqe_fp_flags;
8673 struct sw_rx_bd *rx_buf;
8674 u16 len;
8675 int rc = -ENODEV;
8676
8677 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8678 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8679 bnx2x_acquire_phy_lock(bp);
8680 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8681 bnx2x_release_phy_lock(bp);
8682
8683 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8684 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8685 bnx2x_acquire_phy_lock(bp);
8686 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8687 bnx2x_release_phy_lock(bp);
8688 /* wait until link state is restored */
8689 bnx2x_wait_for_link(bp, link_up);
8690
8691 } else
8692 return -EINVAL;
8693
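/* Build a maximum-size test frame: the destination MAC is our own
 * address so the looped-back frame passes MAC filtering, and the
 * payload carries a known byte pattern that is verified on receive.
 */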
8694 pkt_size = 1514;
8695 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8696 if (!skb) {
8697 rc = -ENOMEM;
8698 goto test_loopback_exit;
8699 }
8700 packet = skb_put(skb, pkt_size);
8701 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8702 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8703 for (i = ETH_HLEN; i < pkt_size; i++)
8704 packet[i] = (unsigned char) (i & 0xff);
8705
8706 num_pkts = 0;
8707 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8708 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8709
8710 pkt_prod = fp->tx_pkt_prod++;
8711 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8712 tx_buf->first_bd = fp->tx_bd_prod;
8713 tx_buf->skb = skb;
8714
8715 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8716 mapping = pci_map_single(bp->pdev, skb->data,
8717 skb_headlen(skb), PCI_DMA_TODEVICE);
8718 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8719 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8720 tx_bd->nbd = cpu_to_le16(1);
8721 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8722 tx_bd->vlan = cpu_to_le16(pkt_prod);
8723 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8724 ETH_TX_BD_FLAGS_END_BD);
8725 tx_bd->general_data = ((UNICAST_ADDRESS <<
8726 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8727
8728 fp->hw_tx_prods->bds_prod =
8729 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8730 mb(); /* FW restriction: must not reorder writing nbd and packets */
8731 fp->hw_tx_prods->packets_prod =
8732 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8733 DOORBELL(bp, FP_IDX(fp), 0);
8734
8735 mmiowb();
8736
8737 num_pkts++;
8738 fp->tx_bd_prod++;
8739 bp->dev->trans_start = jiffies;
8740
8741 udelay(100);
8742
8743 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8744 if (tx_idx != tx_start_idx + num_pkts)
8745 goto test_loopback_exit;
8746
8747 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8748 if (rx_idx != rx_start_idx + num_pkts)
8749 goto test_loopback_exit;
8750
8751 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8752 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8753 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8754 goto test_loopback_rx_exit;
8755
8756 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8757 if (len != pkt_size)
8758 goto test_loopback_rx_exit;
8759
8760 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8761 skb = rx_buf->skb;
8762 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8763 for (i = ETH_HLEN; i < pkt_size; i++)
8764 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8765 goto test_loopback_rx_exit;
8766
8767 rc = 0;
8768
8769 test_loopback_rx_exit:
8770 bp->dev->last_rx = jiffies;
8771
8772 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8773 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8774 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8775 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8776
8777 /* Update producers */
8778 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8779 fp->rx_sge_prod);
8780 mmiowb(); /* keep prod updates ordered */
8781
8782 test_loopback_exit:
8783 bp->link_params.loopback_mode = LOOPBACK_NONE;
8784
8785 return rc;
8786 }
8787
8788 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8789 {
8790 int rc = 0;
8791
8792 if (!netif_running(bp->dev))
8793 return BNX2X_LOOPBACK_FAILED;
8794
8795 bnx2x_netif_stop(bp, 1);
8796
8797 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8798 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8799 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8800 }
8801
8802 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8803 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8804 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8805 }
8806
8807 bnx2x_netif_start(bp);
8808
8809 return rc;
8810 }
8811
8812 #define CRC32_RESIDUAL 0xdebb20e3
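/* Running the CRC over a region that includes its trailing CRC32 comes
 * out to this fixed residual value; each NVRAM region below is
 * validated that way.
 */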
8813
8814 static int bnx2x_test_nvram(struct bnx2x *bp)
8815 {
8816 static const struct {
8817 int offset;
8818 int size;
8819 } nvram_tbl[] = {
8820 { 0, 0x14 }, /* bootstrap */
8821 { 0x14, 0xec }, /* dir */
8822 { 0x100, 0x350 }, /* manuf_info */
8823 { 0x450, 0xf0 }, /* feature_info */
8824 { 0x640, 0x64 }, /* upgrade_key_info */
8825 { 0x6a4, 0x64 },
8826 { 0x708, 0x70 }, /* manuf_key_info */
8827 { 0x778, 0x70 },
8828 { 0, 0 }
8829 };
8830 u32 buf[0x350 / 4];
8831 u8 *data = (u8 *)buf;
8832 int i, rc;
8833 u32 magic, csum;
8834
8835 rc = bnx2x_nvram_read(bp, 0, data, 4);
8836 if (rc) {
8837 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8838 goto test_nvram_exit;
8839 }
8840
8841 magic = be32_to_cpu(buf[0]);
8842 if (magic != 0x669955aa) {
8843 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8844 rc = -ENODEV;
8845 goto test_nvram_exit;
8846 }
8847
8848 for (i = 0; nvram_tbl[i].size; i++) {
8849
8850 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8851 nvram_tbl[i].size);
8852 if (rc) {
8853 DP(NETIF_MSG_PROBE,
8854 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8855 goto test_nvram_exit;
8856 }
8857
8858 csum = ether_crc_le(nvram_tbl[i].size, data);
8859 if (csum != CRC32_RESIDUAL) {
8860 DP(NETIF_MSG_PROBE,
8861 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8862 rc = -ENODEV;
8863 goto test_nvram_exit;
8864 }
8865 }
8866
8867 test_nvram_exit:
8868 return rc;
8869 }
8870
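/* Interrupt test: post an empty MAC configuration ramrod and wait for
 * its completion, which exercises the slowpath interrupt path.
 */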
8871 static int bnx2x_test_intr(struct bnx2x *bp)
8872 {
8873 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8874 int i, rc;
8875
8876 if (!netif_running(bp->dev))
8877 return -ENODEV;
8878
8879 config->hdr.length_6b = 0;
8880 config->hdr.offset = 0;
8881 config->hdr.client_id = BP_CL_ID(bp);
8882 config->hdr.reserved1 = 0;
8883
8884 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8885 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8886 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8887 if (rc == 0) {
8888 bp->set_mac_pending++;
8889 for (i = 0; i < 10; i++) {
8890 if (!bp->set_mac_pending)
8891 break;
8892 msleep_interruptible(10);
8893 }
8894 if (i == 10)
8895 rc = -ENODEV;
8896 }
8897
8898 return rc;
8899 }
8900
8901 static void bnx2x_self_test(struct net_device *dev,
8902 struct ethtool_test *etest, u64 *buf)
8903 {
8904 struct bnx2x *bp = netdev_priv(dev);
8905
8906 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8907
8908 if (!netif_running(dev))
8909 return;
8910
8911 /* offline tests are not supported in MF mode */
8912 if (IS_E1HMF(bp))
8913 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8914
8915 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8916 u8 link_up;
8917
8918 link_up = bp->link_vars.link_up;
8919 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8920 bnx2x_nic_load(bp, LOAD_DIAG);
8921 /* wait until link state is restored */
8922 bnx2x_wait_for_link(bp, link_up);
8923
8924 if (bnx2x_test_registers(bp) != 0) {
8925 buf[0] = 1;
8926 etest->flags |= ETH_TEST_FL_FAILED;
8927 }
8928 if (bnx2x_test_memory(bp) != 0) {
8929 buf[1] = 1;
8930 etest->flags |= ETH_TEST_FL_FAILED;
8931 }
8932 buf[2] = bnx2x_test_loopback(bp, link_up);
8933 if (buf[2] != 0)
8934 etest->flags |= ETH_TEST_FL_FAILED;
8935
8936 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8937 bnx2x_nic_load(bp, LOAD_NORMAL);
8938 /* wait until link state is restored */
8939 bnx2x_wait_for_link(bp, link_up);
8940 }
8941 if (bnx2x_test_nvram(bp) != 0) {
8942 buf[3] = 1;
8943 etest->flags |= ETH_TEST_FL_FAILED;
8944 }
8945 if (bnx2x_test_intr(bp) != 0) {
8946 buf[4] = 1;
8947 etest->flags |= ETH_TEST_FL_FAILED;
8948 }
8949 if (bp->port.pmf)
8950 if (bnx2x_link_test(bp) != 0) {
8951 buf[5] = 1;
8952 etest->flags |= ETH_TEST_FL_FAILED;
8953 }
8954 buf[7] = bnx2x_mc_assert(bp);
8955 if (buf[7] != 0)
8956 etest->flags |= ETH_TEST_FL_FAILED;
8957
8958 #ifdef BNX2X_EXTRA_DEBUG
8959 bnx2x_panic_dump(bp);
8960 #endif
8961 }
8962
8963 static const struct {
8964 long offset;
8965 int size;
8966 u32 flags;
8967 #define STATS_FLAGS_PORT 1
8968 #define STATS_FLAGS_FUNC 2
8969 u8 string[ETH_GSTRING_LEN];
8970 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8971 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8972 8, STATS_FLAGS_FUNC, "rx_bytes" },
8973 { STATS_OFFSET32(error_bytes_received_hi),
8974 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8975 { STATS_OFFSET32(total_bytes_transmitted_hi),
8976 8, STATS_FLAGS_FUNC, "tx_bytes" },
8977 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8978 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8979 { STATS_OFFSET32(total_unicast_packets_received_hi),
8980 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8981 { STATS_OFFSET32(total_multicast_packets_received_hi),
8982 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8983 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8984 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8985 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8986 8, STATS_FLAGS_FUNC, "tx_packets" },
8987 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8988 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8989 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8990 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8991 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8992 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8993 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8994 8, STATS_FLAGS_PORT, "rx_align_errors" },
8995 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8996 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8997 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8998 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8999 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9000 8, STATS_FLAGS_PORT, "tx_deferred" },
9001 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9002 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9003 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9004 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9005 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9006 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9007 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9008 8, STATS_FLAGS_PORT, "rx_fragments" },
9009 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9010 8, STATS_FLAGS_PORT, "rx_jabbers" },
9011 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9012 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9013 { STATS_OFFSET32(jabber_packets_received),
9014 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9015 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9016 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9017 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9018 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9019 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9020 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9021 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9022 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9023 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9024 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9025 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9026 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9027 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9028 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9029 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9030 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9031 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9032 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9033 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9034 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9035 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9036 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9037 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9038 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9039 { STATS_OFFSET32(mac_filter_discard),
9040 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9041 { STATS_OFFSET32(no_buff_discard),
9042 4, STATS_FLAGS_FUNC, "rx_discards" },
9043 { STATS_OFFSET32(xxoverflow_discard),
9044 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9045 { STATS_OFFSET32(brb_drop_hi),
9046 8, STATS_FLAGS_PORT, "brb_discard" },
9047 { STATS_OFFSET32(brb_truncate_hi),
9048 8, STATS_FLAGS_PORT, "brb_truncate" },
9049 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9050 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9051 { STATS_OFFSET32(rx_skb_alloc_failed),
9052 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9053 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9054 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9055 };
9056
9057 #define IS_NOT_E1HMF_STAT(bp, i) \
9058 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9059
9060 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9061 {
9062 struct bnx2x *bp = netdev_priv(dev);
9063 int i, j;
9064
9065 switch (stringset) {
9066 case ETH_SS_STATS:
9067 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9068 if (IS_NOT_E1HMF_STAT(bp, i))
9069 continue;
9070 strcpy(buf + j*ETH_GSTRING_LEN,
9071 bnx2x_stats_arr[i].string);
9072 j++;
9073 }
9074 break;
9075
9076 case ETH_SS_TEST:
9077 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9078 break;
9079 }
9080 }
9081
9082 static int bnx2x_get_stats_count(struct net_device *dev)
9083 {
9084 struct bnx2x *bp = netdev_priv(dev);
9085 int i, num_stats = 0;
9086
9087 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9088 if (IS_NOT_E1HMF_STAT(bp, i))
9089 continue;
9090 num_stats++;
9091 }
9092 return num_stats;
9093 }
9094
9095 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9096 struct ethtool_stats *stats, u64 *buf)
9097 {
9098 struct bnx2x *bp = netdev_priv(dev);
9099 u32 *hw_stats = (u32 *)&bp->eth_stats;
9100 int i, j;
9101
9102 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9103 if (IS_NOT_E1HMF_STAT(bp, i))
9104 continue;
9105
9106 if (bnx2x_stats_arr[i].size == 0) {
9107 /* skip this counter */
9108 buf[j] = 0;
9109 j++;
9110 continue;
9111 }
9112 if (bnx2x_stats_arr[i].size == 4) {
9113 /* 4-byte counter */
9114 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9115 j++;
9116 continue;
9117 }
9118 /* 8-byte counter */
9119 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9120 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9121 j++;
9122 }
9123 }
9124
9125 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9126 {
9127 struct bnx2x *bp = netdev_priv(dev);
9128 int port = BP_PORT(bp);
9129 int i;
9130
9131 if (!netif_running(dev))
9132 return 0;
9133
9134 if (!bp->port.pmf)
9135 return 0;
9136
9137 if (data == 0)
9138 data = 2;
9139
9140 for (i = 0; i < (data * 2); i++) {
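/* Blink the port LED for 'data' seconds: toggle between the
 * operational and off modes every 500 ms.
 */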
9141 if ((i % 2) == 0)
9142 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9143 bp->link_params.hw_led_mode,
9144 bp->link_params.chip_id);
9145 else
9146 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9147 bp->link_params.hw_led_mode,
9148 bp->link_params.chip_id);
9149
9150 msleep_interruptible(500);
9151 if (signal_pending(current))
9152 break;
9153 }
9154
9155 if (bp->link_vars.link_up)
9156 bnx2x_set_led(bp, port, LED_MODE_OPER,
9157 bp->link_vars.line_speed,
9158 bp->link_params.hw_led_mode,
9159 bp->link_params.chip_id);
9160
9161 return 0;
9162 }
9163
9164 static struct ethtool_ops bnx2x_ethtool_ops = {
9165 .get_settings = bnx2x_get_settings,
9166 .set_settings = bnx2x_set_settings,
9167 .get_drvinfo = bnx2x_get_drvinfo,
9168 .get_wol = bnx2x_get_wol,
9169 .set_wol = bnx2x_set_wol,
9170 .get_msglevel = bnx2x_get_msglevel,
9171 .set_msglevel = bnx2x_set_msglevel,
9172 .nway_reset = bnx2x_nway_reset,
9173 .get_link = ethtool_op_get_link,
9174 .get_eeprom_len = bnx2x_get_eeprom_len,
9175 .get_eeprom = bnx2x_get_eeprom,
9176 .set_eeprom = bnx2x_set_eeprom,
9177 .get_coalesce = bnx2x_get_coalesce,
9178 .set_coalesce = bnx2x_set_coalesce,
9179 .get_ringparam = bnx2x_get_ringparam,
9180 .set_ringparam = bnx2x_set_ringparam,
9181 .get_pauseparam = bnx2x_get_pauseparam,
9182 .set_pauseparam = bnx2x_set_pauseparam,
9183 .get_rx_csum = bnx2x_get_rx_csum,
9184 .set_rx_csum = bnx2x_set_rx_csum,
9185 .get_tx_csum = ethtool_op_get_tx_csum,
9186 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9187 .set_flags = bnx2x_set_flags,
9188 .get_flags = ethtool_op_get_flags,
9189 .get_sg = ethtool_op_get_sg,
9190 .set_sg = ethtool_op_set_sg,
9191 .get_tso = ethtool_op_get_tso,
9192 .set_tso = bnx2x_set_tso,
9193 .self_test_count = bnx2x_self_test_count,
9194 .self_test = bnx2x_self_test,
9195 .get_strings = bnx2x_get_strings,
9196 .phys_id = bnx2x_phys_id,
9197 .get_stats_count = bnx2x_get_stats_count,
9198 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9199 };
9200
9201 /* end of ethtool_ops */
9202
9203 /****************************************************************************
9204 * General service functions
9205 ****************************************************************************/
9206
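/* Program the PCI PM control register directly: return to D0 (clearing
 * any pending PME status) or enter D3hot, enabling PME there when
 * Wake-on-LAN is configured.
 */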
9207 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9208 {
9209 u16 pmcsr;
9210
9211 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9212
9213 switch (state) {
9214 case PCI_D0:
9215 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9216 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9217 PCI_PM_CTRL_PME_STATUS));
9218
9219 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9220 /* delay required during transition out of D3hot */
9221 msleep(20);
9222 break;
9223
9224 case PCI_D3hot:
9225 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9226 pmcsr |= 3;
9227
9228 if (bp->wol)
9229 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9230
9231 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9232 pmcsr);
9233
9234 /* No more memory access after this point until
9235 * device is brought back to D0.
9236 */
9237 break;
9238
9239 default:
9240 return -EINVAL;
9241 }
9242 return 0;
9243 }
9244
9245 /*
9246 * net_device service functions
9247 */
9248
9249 static int bnx2x_poll(struct napi_struct *napi, int budget)
9250 {
9251 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9252 napi);
9253 struct bnx2x *bp = fp->bp;
9254 int work_done = 0;
9255 u16 rx_cons_sb;
9256
9257 #ifdef BNX2X_STOP_ON_ERROR
9258 if (unlikely(bp->panic))
9259 goto poll_panic;
9260 #endif
9261
9262 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9263 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9264 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9265
9266 bnx2x_update_fpsb_idx(fp);
9267
9268 if (BNX2X_HAS_TX_WORK(fp))
9269 bnx2x_tx_int(fp, budget);
9270
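/* The RCQ consumer from the status block may point at the last entry
 * of a page, which is the "next page" element and is never consumed;
 * skip over it before checking for new completions.
 */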
9271 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9272 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9273 rx_cons_sb++;
9274 if (BNX2X_HAS_RX_WORK(fp))
9275 work_done = bnx2x_rx_int(fp, budget);
9276
9277 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9278 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9279 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9280 rx_cons_sb++;
9281
9282 /* must not complete if we consumed full budget */
9283 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9284
9285 #ifdef BNX2X_STOP_ON_ERROR
9286 poll_panic:
9287 #endif
9288 netif_rx_complete(bp->dev, napi);
9289
9290 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9291 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9292 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9293 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9294 }
9295 return work_done;
9296 }
9297
9298
9299 /* We split the first BD into a headers BD and a data BD
9300 * to ease the pain of our fellow microcode engineers;
9301 * we use one mapping for both BDs.
9302 * So far this has only been observed to happen
9303 * in Other Operating Systems(TM).
9304 */
9305 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9306 struct bnx2x_fastpath *fp,
9307 struct eth_tx_bd **tx_bd, u16 hlen,
9308 u16 bd_prod, int nbd)
9309 {
9310 struct eth_tx_bd *h_tx_bd = *tx_bd;
9311 struct eth_tx_bd *d_tx_bd;
9312 dma_addr_t mapping;
9313 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9314
9315 /* first fix first BD */
9316 h_tx_bd->nbd = cpu_to_le16(nbd);
9317 h_tx_bd->nbytes = cpu_to_le16(hlen);
9318
9319 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9320 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9321 h_tx_bd->addr_lo, h_tx_bd->nbd);
9322
9323 /* now get a new data BD
9324 * (after the pbd) and fill it */
9325 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9326 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9327
9328 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9329 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9330
9331 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9332 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9333 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9334 d_tx_bd->vlan = 0;
9335 /* this marks the BD as one that has no individual mapping
9336 * the FW ignores this flag in a BD not marked start
9337 */
9338 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9339 DP(NETIF_MSG_TX_QUEUED,
9340 "TSO split data size is %d (%x:%x)\n",
9341 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9342
9343 /* update tx_bd for marking the last BD flag */
9344 *tx_bd = d_tx_bd;
9345
9346 return bd_prod;
9347 }
9348
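/* Adjust a partially computed checksum when the hardware's checksum
 * start offset differs from where the stack began summing: 'fix' bytes
 * are folded out of (fix > 0) or back into (fix < 0) the sum, and the
 * folded result is byte-swapped for the parsing BD.
 */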
9349 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9350 {
9351 if (fix > 0)
9352 csum = (u16) ~csum_fold(csum_sub(csum,
9353 csum_partial(t_header - fix, fix, 0)));
9354
9355 else if (fix < 0)
9356 csum = (u16) ~csum_fold(csum_add(csum,
9357 csum_partial(t_header, -fix, 0)));
9358
9359 return swab16(csum);
9360 }
9361
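/* Classify an outgoing skb into XMIT_* flags: plain, IPv4 or IPv6
 * checksum offload, TCP vs. other L4, and GSO (TSO) for v4 or v6.
 * bnx2x_start_xmit() uses the result to decide how to fill the
 * parsing BD.
 */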
9362 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9363 {
9364 u32 rc;
9365
9366 if (skb->ip_summed != CHECKSUM_PARTIAL)
9367 rc = XMIT_PLAIN;
9368
9369 else {
9370 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9371 rc = XMIT_CSUM_V6;
9372 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9373 rc |= XMIT_CSUM_TCP;
9374
9375 } else {
9376 rc = XMIT_CSUM_V4;
9377 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9378 rc |= XMIT_CSUM_TCP;
9379 }
9380 }
9381
9382 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9383 rc |= XMIT_GSO_V4;
9384
9385 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9386 rc |= XMIT_GSO_V6;
9387
9388 return rc;
9389 }
9390
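/* The firmware appears to fetch at most MAX_FETCH_BD BDs per packet,
 * so for an LSO packet every window of (MAX_FETCH_BD - 3) consecutive
 * buffers must carry at least one MSS of payload.  The sliding-window
 * sums below verify that; anything that fails is linearized by the
 * caller before transmission.
 */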
9391 /* check if packet requires linearization (packet is too fragmented) */
9392 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9393 u32 xmit_type)
9394 {
9395 int to_copy = 0;
9396 int hlen = 0;
9397 int first_bd_sz = 0;
9398
9399 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9400 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9401
9402 if (xmit_type & XMIT_GSO) {
9403 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9404 /* Check if LSO packet needs to be copied:
9405 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9406 int wnd_size = MAX_FETCH_BD - 3;
9407 /* Number of windows to check */
9408 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9409 int wnd_idx = 0;
9410 int frag_idx = 0;
9411 u32 wnd_sum = 0;
9412
9413 /* Headers length */
9414 hlen = (int)(skb_transport_header(skb) - skb->data) +
9415 tcp_hdrlen(skb);
9416
9417 			/* Amount of data (w/o headers) on the linear part of the SKB */
9418 first_bd_sz = skb_headlen(skb) - hlen;
9419
9420 wnd_sum = first_bd_sz;
9421
9422 /* Calculate the first sum - it's special */
9423 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9424 wnd_sum +=
9425 skb_shinfo(skb)->frags[frag_idx].size;
9426
9427 /* If there was data on linear skb data - check it */
9428 if (first_bd_sz > 0) {
9429 if (unlikely(wnd_sum < lso_mss)) {
9430 to_copy = 1;
9431 goto exit_lbl;
9432 }
9433
9434 wnd_sum -= first_bd_sz;
9435 }
9436
9437 /* Others are easier: run through the frag list and
9438 check all windows */
9439 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9440 wnd_sum +=
9441 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9442
9443 if (unlikely(wnd_sum < lso_mss)) {
9444 to_copy = 1;
9445 break;
9446 }
9447 wnd_sum -=
9448 skb_shinfo(skb)->frags[wnd_idx].size;
9449 }
9450
9451 } else {
9452 			/* a non-LSO packet that is too fragmented should
9453 			   always be linearized */
9454 to_copy = 1;
9455 }
9456 }
9457
9458 exit_lbl:
9459 if (unlikely(to_copy))
9460 DP(NETIF_MSG_TX_QUEUED,
9461 "Linearization IS REQUIRED for %s packet. "
9462 "num_frags %d hlen %d first_bd_sz %d\n",
9463 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9464 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9465
9466 return to_copy;
9467 }
9468
9469 /* called with netif_tx_lock
9470 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9471 * netif_wake_queue()
9472 */
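/* BD chain built per packet: a start BD for the linear data, an
 * optional parsing BD for checksum/TSO offload, one BD per page
 * fragment, and the final BD flagged ETH_TX_BD_FLAGS_END_BD.  An
 * oversized TSO header is split off via bnx2x_tx_split().
 */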
9473 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9474 {
9475 struct bnx2x *bp = netdev_priv(dev);
9476 struct bnx2x_fastpath *fp;
9477 struct sw_tx_bd *tx_buf;
9478 struct eth_tx_bd *tx_bd;
9479 struct eth_tx_parse_bd *pbd = NULL;
9480 u16 pkt_prod, bd_prod;
9481 int nbd, fp_index;
9482 dma_addr_t mapping;
9483 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9484 int vlan_off = (bp->e1hov ? 4 : 0);
9485 int i;
9486 u8 hlen = 0;
9487
9488 #ifdef BNX2X_STOP_ON_ERROR
9489 if (unlikely(bp->panic))
9490 return NETDEV_TX_BUSY;
9491 #endif
9492
9493 fp_index = (smp_processor_id() % bp->num_queues);
9494 fp = &bp->fp[fp_index];
9495
9496 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9497 		bp->eth_stats.driver_xoff++;
9498 netif_stop_queue(dev);
9499 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9500 return NETDEV_TX_BUSY;
9501 }
9502
9503 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9504 " gso type %x xmit_type %x\n",
9505 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9506 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9507
9508 /* First, check if we need to linearize the skb
9509 (due to FW restrictions) */
9510 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9511 /* Statistics of linearization */
9512 bp->lin_cnt++;
9513 if (skb_linearize(skb) != 0) {
9514 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9515 "silently dropping this SKB\n");
9516 dev_kfree_skb_any(skb);
9517 return NETDEV_TX_OK;
9518 }
9519 }
9520
9521 /*
9522 Please read carefully. First we use one BD which we mark as start,
9523 then for TSO or xsum we have a parsing info BD,
9524 and only then we have the rest of the TSO BDs.
9525 (don't forget to mark the last one as last,
9526 and to unmap only AFTER you write to the BD ...)
9527 	   And above all, all pbd sizes are in words - NOT DWORDS!
9528 */
9529
9530 pkt_prod = fp->tx_pkt_prod++;
9531 bd_prod = TX_BD(fp->tx_bd_prod);
9532
9533 /* get a tx_buf and first BD */
9534 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9535 tx_bd = &fp->tx_desc_ring[bd_prod];
9536
9537 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9538 tx_bd->general_data = (UNICAST_ADDRESS <<
9539 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9540 /* header nbd */
9541 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9542
9543 /* remember the first BD of the packet */
9544 tx_buf->first_bd = fp->tx_bd_prod;
9545 tx_buf->skb = skb;
9546
9547 DP(NETIF_MSG_TX_QUEUED,
9548 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9549 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9550
9551 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9552 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9553 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9554 vlan_off += 4;
9555 } else
9556 tx_bd->vlan = cpu_to_le16(pkt_prod);
9557
9558 if (xmit_type) {
9559 /* turn on parsing and get a BD */
9560 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9561 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9562
9563 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9564 }
9565
9566 if (xmit_type & XMIT_CSUM) {
9567 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9568
9569 /* for now NS flag is not used in Linux */
9570 pbd->global_data = (hlen |
9571 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9572 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9573
9574 pbd->ip_hlen = (skb_transport_header(skb) -
9575 skb_network_header(skb)) / 2;
9576
9577 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9578
9579 pbd->total_hlen = cpu_to_le16(hlen);
9580 hlen = hlen*2 - vlan_off;
9581
9582 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9583
9584 if (xmit_type & XMIT_CSUM_V4)
9585 tx_bd->bd_flags.as_bitfield |=
9586 ETH_TX_BD_FLAGS_IP_CSUM;
9587 else
9588 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9589
9590 if (xmit_type & XMIT_CSUM_TCP) {
9591 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9592
9593 } else {
9594 s8 fix = SKB_CS_OFF(skb); /* signed! */
9595
9596 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9597 pbd->cs_offset = fix / 2;
9598
9599 DP(NETIF_MSG_TX_QUEUED,
9600 "hlen %d offset %d fix %d csum before fix %x\n",
9601 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9602 SKB_CS(skb));
9603
9604 /* HW bug: fixup the CSUM */
9605 pbd->tcp_pseudo_csum =
9606 bnx2x_csum_fix(skb_transport_header(skb),
9607 SKB_CS(skb), fix);
9608
9609 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9610 pbd->tcp_pseudo_csum);
9611 }
9612 }
9613
9614 mapping = pci_map_single(bp->pdev, skb->data,
9615 skb_headlen(skb), PCI_DMA_TODEVICE);
9616
9617 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9618 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9619 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9620 tx_bd->nbd = cpu_to_le16(nbd);
9621 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9622
9623 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9624 " nbytes %d flags %x vlan %x\n",
9625 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9626 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9627 le16_to_cpu(tx_bd->vlan));
9628
9629 if (xmit_type & XMIT_GSO) {
9630
9631 DP(NETIF_MSG_TX_QUEUED,
9632 "TSO packet len %d hlen %d total len %d tso size %d\n",
9633 skb->len, hlen, skb_headlen(skb),
9634 skb_shinfo(skb)->gso_size);
9635
9636 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9637
9638 if (unlikely(skb_headlen(skb) > hlen))
9639 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9640 bd_prod, ++nbd);
9641
9642 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9643 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9644 pbd->tcp_flags = pbd_tcp_flags(skb);
9645
9646 if (xmit_type & XMIT_GSO_V4) {
9647 pbd->ip_id = swab16(ip_hdr(skb)->id);
9648 pbd->tcp_pseudo_csum =
9649 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9650 ip_hdr(skb)->daddr,
9651 0, IPPROTO_TCP, 0));
9652
9653 } else
9654 pbd->tcp_pseudo_csum =
9655 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9656 &ipv6_hdr(skb)->daddr,
9657 0, IPPROTO_TCP, 0));
9658
9659 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9660 }
9661
9662 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9663 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9664
9665 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9666 tx_bd = &fp->tx_desc_ring[bd_prod];
9667
9668 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9669 frag->size, PCI_DMA_TODEVICE);
9670
9671 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9672 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9673 tx_bd->nbytes = cpu_to_le16(frag->size);
9674 tx_bd->vlan = cpu_to_le16(pkt_prod);
9675 tx_bd->bd_flags.as_bitfield = 0;
9676
9677 DP(NETIF_MSG_TX_QUEUED,
9678 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9679 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9680 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9681 }
9682
9683 /* now at last mark the BD as the last BD */
9684 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9685
9686 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9687 tx_bd, tx_bd->bd_flags.as_bitfield);
9688
9689 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9690
9691 /* now send a tx doorbell, counting the next BD
9692 * if the packet contains or ends with it
9693 */
9694 if (TX_BD_POFF(bd_prod) < nbd)
9695 nbd++;
9696
9697 if (pbd)
9698 DP(NETIF_MSG_TX_QUEUED,
9699 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9700 " tcp_flags %x xsum %x seq %u hlen %u\n",
9701 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9702 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9703 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9704
9705 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9706
9707 fp->hw_tx_prods->bds_prod =
9708 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9709 mb(); /* FW restriction: must not reorder writing nbd and packets */
9710 fp->hw_tx_prods->packets_prod =
9711 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9712 DOORBELL(bp, FP_IDX(fp), 0);
9713
9714 mmiowb();
9715
9716 fp->tx_bd_prod += nbd;
9717 dev->trans_start = jiffies;
9718
9719 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9720 netif_stop_queue(dev);
9721 bp->eth_stats.driver_xoff++;
9722 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9723 netif_wake_queue(dev);
9724 }
9725 fp->tx_pkt++;
9726
9727 return NETDEV_TX_OK;
9728 }
9729
9730 /* called with rtnl_lock */
9731 static int bnx2x_open(struct net_device *dev)
9732 {
9733 struct bnx2x *bp = netdev_priv(dev);
9734
9735 bnx2x_set_power_state(bp, PCI_D0);
9736
9737 return bnx2x_nic_load(bp, LOAD_OPEN);
9738 }
9739
9740 /* called with rtnl_lock */
9741 static int bnx2x_close(struct net_device *dev)
9742 {
9743 struct bnx2x *bp = netdev_priv(dev);
9744
9745 /* Unload the driver, release IRQs */
9746 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9747 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9748 if (!CHIP_REV_IS_SLOW(bp))
9749 bnx2x_set_power_state(bp, PCI_D3hot);
9750
9751 return 0;
9752 }
9753
9754 /* called with netif_tx_lock from set_multicast */
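/* Pick the RX filtering mode (normal / all-multicast / promiscuous)
 * from dev->flags and the size of the multicast list, program the
 * multicast filter (CAM entries on E1, a 256-bit hash on E1H), and
 * push the chosen mode to the storm firmware.
 */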
9755 static void bnx2x_set_rx_mode(struct net_device *dev)
9756 {
9757 struct bnx2x *bp = netdev_priv(dev);
9758 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9759 int port = BP_PORT(bp);
9760
9761 if (bp->state != BNX2X_STATE_OPEN) {
9762 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9763 return;
9764 }
9765
9766 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9767
9768 if (dev->flags & IFF_PROMISC)
9769 rx_mode = BNX2X_RX_MODE_PROMISC;
9770
9771 else if ((dev->flags & IFF_ALLMULTI) ||
9772 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9773 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9774
9775 else { /* some multicasts */
9776 if (CHIP_IS_E1(bp)) {
9777 int i, old, offset;
9778 struct dev_mc_list *mclist;
9779 struct mac_configuration_cmd *config =
9780 bnx2x_sp(bp, mcast_config);
9781
9782 for (i = 0, mclist = dev->mc_list;
9783 mclist && (i < dev->mc_count);
9784 i++, mclist = mclist->next) {
9785
9786 config->config_table[i].
9787 cam_entry.msb_mac_addr =
9788 swab16(*(u16 *)&mclist->dmi_addr[0]);
9789 config->config_table[i].
9790 cam_entry.middle_mac_addr =
9791 swab16(*(u16 *)&mclist->dmi_addr[2]);
9792 config->config_table[i].
9793 cam_entry.lsb_mac_addr =
9794 swab16(*(u16 *)&mclist->dmi_addr[4]);
9795 config->config_table[i].cam_entry.flags =
9796 cpu_to_le16(port);
9797 config->config_table[i].
9798 target_table_entry.flags = 0;
9799 config->config_table[i].
9800 target_table_entry.client_id = 0;
9801 config->config_table[i].
9802 target_table_entry.vlan_id = 0;
9803
9804 DP(NETIF_MSG_IFUP,
9805 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9806 config->config_table[i].
9807 cam_entry.msb_mac_addr,
9808 config->config_table[i].
9809 cam_entry.middle_mac_addr,
9810 config->config_table[i].
9811 cam_entry.lsb_mac_addr);
9812 }
9813 old = config->hdr.length_6b;
9814 if (old > i) {
9815 for (; i < old; i++) {
9816 if (CAM_IS_INVALID(config->
9817 config_table[i])) {
9818 i--; /* already invalidated */
9819 break;
9820 }
9821 /* invalidate */
9822 CAM_INVALIDATE(config->
9823 config_table[i]);
9824 }
9825 }
9826
9827 if (CHIP_REV_IS_SLOW(bp))
9828 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9829 else
9830 offset = BNX2X_MAX_MULTICAST*(1 + port);
9831
9832 config->hdr.length_6b = i;
9833 config->hdr.offset = offset;
9834 config->hdr.client_id = BP_CL_ID(bp);
9835 config->hdr.reserved1 = 0;
9836
9837 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9838 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9839 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9840 0);
9841 } else { /* E1H */
9842 /* Accept one or more multicasts */
9843 struct dev_mc_list *mclist;
9844 u32 mc_filter[MC_HASH_SIZE];
9845 u32 crc, bit, regidx;
9846 int i;
9847
9848 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9849
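			/* Hash each multicast address with CRC32c; the top
			 * eight bits of the CRC select one of 256 filter
			 * bits spread across the MC_HASH registers.
			 */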
9850 for (i = 0, mclist = dev->mc_list;
9851 mclist && (i < dev->mc_count);
9852 i++, mclist = mclist->next) {
9853
9854 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9855 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9856 mclist->dmi_addr[0], mclist->dmi_addr[1],
9857 mclist->dmi_addr[2], mclist->dmi_addr[3],
9858 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9859
9860 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9861 bit = (crc >> 24) & 0xff;
9862 regidx = bit >> 5;
9863 bit &= 0x1f;
9864 mc_filter[regidx] |= (1 << bit);
9865 }
9866
9867 for (i = 0; i < MC_HASH_SIZE; i++)
9868 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9869 mc_filter[i]);
9870 }
9871 }
9872
9873 bp->rx_mode = rx_mode;
9874 bnx2x_set_storm_rx_mode(bp);
9875 }
9876
9877 /* called with rtnl_lock */
9878 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9879 {
9880 struct sockaddr *addr = p;
9881 struct bnx2x *bp = netdev_priv(dev);
9882
9883 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9884 return -EINVAL;
9885
9886 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9887 if (netif_running(dev)) {
9888 if (CHIP_IS_E1(bp))
9889 bnx2x_set_mac_addr_e1(bp, 1);
9890 else
9891 bnx2x_set_mac_addr_e1h(bp, 1);
9892 }
9893
9894 return 0;
9895 }
9896
9897 /* called with rtnl_lock */
9898 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9899 {
9900 struct mii_ioctl_data *data = if_mii(ifr);
9901 struct bnx2x *bp = netdev_priv(dev);
9902 int port = BP_PORT(bp);
9903 int err;
9904
9905 switch (cmd) {
9906 case SIOCGMIIPHY:
9907 data->phy_id = bp->port.phy_addr;
9908
9909 /* fallthrough */
9910
9911 case SIOCGMIIREG: {
9912 u16 mii_regval;
9913
9914 if (!netif_running(dev))
9915 return -EAGAIN;
9916
9917 mutex_lock(&bp->port.phy_mutex);
9918 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9919 DEFAULT_PHY_DEV_ADDR,
9920 (data->reg_num & 0x1f), &mii_regval);
9921 data->val_out = mii_regval;
9922 mutex_unlock(&bp->port.phy_mutex);
9923 return err;
9924 }
9925
9926 case SIOCSMIIREG:
9927 if (!capable(CAP_NET_ADMIN))
9928 return -EPERM;
9929
9930 if (!netif_running(dev))
9931 return -EAGAIN;
9932
9933 mutex_lock(&bp->port.phy_mutex);
9934 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9935 DEFAULT_PHY_DEV_ADDR,
9936 (data->reg_num & 0x1f), data->val_in);
9937 mutex_unlock(&bp->port.phy_mutex);
9938 return err;
9939
9940 default:
9941 /* do nothing */
9942 break;
9943 }
9944
9945 return -EOPNOTSUPP;
9946 }
9947
9948 /* called with rtnl_lock */
9949 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9950 {
9951 struct bnx2x *bp = netdev_priv(dev);
9952 int rc = 0;
9953
9954 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9955 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9956 return -EINVAL;
9957
9958 /* This does not race with packet allocation
9959 * because the actual alloc size is
9960 * only updated as part of load
9961 */
9962 dev->mtu = new_mtu;
9963
9964 if (netif_running(dev)) {
9965 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9966 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9967 }
9968
9969 return rc;
9970 }
9971
9972 static void bnx2x_tx_timeout(struct net_device *dev)
9973 {
9974 struct bnx2x *bp = netdev_priv(dev);
9975
9976 #ifdef BNX2X_STOP_ON_ERROR
9977 if (!bp->panic)
9978 bnx2x_panic();
9979 #endif
9980 	/* This allows the netif to be shut down gracefully before resetting */
9981 schedule_work(&bp->reset_task);
9982 }
9983
9984 #ifdef BCM_VLAN
9985 /* called with rtnl_lock */
9986 static void bnx2x_vlan_rx_register(struct net_device *dev,
9987 struct vlan_group *vlgrp)
9988 {
9989 struct bnx2x *bp = netdev_priv(dev);
9990
9991 bp->vlgrp = vlgrp;
9992 if (netif_running(dev))
9993 bnx2x_set_client_config(bp);
9994 }
9995
9996 #endif
9997
9998 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9999 static void poll_bnx2x(struct net_device *dev)
10000 {
10001 struct bnx2x *bp = netdev_priv(dev);
10002
10003 disable_irq(bp->pdev->irq);
10004 bnx2x_interrupt(bp->pdev->irq, dev);
10005 enable_irq(bp->pdev->irq);
10006 }
10007 #endif
10008
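/* One-time PCI and netdev wiring: enable the device, verify and map
 * BAR 0 (register space) and BAR 2 (doorbells), prefer a 64-bit DMA
 * mask, clear the PXP2 indirect-address windows, and hook up the
 * net_device callbacks and feature flags.
 */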
10009 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10010 struct net_device *dev)
10011 {
10012 struct bnx2x *bp;
10013 int rc;
10014
10015 SET_NETDEV_DEV(dev, &pdev->dev);
10016 bp = netdev_priv(dev);
10017
10018 bp->dev = dev;
10019 bp->pdev = pdev;
10020 bp->flags = 0;
10021 bp->func = PCI_FUNC(pdev->devfn);
10022
10023 rc = pci_enable_device(pdev);
10024 if (rc) {
10025 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10026 goto err_out;
10027 }
10028
10029 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10030 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10031 " aborting\n");
10032 rc = -ENODEV;
10033 goto err_out_disable;
10034 }
10035
10036 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10037 printk(KERN_ERR PFX "Cannot find second PCI device"
10038 " base address, aborting\n");
10039 rc = -ENODEV;
10040 goto err_out_disable;
10041 }
10042
10043 if (atomic_read(&pdev->enable_cnt) == 1) {
10044 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10045 if (rc) {
10046 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10047 " aborting\n");
10048 goto err_out_disable;
10049 }
10050
10051 pci_set_master(pdev);
10052 pci_save_state(pdev);
10053 }
10054
10055 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10056 if (bp->pm_cap == 0) {
10057 printk(KERN_ERR PFX "Cannot find power management"
10058 " capability, aborting\n");
10059 rc = -EIO;
10060 goto err_out_release;
10061 }
10062
10063 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10064 if (bp->pcie_cap == 0) {
10065 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10066 " aborting\n");
10067 rc = -EIO;
10068 goto err_out_release;
10069 }
10070
10071 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10072 bp->flags |= USING_DAC_FLAG;
10073 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10074 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10075 " failed, aborting\n");
10076 rc = -EIO;
10077 goto err_out_release;
10078 }
10079
10080 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10081 printk(KERN_ERR PFX "System does not support DMA,"
10082 " aborting\n");
10083 rc = -EIO;
10084 goto err_out_release;
10085 }
10086
10087 dev->mem_start = pci_resource_start(pdev, 0);
10088 dev->base_addr = dev->mem_start;
10089 dev->mem_end = pci_resource_end(pdev, 0);
10090
10091 dev->irq = pdev->irq;
10092
10093 bp->regview = ioremap_nocache(dev->base_addr,
10094 pci_resource_len(pdev, 0));
10095 if (!bp->regview) {
10096 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10097 rc = -ENOMEM;
10098 goto err_out_release;
10099 }
10100
10101 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10102 min_t(u64, BNX2X_DB_SIZE,
10103 pci_resource_len(pdev, 2)));
10104 if (!bp->doorbells) {
10105 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10106 rc = -ENOMEM;
10107 goto err_out_unmap;
10108 }
10109
10110 bnx2x_set_power_state(bp, PCI_D0);
10111
10112 /* clean indirect addresses */
10113 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10114 PCICFG_VENDOR_ID_OFFSET);
10115 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10116 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10117 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10118 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10119
10120 dev->hard_start_xmit = bnx2x_start_xmit;
10121 dev->watchdog_timeo = TX_TIMEOUT;
10122
10123 dev->ethtool_ops = &bnx2x_ethtool_ops;
10124 dev->open = bnx2x_open;
10125 dev->stop = bnx2x_close;
10126 dev->set_multicast_list = bnx2x_set_rx_mode;
10127 dev->set_mac_address = bnx2x_change_mac_addr;
10128 dev->do_ioctl = bnx2x_ioctl;
10129 dev->change_mtu = bnx2x_change_mtu;
10130 dev->tx_timeout = bnx2x_tx_timeout;
10131 #ifdef BCM_VLAN
10132 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10133 #endif
10134 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10135 dev->poll_controller = poll_bnx2x;
10136 #endif
10137 dev->features |= NETIF_F_SG;
10138 dev->features |= NETIF_F_HW_CSUM;
10139 if (bp->flags & USING_DAC_FLAG)
10140 dev->features |= NETIF_F_HIGHDMA;
10141 #ifdef BCM_VLAN
10142 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10143 #endif
10144 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10145 dev->features |= NETIF_F_TSO6;
10146
10147 return 0;
10148
10149 err_out_unmap:
10150 if (bp->regview) {
10151 iounmap(bp->regview);
10152 bp->regview = NULL;
10153 }
10154 if (bp->doorbells) {
10155 iounmap(bp->doorbells);
10156 bp->doorbells = NULL;
10157 }
10158
10159 err_out_release:
10160 if (atomic_read(&pdev->enable_cnt) == 1)
10161 pci_release_regions(pdev);
10162
10163 err_out_disable:
10164 pci_disable_device(pdev);
10165 pci_set_drvdata(pdev, NULL);
10166
10167 err_out:
10168 return rc;
10169 }
10170
10171 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10172 {
10173 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10174
10175 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10176 return val;
10177 }
10178
10179 /* return value of 1=2.5GHz 2=5GHz */
10180 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10181 {
10182 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10183
10184 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10185 return val;
10186 }
10187
10188 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10189 const struct pci_device_id *ent)
10190 {
10191 static int version_printed;
10192 struct net_device *dev = NULL;
10193 struct bnx2x *bp;
10194 int rc;
10195 DECLARE_MAC_BUF(mac);
10196
10197 if (version_printed++ == 0)
10198 printk(KERN_INFO "%s", version);
10199
10200 /* dev zeroed in init_etherdev */
10201 dev = alloc_etherdev(sizeof(*bp));
10202 if (!dev) {
10203 printk(KERN_ERR PFX "Cannot allocate net device\n");
10204 return -ENOMEM;
10205 }
10206
10207 netif_carrier_off(dev);
10208
10209 bp = netdev_priv(dev);
10210 bp->msglevel = debug;
10211
10212 rc = bnx2x_init_dev(pdev, dev);
10213 if (rc < 0) {
10214 free_netdev(dev);
10215 return rc;
10216 }
10217
10218 rc = register_netdev(dev);
10219 if (rc) {
10220 dev_err(&pdev->dev, "Cannot register net device\n");
10221 goto init_one_exit;
10222 }
10223
10224 pci_set_drvdata(pdev, dev);
10225
10226 rc = bnx2x_init_bp(bp);
10227 if (rc) {
10228 unregister_netdev(dev);
10229 goto init_one_exit;
10230 }
10231
10232 bp->common.name = board_info[ent->driver_data].name;
10233 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10234 " IRQ %d, ", dev->name, bp->common.name,
10235 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10236 bnx2x_get_pcie_width(bp),
10237 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10238 dev->base_addr, bp->pdev->irq);
10239 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10240 return 0;
10241
10242 init_one_exit:
10243 if (bp->regview)
10244 iounmap(bp->regview);
10245
10246 if (bp->doorbells)
10247 iounmap(bp->doorbells);
10248
10249 free_netdev(dev);
10250
10251 if (atomic_read(&pdev->enable_cnt) == 1)
10252 pci_release_regions(pdev);
10253
10254 pci_disable_device(pdev);
10255 pci_set_drvdata(pdev, NULL);
10256
10257 return rc;
10258 }
10259
10260 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10261 {
10262 struct net_device *dev = pci_get_drvdata(pdev);
10263 struct bnx2x *bp;
10264
10265 if (!dev) {
10266 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10267 return;
10268 }
10269 bp = netdev_priv(dev);
10270
10271 unregister_netdev(dev);
10272
10273 if (bp->regview)
10274 iounmap(bp->regview);
10275
10276 if (bp->doorbells)
10277 iounmap(bp->doorbells);
10278
10279 free_netdev(dev);
10280
10281 if (atomic_read(&pdev->enable_cnt) == 1)
10282 pci_release_regions(pdev);
10283
10284 pci_disable_device(pdev);
10285 pci_set_drvdata(pdev, NULL);
10286 }
10287
10288 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10289 {
10290 struct net_device *dev = pci_get_drvdata(pdev);
10291 struct bnx2x *bp;
10292
10293 if (!dev) {
10294 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10295 return -ENODEV;
10296 }
10297 bp = netdev_priv(dev);
10298
10299 rtnl_lock();
10300
10301 pci_save_state(pdev);
10302
10303 if (!netif_running(dev)) {
10304 rtnl_unlock();
10305 return 0;
10306 }
10307
10308 netif_device_detach(dev);
10309
10310 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10311
10312 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10313
10314 rtnl_unlock();
10315
10316 return 0;
10317 }
10318
10319 static int bnx2x_resume(struct pci_dev *pdev)
10320 {
10321 struct net_device *dev = pci_get_drvdata(pdev);
10322 struct bnx2x *bp;
10323 int rc;
10324
10325 if (!dev) {
10326 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10327 return -ENODEV;
10328 }
10329 bp = netdev_priv(dev);
10330
10331 rtnl_lock();
10332
10333 pci_restore_state(pdev);
10334
10335 if (!netif_running(dev)) {
10336 rtnl_unlock();
10337 return 0;
10338 }
10339
10340 bnx2x_set_power_state(bp, PCI_D0);
10341 netif_device_attach(dev);
10342
10343 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10344
10345 rtnl_unlock();
10346
10347 return rc;
10348 }
10349
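/* Teardown used when a PCI (EEH) error is detected: stop NAPI and the
 * periodic timer, release IRQs, invalidate the cached E1 CAM entries
 * and free all RX/TX memory.  Recovering the chip itself is left to
 * the slot-reset and resume callbacks.
 */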
10350 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10351 {
10352 int i;
10353
10354 bp->state = BNX2X_STATE_ERROR;
10355
10356 bp->rx_mode = BNX2X_RX_MODE_NONE;
10357
10358 bnx2x_netif_stop(bp, 0);
10359
10360 del_timer_sync(&bp->timer);
10361 bp->stats_state = STATS_STATE_DISABLED;
10362 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10363
10364 /* Release IRQs */
10365 bnx2x_free_irq(bp);
10366
10367 if (CHIP_IS_E1(bp)) {
10368 struct mac_configuration_cmd *config =
10369 bnx2x_sp(bp, mcast_config);
10370
10371 for (i = 0; i < config->hdr.length_6b; i++)
10372 CAM_INVALIDATE(config->config_table[i]);
10373 }
10374
10375 /* Free SKBs, SGEs, TPA pool and driver internals */
10376 bnx2x_free_skbs(bp);
10377 for_each_queue(bp, i)
10378 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10379 bnx2x_free_mem(bp);
10380
10381 bp->state = BNX2X_STATE_CLOSED;
10382
10383 netif_carrier_off(bp->dev);
10384
10385 return 0;
10386 }
10387
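/* After a slot reset, re-read the shared-memory base and the MCP
 * validity signature so the driver knows whether the management
 * firmware is still alive; if not, NO_MCP_FLAG is set and the reload
 * takes the no-MCP path.
 */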
10388 static void bnx2x_eeh_recover(struct bnx2x *bp)
10389 {
10390 u32 val;
10391
10392 mutex_init(&bp->port.phy_mutex);
10393
10394 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10395 bp->link_params.shmem_base = bp->common.shmem_base;
10396 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10397
10398 if (!bp->common.shmem_base ||
10399 (bp->common.shmem_base < 0xA0000) ||
10400 (bp->common.shmem_base >= 0xC0000)) {
10401 BNX2X_DEV_INFO("MCP not active\n");
10402 bp->flags |= NO_MCP_FLAG;
10403 return;
10404 }
10405
10406 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10407 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10408 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10409 BNX2X_ERR("BAD MCP validity signature\n");
10410
10411 if (!BP_NOMCP(bp)) {
10412 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10413 & DRV_MSG_SEQ_NUMBER_MASK);
10414 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10415 }
10416 }
10417
10418 /**
10419 * bnx2x_io_error_detected - called when PCI error is detected
10420 * @pdev: Pointer to PCI device
10421 * @state: The current pci connection state
10422 *
10423 * This function is called after a PCI bus error affecting
10424 * this device has been detected.
10425 */
10426 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10427 pci_channel_state_t state)
10428 {
10429 struct net_device *dev = pci_get_drvdata(pdev);
10430 struct bnx2x *bp = netdev_priv(dev);
10431
10432 rtnl_lock();
10433
10434 netif_device_detach(dev);
10435
10436 if (netif_running(dev))
10437 bnx2x_eeh_nic_unload(bp);
10438
10439 pci_disable_device(pdev);
10440
10441 rtnl_unlock();
10442
10443 /* Request a slot reset */
10444 return PCI_ERS_RESULT_NEED_RESET;
10445 }
10446
10447 /**
10448 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10449 * @pdev: Pointer to PCI device
10450 *
10451 * Restart the card from scratch, as if from a cold-boot.
10452 */
10453 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10454 {
10455 struct net_device *dev = pci_get_drvdata(pdev);
10456 struct bnx2x *bp = netdev_priv(dev);
10457
10458 rtnl_lock();
10459
10460 if (pci_enable_device(pdev)) {
10461 dev_err(&pdev->dev,
10462 "Cannot re-enable PCI device after reset\n");
10463 rtnl_unlock();
10464 return PCI_ERS_RESULT_DISCONNECT;
10465 }
10466
10467 pci_set_master(pdev);
10468 pci_restore_state(pdev);
10469
10470 if (netif_running(dev))
10471 bnx2x_set_power_state(bp, PCI_D0);
10472
10473 rtnl_unlock();
10474
10475 return PCI_ERS_RESULT_RECOVERED;
10476 }
10477
10478 /**
10479 * bnx2x_io_resume - called when traffic can start flowing again
10480 * @pdev: Pointer to PCI device
10481 *
10482 * This callback is called when the error recovery driver tells us that
10483 * its OK to resume normal operation.
10484 */
10485 static void bnx2x_io_resume(struct pci_dev *pdev)
10486 {
10487 struct net_device *dev = pci_get_drvdata(pdev);
10488 struct bnx2x *bp = netdev_priv(dev);
10489
10490 rtnl_lock();
10491
10492 bnx2x_eeh_recover(bp);
10493
10494 if (netif_running(dev))
10495 bnx2x_nic_load(bp, LOAD_NORMAL);
10496
10497 netif_device_attach(dev);
10498
10499 rtnl_unlock();
10500 }
10501
10502 static struct pci_error_handlers bnx2x_err_handler = {
10503 .error_detected = bnx2x_io_error_detected,
10504 .slot_reset = bnx2x_io_slot_reset,
10505 .resume = bnx2x_io_resume,
10506 };
10507
10508 static struct pci_driver bnx2x_pci_driver = {
10509 .name = DRV_MODULE_NAME,
10510 .id_table = bnx2x_pci_tbl,
10511 .probe = bnx2x_init_one,
10512 .remove = __devexit_p(bnx2x_remove_one),
10513 .suspend = bnx2x_suspend,
10514 .resume = bnx2x_resume,
10515 .err_handler = &bnx2x_err_handler,
10516 };
10517
10518 static int __init bnx2x_init(void)
10519 {
10520 return pci_register_driver(&bnx2x_pci_driver);
10521 }
10522
10523 static void __exit bnx2x_cleanup(void)
10524 {
10525 pci_unregister_driver(&bnx2x_pci_driver);
10526 }
10527
10528 module_init(bnx2x_init);
10529 module_exit(bnx2x_cleanup);
10530