bnx2x: Handling load failures
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.24"
#define DRV_MODULE_RELDATE	"2009/01/14"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

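/* Copy len32 dwords from host memory at dma_addr to GRC address dst_addr
 * using a DMAE command, then busy-wait on the wb_comp completion word
 * (longer per-iteration delays on emulation/FPGA).  While DMAE is not
 * yet ready the write falls back to indirect register accesses.
 */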
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

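/* Counterpart of bnx2x_write_dmae(): copy len32 dwords from GRC address
 * src_addr into the slowpath wb_data buffer, falling back to indirect
 * register reads while DMAE is not ready.
 */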
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

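/* Scan the assert lists of the four STORM processors (X/T/C/U) and print
 * any recorded asserts; returns the number of asserts found.
 */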
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

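/* Dump the bootcode (MCP) trace from scratchpad memory to the log.
 * 'mark', read from the scratchpad, is treated as the wrap point of the
 * cyclic trace buffer: the dump prints from the mark to the end of the
 * buffer and then from the start of the buffer back up to the mark.
 */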
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

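/* Dump driver state on a fatal error: ring indices and descriptors for
 * every queue, the default status block indices, the firmware trace and
 * any STORM asserts.  Statistics handling is disabled first.
 */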
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

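/* Enable interrupts in the HC: MSI-X or single-ISR INT#A mode depending
 * on bp->flags, and program the leading/trailing edge registers on E1H.
 */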
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

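/* Write an ack to the IGU command register for the given status block:
 * reports the new consumer index and sets the requested interrupt mode.
 */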
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return ((fp->tx_pkt_prod != tx_cons_sb) ||
		(fp->tx_pkt_prod != fp->tx_pkt_cons));
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

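/* Number of TX BDs available to start_xmit(); the "next-page" entries of
 * the ring are counted as used so they are never handed out.
 */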
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

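/* TX completion: walk the consumer index up to the value reported in the
 * status block, freeing completed skbs, and wake the queue if it was
 * stopped and enough BDs became available.
 */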
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

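/* Handle a ramrod completion reported on the RCQ: advance the fastpath or
 * global state machine according to the command and the current state.
 */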
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

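/* Begin TPA aggregation on the given queue: park the partially filled skb
 * from the cons ring entry in the per-queue pool, and put the pool's empty
 * skb on the prod ring entry in its place.
 */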
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

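/* RX completion processing (the NAPI poll body): walk the RCQ up to the
 * status-block consumer, dispatching slowpath CQEs to bnx2x_sp_event(),
 * handling TPA start/end CQEs, and passing regular packets to the stack;
 * stops after 'budget' packets and then updates the ring producers.
 */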
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

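/* MSI-X handler for a fastpath vector: ack the status block with
 * interrupts disabled and schedule the queue's NAPI context.
 */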
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

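/* Acquire one of the driver/HW resource locks via the MISC driver-control
 * registers; polls for up to 5 seconds and returns -EAGAIN on timeout.
 */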
1720 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1721 {
1722 u32 lock_status;
1723 u32 resource_bit = (1 << resource);
1724 int func = BP_FUNC(bp);
1725 u32 hw_lock_control_reg;
1726 int cnt;
1727
1728 /* Validating that the resource is within range */
1729 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1730 DP(NETIF_MSG_HW,
1731 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1732 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1733 return -EINVAL;
1734 }
1735
1736 if (func <= 5) {
1737 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1738 } else {
1739 hw_lock_control_reg =
1740 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1741 }
1742
1743 /* Validating that the resource is not already taken */
1744 lock_status = REG_RD(bp, hw_lock_control_reg);
1745 if (lock_status & resource_bit) {
1746 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1747 lock_status, resource_bit);
1748 return -EEXIST;
1749 }
1750
1751 /* Try for 5 second every 5ms */
1752 for (cnt = 0; cnt < 1000; cnt++) {
1753 /* Try to acquire the lock */
1754 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1755 lock_status = REG_RD(bp, hw_lock_control_reg);
1756 if (lock_status & resource_bit)
1757 return 0;
1758
1759 msleep(5);
1760 }
1761 DP(NETIF_MSG_HW, "Timeout\n");
1762 return -EAGAIN;
1763 }
1764
1765 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1766 {
1767 u32 lock_status;
1768 u32 resource_bit = (1 << resource);
1769 int func = BP_FUNC(bp);
1770 u32 hw_lock_control_reg;
1771
1772 /* Validating that the resource is within range */
1773 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1774 DP(NETIF_MSG_HW,
1775 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1776 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1777 return -EINVAL;
1778 }
1779
1780 if (func <= 5) {
1781 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1782 } else {
1783 hw_lock_control_reg =
1784 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1785 }
1786
1787 /* Validating that the resource is currently taken */
1788 lock_status = REG_RD(bp, hw_lock_control_reg);
1789 if (!(lock_status & resource_bit)) {
1790 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1791 lock_status, resource_bit);
1792 return -EFAULT;
1793 }
1794
1795 REG_WR(bp, hw_lock_control_reg, resource_bit);
1796 return 0;
1797 }
1798
1799 /* HW Lock for shared dual port PHYs */
1800 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1801 {
1802 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1803
1804 mutex_lock(&bp->port.phy_mutex);
1805
1806 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1807 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1808 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1809 }
1810
1811 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1812 {
1813 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1814
1815 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1816 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1817 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1818
1819 mutex_unlock(&bp->port.phy_mutex);
1820 }
1821
1822 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1823 {
1824 /* The GPIO should be swapped if swap register is set and active */
1825 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1826 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1827 int gpio_shift = gpio_num +
1828 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1829 u32 gpio_mask = (1 << gpio_shift);
1830 u32 gpio_reg;
1831
1832 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1833 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1834 return -EINVAL;
1835 }
1836
1837 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1838 /* read GPIO and mask except the float bits */
1839 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1840
1841 switch (mode) {
1842 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1843 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1844 gpio_num, gpio_shift);
1845 /* clear FLOAT and set CLR */
1846 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1847 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1848 break;
1849
1850 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1851 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1852 gpio_num, gpio_shift);
1853 /* clear FLOAT and set SET */
1854 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1856 break;
1857
1858 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1859 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1860 gpio_num, gpio_shift);
1861 /* set FLOAT */
1862 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1863 break;
1864
1865 default:
1866 break;
1867 }
1868
1869 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1870 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1871
1872 return 0;
1873 }
1874
1875 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1876 {
1877 u32 spio_mask = (1 << spio_num);
1878 u32 spio_reg;
1879
1880 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1881 (spio_num > MISC_REGISTERS_SPIO_7)) {
1882 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1883 return -EINVAL;
1884 }
1885
1886 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1887 /* read SPIO and mask except the float bits */
1888 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1889
1890 switch (mode) {
1891 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1892 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1893 /* clear FLOAT and set CLR */
1894 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1895 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1896 break;
1897
1898 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1899 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1900 /* clear FLOAT and set SET */
1901 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1902 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1903 break;
1904
1905 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1906 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1907 /* set FLOAT */
1908 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1909 break;
1910
1911 default:
1912 break;
1913 }
1914
1915 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1916 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1917
1918 return 0;
1919 }
1920
1921 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1922 {
1923 switch (bp->link_vars.ieee_fc &
1924 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1925 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1926 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1927 ADVERTISED_Pause);
1928 break;
1929 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1930 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1931 ADVERTISED_Pause);
1932 break;
1933 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1934 bp->port.advertising |= ADVERTISED_Asym_Pause;
1935 break;
1936 default:
1937 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1938 ADVERTISED_Pause);
1939 break;
1940 }
1941 }
1942
1943 static void bnx2x_link_report(struct bnx2x *bp)
1944 {
1945 if (bp->link_vars.link_up) {
1946 if (bp->state == BNX2X_STATE_OPEN)
1947 netif_carrier_on(bp->dev);
1948 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1949
1950 printk("%d Mbps ", bp->link_vars.line_speed);
1951
1952 if (bp->link_vars.duplex == DUPLEX_FULL)
1953 printk("full duplex");
1954 else
1955 printk("half duplex");
1956
1957 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1958 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1959 printk(", receive ");
1960 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1961 printk("& transmit ");
1962 } else {
1963 printk(", transmit ");
1964 }
1965 printk("flow control ON");
1966 }
1967 printk("\n");
1968
1969 } else { /* link_down */
1970 netif_carrier_off(bp->dev);
1971 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1972 }
1973 }
1974
1975 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1976 {
1977 if (!BP_NOMCP(bp)) {
1978 u8 rc;
1979
1980 /* Initialize link parameters structure variables */
1981 /* It is recommended to turn off RX FC for jumbo frames
1982 for better performance */
1983 if (IS_E1HMF(bp))
1984 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1985 else if (bp->dev->mtu > 5000)
1986 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1987 else
1988 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1989
1990 bnx2x_acquire_phy_lock(bp);
1991 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1992 bnx2x_release_phy_lock(bp);
1993
1994 bnx2x_calc_fc_adv(bp);
1995
1996 if (bp->link_vars.link_up)
1997 bnx2x_link_report(bp);
1998
1999
2000 return rc;
2001 }
2002 BNX2X_ERR("Bootcode is missing - not initializing link\n");
2003 return -EINVAL;
2004 }
2005
2006 static void bnx2x_link_set(struct bnx2x *bp)
2007 {
2008 if (!BP_NOMCP(bp)) {
2009 bnx2x_acquire_phy_lock(bp);
2010 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2011 bnx2x_release_phy_lock(bp);
2012
2013 bnx2x_calc_fc_adv(bp);
2014 } else
2015 BNX2X_ERR("Bootcode is missing - not setting link\n");
2016 }
2017
2018 static void bnx2x__link_reset(struct bnx2x *bp)
2019 {
2020 if (!BP_NOMCP(bp)) {
2021 bnx2x_acquire_phy_lock(bp);
2022 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2023 bnx2x_release_phy_lock(bp);
2024 } else
2025 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2026 }
2027
2028 static u8 bnx2x_link_test(struct bnx2x *bp)
2029 {
2030 u8 rc;
2031
2032 bnx2x_acquire_phy_lock(bp);
2033 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2034 bnx2x_release_phy_lock(bp);
2035
2036 return rc;
2037 }
2038
2039 /* Calculates the sum of vn_min_rates.
2040 It's needed for further normalizing of the min_rates.
2041
2042 Returns:
2043 sum of vn_min_rates
2044 or
2045 0 - if all the min_rates are 0.
2046 In the latter case the fairness algorithm should be deactivated.
2047 If not all min_rates are zero then those that are zero will
2048 be set to 1.
2049 */
2050 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2051 {
2052 int i, port = BP_PORT(bp);
2053 u32 wsum = 0;
2054 int all_zero = 1;
2055
2056 for (i = 0; i < E1HVN_MAX; i++) {
2057 u32 vn_cfg =
2058 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2059 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2060 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2061 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2062 /* If min rate is zero - set it to 1 */
2063 if (!vn_min_rate)
2064 vn_min_rate = DEF_MIN_RATE;
2065 else
2066 all_zero = 0;
2067
2068 wsum += vn_min_rate;
2069 }
2070 }
2071
2072 /* ... only if all min rates are zeroes - disable FAIRNESS */
2073 if (all_zero)
2074 return 0;
2075
2076 return wsum;
2077 }
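/* A worked example with hypothetical values: three visible vns with
configured min rates 0, 3000 and 7000.  The zero entry is lifted to
DEF_MIN_RATE, so the function returns
wsum = DEF_MIN_RATE + 3000 + 7000 and fairness stays enabled.  Only
when every visible vn has min rate 0 does it return 0, which callers
take as "deactivate fairness". */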
2078
2079 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2080 int en_fness,
2081 u16 port_rate,
2082 struct cmng_struct_per_port *m_cmng_port)
2083 {
2084 u32 r_param = port_rate / 8;
2085 int port = BP_PORT(bp);
2086 int i;
2087
2088 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2089
2090 /* Enable minmax only if we are in e1hmf mode */
2091 if (IS_E1HMF(bp)) {
2092 u32 fair_periodic_timeout_usec;
2093 u32 t_fair;
2094
2095 /* Enable rate shaping and fairness */
2096 m_cmng_port->flags.cmng_vn_enable = 1;
2097 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2098 m_cmng_port->flags.rate_shaping_enable = 1;
2099
2100 if (!en_fness)
2101 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2102 " fairness will be disabled\n");
2103
2104 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2105 m_cmng_port->rs_vars.rs_periodic_timeout =
2106 RS_PERIODIC_TIMEOUT_USEC / 4;
2107
2108 /* this is the threshold below which no timer arming will occur.
2109 The 1.25 coefficient makes the threshold a little bigger
2110 than the real time, to compensate for timer inaccuracy */
2111 m_cmng_port->rs_vars.rs_threshold =
2112 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2113
2114 /* resolution of fairness timer */
2115 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2116 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2117 t_fair = T_FAIR_COEF / port_rate;
2118
2119 /* this is the threshold below which we won't arm
2120 the timer anymore */
2121 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2122
2123 /* we multiply by 1e3/8 to get bytes/msec.
2124 We don't want the credits to exceed
2125 T_FAIR*FAIR_MEM (the algorithm resolution) */
2126 m_cmng_port->fair_vars.upper_bound =
2127 r_param * t_fair * FAIR_MEM;
2128 /* since each tick is 4 usec */
2129 m_cmng_port->fair_vars.fairness_timeout =
2130 fair_periodic_timeout_usec / 4;
2131
2132 } else {
2133 /* Disable rate shaping and fairness */
2134 m_cmng_port->flags.cmng_vn_enable = 0;
2135 m_cmng_port->flags.fairness_enable = 0;
2136 m_cmng_port->flags.rate_shaping_enable = 0;
2137
2138 DP(NETIF_MSG_IFUP,
2139 "Single function mode minmax will be disabled\n");
2140 }
2141
2142 /* Store it to internal memory */
2143 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2144 REG_WR(bp, BAR_XSTRORM_INTMEM +
2145 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2146 ((u32 *)(m_cmng_port))[i]);
2147 }
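/* Rough numbers for a 10G port, for illustration only: r_param =
10000 / 8 = 1250 bytes/usec; with RS_PERIODIC_TIMEOUT_USEC = 100
(the "25 SDM ticks" above) rs_threshold = 100 * 1250 * 5/4 = 156250,
and t_fair = T_FAIR_COEF / 10000 = 1000 usec, matching the comment
in the code. */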
2148
2149 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2150 u32 wsum, u16 port_rate,
2151 struct cmng_struct_per_port *m_cmng_port)
2152 {
2153 struct rate_shaping_vars_per_vn m_rs_vn;
2154 struct fairness_vars_per_vn m_fair_vn;
2155 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2156 u16 vn_min_rate, vn_max_rate;
2157 int i;
2158
2159 /* If function is hidden - set min and max to zeroes */
2160 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2161 vn_min_rate = 0;
2162 vn_max_rate = 0;
2163
2164 } else {
2165 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2166 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2167 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2168 the current min rate is zero - set it to 1.
2169 This is a requirement of the algorithm. */
2170 if ((vn_min_rate == 0) && wsum)
2171 vn_min_rate = DEF_MIN_RATE;
2172 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2173 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2174 }
2175
2176 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2177 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2178
2179 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2180 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2181
2182 /* global vn counter - maximal Mbps for this vn */
2183 m_rs_vn.vn_counter.rate = vn_max_rate;
2184
2185 /* quota - number of bytes transmitted in this period */
2186 m_rs_vn.vn_counter.quota =
2187 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2188
2189 #ifdef BNX2X_PER_PROT_QOS
2190 /* per protocol counter */
2191 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2192 /* maximal Mbps for this protocol */
2193 m_rs_vn.protocol_counters[protocol].rate =
2194 protocol_max_rate[protocol];
2195 /* the quota in each timer period -
2196 number of bytes transmitted in this period */
2197 m_rs_vn.protocol_counters[protocol].quota =
2198 (u32)(rs_periodic_timeout_usec *
2199 ((double)m_rs_vn.
2200 protocol_counters[protocol].rate/8));
2201 }
2202 #endif
2203
2204 if (wsum) {
2205 /* credit for each period of the fairness algorithm:
2206 number of bytes in T_FAIR (the vn's share of the port rate).
2207 wsum should not be larger than 10000, thus
2208 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2209 m_fair_vn.vn_credit_delta =
2210 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2211 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2212 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2213 m_fair_vn.vn_credit_delta);
2214 }
2215
2216 #ifdef BNX2X_PER_PROT_QOS
2217 do {
2218 u32 protocolWeightSum = 0;
2219
2220 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2221 protocolWeightSum +=
2222 drvInit.protocol_min_rate[protocol];
2223 /* per protocol counter -
2224 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2225 if (protocolWeightSum > 0) {
2226 for (protocol = 0;
2227 protocol < NUM_OF_PROTOCOLS; protocol++)
2228 /* credit for each period of the
2229 fairness algorithm - number of bytes in
2230 T_FAIR (the protocol share the vn rate) */
2231 m_fair_vn.protocol_credit_delta[protocol] =
2232 (u32)((vn_min_rate / 8) * t_fair *
2233 protocol_min_rate / protocolWeightSum);
2234 }
2235 } while (0);
2236 #endif
2237
2238 /* Store it to internal memory */
2239 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2240 REG_WR(bp, BAR_XSTRORM_INTMEM +
2241 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2242 ((u32 *)(&m_rs_vn))[i]);
2243
2244 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2245 REG_WR(bp, BAR_XSTRORM_INTMEM +
2246 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2247 ((u32 *)(&m_fair_vn))[i]);
2248 }
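/* Continuing the hypothetical example from bnx2x_calc_vn_wsum: with
wsum = 10000, vn_min_rate = 3000 and T_FAIR_COEF = 10000000 (as the
10G comment above implies), T_FAIR_COEF / (8 * wsum) = 125, so
vn_credit_delta = max(3000 * 125, 2 * fair_threshold)
                = max(375000, 2 * QM_ARB_BYTES) bytes per T_FAIR.
Illustration only; real values come from the MF configuration. */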
2249
2250 /* This function is called upon link interrupt */
2251 static void bnx2x_link_attn(struct bnx2x *bp)
2252 {
2253 int vn;
2254
2255 /* Make sure that we are synced with the current statistics */
2256 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2257
2258 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2259
2260 if (bp->link_vars.link_up) {
2261
2262 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2263 struct host_port_stats *pstats;
2264
2265 pstats = bnx2x_sp(bp, port_stats);
2266 /* reset old bmac stats */
2267 memset(&(pstats->mac_stx[0]), 0,
2268 sizeof(struct mac_stx));
2269 }
2270 if ((bp->state == BNX2X_STATE_OPEN) ||
2271 (bp->state == BNX2X_STATE_DISABLED))
2272 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2273 }
2274
2275 /* indicate link status */
2276 bnx2x_link_report(bp);
2277
2278 if (IS_E1HMF(bp)) {
2279 int func;
2280
2281 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2282 if (vn == BP_E1HVN(bp))
2283 continue;
2284
2285 func = ((vn << 1) | BP_PORT(bp));
2286
2287 /* Set the attention towards other drivers
2288 on the same port */
2289 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2290 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2291 }
2292 }
2293
2294 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2295 struct cmng_struct_per_port m_cmng_port;
2296 u32 wsum;
2297 int port = BP_PORT(bp);
2298
2299 /* Init RATE SHAPING and FAIRNESS contexts */
2300 wsum = bnx2x_calc_vn_wsum(bp);
2301 bnx2x_init_port_minmax(bp, (int)wsum,
2302 bp->link_vars.line_speed,
2303 &m_cmng_port);
2304 if (IS_E1HMF(bp))
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2306 bnx2x_init_vn_minmax(bp, 2*vn + port,
2307 wsum, bp->link_vars.line_speed,
2308 &m_cmng_port);
2309 }
2310 }
2311
2312 static void bnx2x__link_status_update(struct bnx2x *bp)
2313 {
2314 if (bp->state != BNX2X_STATE_OPEN)
2315 return;
2316
2317 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2318
2319 if (bp->link_vars.link_up)
2320 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2321 else
2322 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2323
2324 /* indicate link status */
2325 bnx2x_link_report(bp);
2326 }
2327
2328 static void bnx2x_pmf_update(struct bnx2x *bp)
2329 {
2330 int port = BP_PORT(bp);
2331 u32 val;
2332
2333 bp->port.pmf = 1;
2334 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2335
2336 /* enable nig attention */
2337 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2338 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2339 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2340
2341 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2342 }
2343
2344 /* end of Link */
2345
2346 /* slow path */
2347
2348 /*
2349 * General service functions
2350 */
2351
2352 /* the slow path queue is odd since completions arrive on the fastpath ring */
2353 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2354 u32 data_hi, u32 data_lo, int common)
2355 {
2356 int func = BP_FUNC(bp);
2357
2358 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2359 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2360 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2361 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2362 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2363
2364 #ifdef BNX2X_STOP_ON_ERROR
2365 if (unlikely(bp->panic))
2366 return -EIO;
2367 #endif
2368
2369 spin_lock_bh(&bp->spq_lock);
2370
2371 if (!bp->spq_left) {
2372 BNX2X_ERR("BUG! SPQ ring full!\n");
2373 spin_unlock_bh(&bp->spq_lock);
2374 bnx2x_panic();
2375 return -EBUSY;
2376 }
2377
2378 /* CID needs the port number to be encoded in it */
2379 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2380 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2381 HW_CID(bp, cid)));
2382 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2383 if (common)
2384 bp->spq_prod_bd->hdr.type |=
2385 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2386
2387 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2388 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2389
2390 bp->spq_left--;
2391
2392 if (bp->spq_prod_bd == bp->spq_last_bd) {
2393 bp->spq_prod_bd = bp->spq;
2394 bp->spq_prod_idx = 0;
2395 DP(NETIF_MSG_TIMER, "end of spq\n");
2396
2397 } else {
2398 bp->spq_prod_bd++;
2399 bp->spq_prod_idx++;
2400 }
2401
2402 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2403 bp->spq_prod_idx);
2404
2405 spin_unlock_bh(&bp->spq_lock);
2406 return 0;
2407 }
2408
2409 /* acquire split MCP access lock register */
2410 static int bnx2x_acquire_alr(struct bnx2x *bp)
2411 {
2412 u32 i, j, val;
2413 int rc = 0;
2414
2415 might_sleep();
2416 i = 100;
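	/* up to i*10 = 1000 polls with a 5 ms sleep each, i.e. a budget
	   of roughly 5 seconds before giving up on the lock */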
2417 for (j = 0; j < i*10; j++) {
2418 val = (1UL << 31);
2419 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2420 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2421 if (val & (1L << 31))
2422 break;
2423
2424 msleep(5);
2425 }
2426 if (!(val & (1L << 31))) {
2427 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2428 rc = -EBUSY;
2429 }
2430
2431 return rc;
2432 }
2433
2434 /* release split MCP access lock register */
2435 static void bnx2x_release_alr(struct bnx2x *bp)
2436 {
2437 u32 val = 0;
2438
2439 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2440 }
2441
2442 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2443 {
2444 struct host_def_status_block *def_sb = bp->def_status_blk;
2445 u16 rc = 0;
2446
2447 barrier(); /* status block is written to by the chip */
2448 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2449 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2450 rc |= 1;
2451 }
2452 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2453 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2454 rc |= 2;
2455 }
2456 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2457 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2458 rc |= 4;
2459 }
2460 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2461 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2462 rc |= 8;
2463 }
2464 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2465 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2466 rc |= 16;
2467 }
2468 return rc;
2469 }
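/* The return value is a bitmask of which def status block indices
moved: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
bit 3 - XSTORM, bit 4 - TSTORM.  bnx2x_sp_task() below keys off the
low bits (status & 0x1 for HW attentions, status & 0x2 for CStorm
events). */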
2470
2471 /*
2472 * slow path service functions
2473 */
2474
2475 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2476 {
2477 int port = BP_PORT(bp);
2478 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2479 COMMAND_REG_ATTN_BITS_SET);
2480 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2481 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2482 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2483 NIG_REG_MASK_INTERRUPT_PORT0;
2484 u32 aeu_mask;
2485
2486 if (bp->attn_state & asserted)
2487 BNX2X_ERR("IGU ERROR\n");
2488
2489 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2490 aeu_mask = REG_RD(bp, aeu_addr);
2491
2492 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2493 aeu_mask, asserted);
2494 aeu_mask &= ~(asserted & 0xff);
2495 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2496
2497 REG_WR(bp, aeu_addr, aeu_mask);
2498 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2499
2500 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2501 bp->attn_state |= asserted;
2502 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2503
2504 if (asserted & ATTN_HARD_WIRED_MASK) {
2505 if (asserted & ATTN_NIG_FOR_FUNC) {
2506
2507 bnx2x_acquire_phy_lock(bp);
2508
2509 /* save nig interrupt mask */
2510 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2511 REG_WR(bp, nig_int_mask_addr, 0);
2512
2513 bnx2x_link_attn(bp);
2514
2515 /* handle unicore attn? */
2516 }
2517 if (asserted & ATTN_SW_TIMER_4_FUNC)
2518 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2519
2520 if (asserted & GPIO_2_FUNC)
2521 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2522
2523 if (asserted & GPIO_3_FUNC)
2524 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2525
2526 if (asserted & GPIO_4_FUNC)
2527 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2528
2529 if (port == 0) {
2530 if (asserted & ATTN_GENERAL_ATTN_1) {
2531 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2532 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2533 }
2534 if (asserted & ATTN_GENERAL_ATTN_2) {
2535 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2536 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2537 }
2538 if (asserted & ATTN_GENERAL_ATTN_3) {
2539 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2540 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2541 }
2542 } else {
2543 if (asserted & ATTN_GENERAL_ATTN_4) {
2544 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2545 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2546 }
2547 if (asserted & ATTN_GENERAL_ATTN_5) {
2548 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2549 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2550 }
2551 if (asserted & ATTN_GENERAL_ATTN_6) {
2552 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2553 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2554 }
2555 }
2556
2557 } /* if hardwired */
2558
2559 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2560 asserted, hc_addr);
2561 REG_WR(bp, hc_addr, asserted);
2562
2563 /* now set back the mask */
2564 if (asserted & ATTN_NIG_FOR_FUNC) {
2565 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2566 bnx2x_release_phy_lock(bp);
2567 }
2568 }
2569
2570 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2571 {
2572 int port = BP_PORT(bp);
2573 int reg_offset;
2574 u32 val;
2575
2576 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2577 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2578
2579 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2580
2581 val = REG_RD(bp, reg_offset);
2582 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2583 REG_WR(bp, reg_offset, val);
2584
2585 BNX2X_ERR("SPIO5 hw attention\n");
2586
2587 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2588 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2589 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2590 /* Fan failure attention */
2591
2592 /* The PHY reset is controlled by GPIO 1 */
2593 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2594 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2595 /* Low power mode is controlled by GPIO 2 */
2596 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2597 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2598 /* mark the failure */
2599 bp->link_params.ext_phy_config &=
2600 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2601 bp->link_params.ext_phy_config |=
2602 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2603 SHMEM_WR(bp,
2604 dev_info.port_hw_config[port].
2605 external_phy_config,
2606 bp->link_params.ext_phy_config);
2607 /* log the failure */
2608 printk(KERN_ERR PFX "Fan Failure on Network"
2609 " Controller %s has caused the driver to"
2610 " shutdown the card to prevent permanent"
2611 " damage. Please contact Dell Support for"
2612 " assistance\n", bp->dev->name);
2613 break;
2614
2615 default:
2616 break;
2617 }
2618 }
2619
2620 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2621
2622 val = REG_RD(bp, reg_offset);
2623 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2624 REG_WR(bp, reg_offset, val);
2625
2626 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2627 (attn & HW_INTERRUT_ASSERT_SET_0));
2628 bnx2x_panic();
2629 }
2630 }
2631
2632 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2633 {
2634 u32 val;
2635
2636 if (attn & BNX2X_DOORQ_ASSERT) {
2637
2638 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2639 BNX2X_ERR("DB hw attention 0x%x\n", val);
2640 /* DORQ discard attention */
2641 if (val & 0x2)
2642 BNX2X_ERR("FATAL error from DORQ\n");
2643 }
2644
2645 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2646
2647 int port = BP_PORT(bp);
2648 int reg_offset;
2649
2650 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2651 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2652
2653 val = REG_RD(bp, reg_offset);
2654 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2655 REG_WR(bp, reg_offset, val);
2656
2657 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2658 (attn & HW_INTERRUT_ASSERT_SET_1));
2659 bnx2x_panic();
2660 }
2661 }
2662
2663 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2664 {
2665 u32 val;
2666
2667 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2668
2669 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2670 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2671 /* CFC error attention */
2672 if (val & 0x2)
2673 BNX2X_ERR("FATAL error from CFC\n");
2674 }
2675
2676 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2677
2678 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2679 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2680 /* RQ_USDMDP_FIFO_OVERFLOW */
2681 if (val & 0x18000)
2682 BNX2X_ERR("FATAL error from PXP\n");
2683 }
2684
2685 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2686
2687 int port = BP_PORT(bp);
2688 int reg_offset;
2689
2690 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2691 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2692
2693 val = REG_RD(bp, reg_offset);
2694 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2695 REG_WR(bp, reg_offset, val);
2696
2697 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2698 (attn & HW_INTERRUT_ASSERT_SET_2));
2699 bnx2x_panic();
2700 }
2701 }
2702
2703 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2704 {
2705 u32 val;
2706
2707 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2708
2709 if (attn & BNX2X_PMF_LINK_ASSERT) {
2710 int func = BP_FUNC(bp);
2711
2712 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2713 bnx2x__link_status_update(bp);
2714 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2715 DRV_STATUS_PMF)
2716 bnx2x_pmf_update(bp);
2717
2718 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2719
2720 BNX2X_ERR("MC assert!\n");
2721 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2722 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2723 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2724 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2725 bnx2x_panic();
2726
2727 } else if (attn & BNX2X_MCP_ASSERT) {
2728
2729 BNX2X_ERR("MCP assert!\n");
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2731 bnx2x_fw_dump(bp);
2732
2733 } else
2734 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2735 }
2736
2737 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2738 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2739 if (attn & BNX2X_GRC_TIMEOUT) {
2740 val = CHIP_IS_E1H(bp) ?
2741 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2742 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2743 }
2744 if (attn & BNX2X_GRC_RSV) {
2745 val = CHIP_IS_E1H(bp) ?
2746 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2747 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2748 }
2749 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2750 }
2751 }
2752
2753 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2754 {
2755 struct attn_route attn;
2756 struct attn_route group_mask;
2757 int port = BP_PORT(bp);
2758 int index;
2759 u32 reg_addr;
2760 u32 val;
2761 u32 aeu_mask;
2762
2763 /* need to take HW lock because MCP or other port might also
2764 try to handle this event */
2765 bnx2x_acquire_alr(bp);
2766
2767 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2768 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2769 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2770 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2771 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2772 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2773
2774 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2775 if (deasserted & (1 << index)) {
2776 group_mask = bp->attn_group[index];
2777
2778 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2779 index, group_mask.sig[0], group_mask.sig[1],
2780 group_mask.sig[2], group_mask.sig[3]);
2781
2782 bnx2x_attn_int_deasserted3(bp,
2783 attn.sig[3] & group_mask.sig[3]);
2784 bnx2x_attn_int_deasserted1(bp,
2785 attn.sig[1] & group_mask.sig[1]);
2786 bnx2x_attn_int_deasserted2(bp,
2787 attn.sig[2] & group_mask.sig[2]);
2788 bnx2x_attn_int_deasserted0(bp,
2789 attn.sig[0] & group_mask.sig[0]);
2790
2791 if ((attn.sig[0] & group_mask.sig[0] &
2792 HW_PRTY_ASSERT_SET_0) ||
2793 (attn.sig[1] & group_mask.sig[1] &
2794 HW_PRTY_ASSERT_SET_1) ||
2795 (attn.sig[2] & group_mask.sig[2] &
2796 HW_PRTY_ASSERT_SET_2))
2797 BNX2X_ERR("FATAL HW block parity attention\n");
2798 }
2799 }
2800
2801 bnx2x_release_alr(bp);
2802
2803 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2804
2805 val = ~deasserted;
2806 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2807 val, reg_addr);
2808 REG_WR(bp, reg_addr, val);
2809
2810 if (~bp->attn_state & deasserted)
2811 BNX2X_ERR("IGU ERROR\n");
2812
2813 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2814 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2815
2816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2817 aeu_mask = REG_RD(bp, reg_addr);
2818
2819 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2820 aeu_mask, deasserted);
2821 aeu_mask |= (deasserted & 0xff);
2822 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2823
2824 REG_WR(bp, reg_addr, aeu_mask);
2825 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2826
2827 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2828 bp->attn_state &= ~deasserted;
2829 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2830 }
2831
2832 static void bnx2x_attn_int(struct bnx2x *bp)
2833 {
2834 /* read local copy of bits */
2835 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2836 attn_bits);
2837 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2838 attn_bits_ack);
2839 u32 attn_state = bp->attn_state;
2840
2841 /* look for changed bits */
2842 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2843 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2844
2845 DP(NETIF_MSG_HW,
2846 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2847 attn_bits, attn_ack, asserted, deasserted);
2848
2849 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2850 BNX2X_ERR("BAD attention state\n");
2851
2852 /* handle bits that were raised */
2853 if (asserted)
2854 bnx2x_attn_int_asserted(bp, asserted);
2855
2856 if (deasserted)
2857 bnx2x_attn_int_deasserted(bp, deasserted);
2858 }
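/* A small worked example of the assert/de-assert arithmetic above,
with hypothetical bit patterns: attn_bits = 0100b, attn_ack = 0110b,
attn_state = 0110b gives
  asserted   = attn_bits & ~attn_ack & ~attn_state = 0000b
  deasserted = ~attn_bits & attn_ack & attn_state  = 0010b
so only bit 1 is handled, as a de-assertion. */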
2859
2860 static void bnx2x_sp_task(struct work_struct *work)
2861 {
2862 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2863 u16 status;
2864
2865
2866 /* Return here if interrupt is disabled */
2867 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2869 return;
2870 }
2871
2872 status = bnx2x_update_dsb_idx(bp);
2873 /* if (status == 0) */
2874 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2875
2876 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2877
2878 /* HW attentions */
2879 if (status & 0x1)
2880 bnx2x_attn_int(bp);
2881
2882 /* CStorm events: query_stats, port delete ramrod */
2883 if (status & 0x2)
2884 bp->stats_pending = 0;
2885
2886 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2887 IGU_INT_NOP, 1);
2888 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2889 IGU_INT_NOP, 1);
2890 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2891 IGU_INT_NOP, 1);
2892 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2893 IGU_INT_NOP, 1);
2894 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2895 IGU_INT_ENABLE, 1);
2896
2897 }
2898
2899 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2900 {
2901 struct net_device *dev = dev_instance;
2902 struct bnx2x *bp = netdev_priv(dev);
2903
2904 /* Return here if interrupt is disabled */
2905 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2906 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2907 return IRQ_HANDLED;
2908 }
2909
2910 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2911
2912 #ifdef BNX2X_STOP_ON_ERROR
2913 if (unlikely(bp->panic))
2914 return IRQ_HANDLED;
2915 #endif
2916
2917 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2918
2919 return IRQ_HANDLED;
2920 }
2921
2922 /* end of slow path */
2923
2924 /* Statistics */
2925
2926 /****************************************************************************
2927 * Macros
2928 ****************************************************************************/
2929
2930 /* sum[hi:lo] += add[hi:lo] */
2931 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2932 do { \
2933 s_lo += a_lo; \
2934 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2935 } while (0)
2936
2937 /* difference = minuend - subtrahend */
2938 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2939 do { \
2940 if (m_lo < s_lo) { \
2941 /* underflow */ \
2942 d_hi = m_hi - s_hi; \
2943 if (d_hi > 0) { \
2944 /* we can 'loan' 1 */ \
2945 d_hi--; \
2946 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2947 } else { \
2948 /* m_hi <= s_hi */ \
2949 d_hi = 0; \
2950 d_lo = 0; \
2951 } \
2952 } else { \
2953 /* m_lo >= s_lo */ \
2954 if (m_hi < s_hi) { \
2955 d_hi = 0; \
2956 d_lo = 0; \
2957 } else { \
2958 /* m_hi >= s_hi */ \
2959 d_hi = m_hi - s_hi; \
2960 d_lo = m_lo - s_lo; \
2961 } \
2962 } \
2963 } while (0)
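/* Borrow example with hypothetical values: minuend 0x1_00000005,
subtrahend 0x0_0000000A.  m_lo < s_lo, d_hi = 1 - 0 > 0, so d_hi is
decremented to 0 and d_lo = 5 + (UINT_MAX - 10) + 1 = 0xFFFFFFFB,
i.e. the correct 64-bit difference 0x00000000FFFFFFFB. */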
2964
2965 #define UPDATE_STAT64(s, t) \
2966 do { \
2967 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2968 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2969 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2970 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2971 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2972 pstats->mac_stx[1].t##_lo, diff.lo); \
2973 } while (0)
2974
2975 #define UPDATE_STAT64_NIG(s, t) \
2976 do { \
2977 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2978 diff.lo, new->s##_lo, old->s##_lo); \
2979 ADD_64(estats->t##_hi, diff.hi, \
2980 estats->t##_lo, diff.lo); \
2981 } while (0)
2982
2983 /* sum[hi:lo] += add */
2984 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2985 do { \
2986 s_lo += a; \
2987 s_hi += (s_lo < a) ? 1 : 0; \
2988 } while (0)
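/* Carry example with hypothetical values: s = 0x0_FFFFFFFE, a = 5.
s_lo wraps to 3; the (s_lo < a) test detects the wrap-around and
bumps s_hi, giving 0x1_00000003. */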
2989
2990 #define UPDATE_EXTEND_STAT(s) \
2991 do { \
2992 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2993 pstats->mac_stx[1].s##_lo, \
2994 new->s); \
2995 } while (0)
2996
2997 #define UPDATE_EXTEND_TSTAT(s, t) \
2998 do { \
2999 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3000 old_tclient->s = le32_to_cpu(tclient->s); \
3001 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3002 } while (0)
3003
3004 #define UPDATE_EXTEND_XSTAT(s, t) \
3005 do { \
3006 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007 old_xclient->s = le32_to_cpu(xclient->s); \
3008 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3009 } while (0)
3010
3011 /*
3012 * General service functions
3013 */
3014
3015 static inline long bnx2x_hilo(u32 *hiref)
3016 {
3017 u32 lo = *(hiref + 1);
3018 #if (BITS_PER_LONG == 64)
3019 u32 hi = *hiref;
3020
3021 return HILO_U64(hi, lo);
3022 #else
3023 return lo;
3024 #endif
3025 }
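/* On 64-bit kernels this folds the {hi, lo} pair into one long via
HILO_U64; on 32-bit kernels only the low word fits in a long, so the
high word is dropped.  E.g. hi = 1, lo = 2 yields 0x100000002 on
64-bit but just 2 on 32-bit. */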
3026
3027 /*
3028 * Init service functions
3029 */
3030
3031 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3032 {
3033 if (!bp->stats_pending) {
3034 struct eth_query_ramrod_data ramrod_data = {0};
3035 int rc;
3036
3037 ramrod_data.drv_counter = bp->stats_counter++;
3038 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3039 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3040
3041 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3042 ((u32 *)&ramrod_data)[1],
3043 ((u32 *)&ramrod_data)[0], 0);
3044 if (rc == 0) {
3045 /* stats ramrod has its own slot on the spq */
3046 bp->spq_left++;
3047 bp->stats_pending = 1;
3048 }
3049 }
3050 }
3051
3052 static void bnx2x_stats_init(struct bnx2x *bp)
3053 {
3054 int port = BP_PORT(bp);
3055
3056 bp->executer_idx = 0;
3057 bp->stats_counter = 0;
3058
3059 /* port stats */
3060 if (!BP_NOMCP(bp))
3061 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3062 else
3063 bp->port.port_stx = 0;
3064 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3065
3066 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3067 bp->port.old_nig_stats.brb_discard =
3068 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3069 bp->port.old_nig_stats.brb_truncate =
3070 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3071 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3072 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3073 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3074 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3075
3076 /* function stats */
3077 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3078 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3079 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3080 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3081
3082 bp->stats_state = STATS_STATE_DISABLED;
3083 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3084 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3085 }
3086
3087 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3088 {
3089 struct dmae_command *dmae = &bp->stats_dmae;
3090 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3091
3092 *stats_comp = DMAE_COMP_VAL;
3093
3094 /* loader */
3095 if (bp->executer_idx) {
3096 int loader_idx = PMF_DMAE_C(bp);
3097
3098 memset(dmae, 0, sizeof(struct dmae_command));
3099
3100 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3101 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3102 DMAE_CMD_DST_RESET |
3103 #ifdef __BIG_ENDIAN
3104 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3105 #else
3106 DMAE_CMD_ENDIANITY_DW_SWAP |
3107 #endif
3108 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3109 DMAE_CMD_PORT_0) |
3110 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3111 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3112 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3113 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3114 sizeof(struct dmae_command) *
3115 (loader_idx + 1)) >> 2;
3116 dmae->dst_addr_hi = 0;
3117 dmae->len = sizeof(struct dmae_command) >> 2;
3118 if (CHIP_IS_E1(bp))
3119 dmae->len--;
3120 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3121 dmae->comp_addr_hi = 0;
3122 dmae->comp_val = 1;
3123
3124 *stats_comp = 0;
3125 bnx2x_post_dmae(bp, dmae, loader_idx);
3126
3127 } else if (bp->func_stx) {
3128 *stats_comp = 0;
3129 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3130 }
3131 }
3132
3133 static int bnx2x_stats_comp(struct bnx2x *bp)
3134 {
3135 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3136 int cnt = 10;
3137
3138 might_sleep();
3139 while (*stats_comp != DMAE_COMP_VAL) {
3140 if (!cnt) {
3141 BNX2X_ERR("timeout waiting for stats to finish\n");
3142 break;
3143 }
3144 cnt--;
3145 msleep(1);
3146 }
3147 return 1;
3148 }
3149
3150 /*
3151 * Statistics service functions
3152 */
3153
3154 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3155 {
3156 struct dmae_command *dmae;
3157 u32 opcode;
3158 int loader_idx = PMF_DMAE_C(bp);
3159 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3160
3161 /* sanity */
3162 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3163 BNX2X_ERR("BUG!\n");
3164 return;
3165 }
3166
3167 bp->executer_idx = 0;
3168
3169 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3170 DMAE_CMD_C_ENABLE |
3171 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3172 #ifdef __BIG_ENDIAN
3173 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3174 #else
3175 DMAE_CMD_ENDIANITY_DW_SWAP |
3176 #endif
3177 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3178 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3179
3180 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3182 dmae->src_addr_lo = bp->port.port_stx >> 2;
3183 dmae->src_addr_hi = 0;
3184 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3185 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3186 dmae->len = DMAE_LEN32_RD_MAX;
3187 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3188 dmae->comp_addr_hi = 0;
3189 dmae->comp_val = 1;
3190
3191 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3192 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3193 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3194 dmae->src_addr_hi = 0;
3195 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3196 DMAE_LEN32_RD_MAX * 4);
3197 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3198 DMAE_LEN32_RD_MAX * 4);
3199 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3200 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3201 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3202 dmae->comp_val = DMAE_COMP_VAL;
3203
3204 *stats_comp = 0;
3205 bnx2x_hw_stats_post(bp);
3206 bnx2x_stats_comp(bp);
3207 }
3208
3209 static void bnx2x_port_stats_init(struct bnx2x *bp)
3210 {
3211 struct dmae_command *dmae;
3212 int port = BP_PORT(bp);
3213 int vn = BP_E1HVN(bp);
3214 u32 opcode;
3215 int loader_idx = PMF_DMAE_C(bp);
3216 u32 mac_addr;
3217 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3218
3219 /* sanity */
3220 if (!bp->link_vars.link_up || !bp->port.pmf) {
3221 BNX2X_ERR("BUG!\n");
3222 return;
3223 }
3224
3225 bp->executer_idx = 0;
3226
3227 /* MCP */
3228 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3229 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3230 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3231 #ifdef __BIG_ENDIAN
3232 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3233 #else
3234 DMAE_CMD_ENDIANITY_DW_SWAP |
3235 #endif
3236 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3237 (vn << DMAE_CMD_E1HVN_SHIFT));
3238
3239 if (bp->port.port_stx) {
3240
3241 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242 dmae->opcode = opcode;
3243 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3244 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3245 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3246 dmae->dst_addr_hi = 0;
3247 dmae->len = sizeof(struct host_port_stats) >> 2;
3248 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3249 dmae->comp_addr_hi = 0;
3250 dmae->comp_val = 1;
3251 }
3252
3253 if (bp->func_stx) {
3254
3255 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3256 dmae->opcode = opcode;
3257 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3258 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3259 dmae->dst_addr_lo = bp->func_stx >> 2;
3260 dmae->dst_addr_hi = 0;
3261 dmae->len = sizeof(struct host_func_stats) >> 2;
3262 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3263 dmae->comp_addr_hi = 0;
3264 dmae->comp_val = 1;
3265 }
3266
3267 /* MAC */
3268 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3269 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3270 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3271 #ifdef __BIG_ENDIAN
3272 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3273 #else
3274 DMAE_CMD_ENDIANITY_DW_SWAP |
3275 #endif
3276 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3277 (vn << DMAE_CMD_E1HVN_SHIFT));
3278
3279 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3280
3281 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3282 NIG_REG_INGRESS_BMAC0_MEM);
3283
3284 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3285 BIGMAC_REGISTER_TX_STAT_GTBYT */
3286 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3287 dmae->opcode = opcode;
3288 dmae->src_addr_lo = (mac_addr +
3289 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3290 dmae->src_addr_hi = 0;
3291 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3292 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3293 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3294 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3295 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296 dmae->comp_addr_hi = 0;
3297 dmae->comp_val = 1;
3298
3299 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3300 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3301 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3302 dmae->opcode = opcode;
3303 dmae->src_addr_lo = (mac_addr +
3304 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3305 dmae->src_addr_hi = 0;
3306 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3307 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3308 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3309 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3310 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3311 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3312 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313 dmae->comp_addr_hi = 0;
3314 dmae->comp_val = 1;
3315
3316 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3317
3318 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3319
3320 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322 dmae->opcode = opcode;
3323 dmae->src_addr_lo = (mac_addr +
3324 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3325 dmae->src_addr_hi = 0;
3326 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3327 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3328 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3329 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3330 dmae->comp_addr_hi = 0;
3331 dmae->comp_val = 1;
3332
3333 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3334 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335 dmae->opcode = opcode;
3336 dmae->src_addr_lo = (mac_addr +
3337 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3338 dmae->src_addr_hi = 0;
3339 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3340 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3341 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3342 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3343 dmae->len = 1;
3344 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3345 dmae->comp_addr_hi = 0;
3346 dmae->comp_val = 1;
3347
3348 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3349 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3350 dmae->opcode = opcode;
3351 dmae->src_addr_lo = (mac_addr +
3352 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3353 dmae->src_addr_hi = 0;
3354 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3355 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3356 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3357 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3358 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3359 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3360 dmae->comp_addr_hi = 0;
3361 dmae->comp_val = 1;
3362 }
3363
3364 /* NIG */
3365 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366 dmae->opcode = opcode;
3367 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3368 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3369 dmae->src_addr_hi = 0;
3370 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3371 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3372 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3373 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3374 dmae->comp_addr_hi = 0;
3375 dmae->comp_val = 1;
3376
3377 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3378 dmae->opcode = opcode;
3379 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3380 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3381 dmae->src_addr_hi = 0;
3382 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3383 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3384 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3385 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3386 dmae->len = (2*sizeof(u32)) >> 2;
3387 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3388 dmae->comp_addr_hi = 0;
3389 dmae->comp_val = 1;
3390
3391 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3392 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3393 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3394 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3395 #ifdef __BIG_ENDIAN
3396 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3397 #else
3398 DMAE_CMD_ENDIANITY_DW_SWAP |
3399 #endif
3400 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3401 (vn << DMAE_CMD_E1HVN_SHIFT));
3402 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3403 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3404 dmae->src_addr_hi = 0;
3405 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3406 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3407 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3408 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3409 dmae->len = (2*sizeof(u32)) >> 2;
3410 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3411 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3412 dmae->comp_val = DMAE_COMP_VAL;
3413
3414 *stats_comp = 0;
3415 }
3416
3417 static void bnx2x_func_stats_init(struct bnx2x *bp)
3418 {
3419 struct dmae_command *dmae = &bp->stats_dmae;
3420 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3421
3422 /* sanity */
3423 if (!bp->func_stx) {
3424 BNX2X_ERR("BUG!\n");
3425 return;
3426 }
3427
3428 bp->executer_idx = 0;
3429 memset(dmae, 0, sizeof(struct dmae_command));
3430
3431 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3432 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3433 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3434 #ifdef __BIG_ENDIAN
3435 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3436 #else
3437 DMAE_CMD_ENDIANITY_DW_SWAP |
3438 #endif
3439 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3440 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3441 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3442 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3443 dmae->dst_addr_lo = bp->func_stx >> 2;
3444 dmae->dst_addr_hi = 0;
3445 dmae->len = sizeof(struct host_func_stats) >> 2;
3446 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3447 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3448 dmae->comp_val = DMAE_COMP_VAL;
3449
3450 *stats_comp = 0;
3451 }
3452
3453 static void bnx2x_stats_start(struct bnx2x *bp)
3454 {
3455 if (bp->port.pmf)
3456 bnx2x_port_stats_init(bp);
3457
3458 else if (bp->func_stx)
3459 bnx2x_func_stats_init(bp);
3460
3461 bnx2x_hw_stats_post(bp);
3462 bnx2x_storm_stats_post(bp);
3463 }
3464
3465 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3466 {
3467 bnx2x_stats_comp(bp);
3468 bnx2x_stats_pmf_update(bp);
3469 bnx2x_stats_start(bp);
3470 }
3471
3472 static void bnx2x_stats_restart(struct bnx2x *bp)
3473 {
3474 bnx2x_stats_comp(bp);
3475 bnx2x_stats_start(bp);
3476 }
3477
3478 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3479 {
3480 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3481 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482 struct regpair diff;
3483
3484 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3485 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3486 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3487 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3488 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3489 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3490 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3491 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3492 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3493 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3494 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3495 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3496 UPDATE_STAT64(tx_stat_gt127,
3497 tx_stat_etherstatspkts65octetsto127octets);
3498 UPDATE_STAT64(tx_stat_gt255,
3499 tx_stat_etherstatspkts128octetsto255octets);
3500 UPDATE_STAT64(tx_stat_gt511,
3501 tx_stat_etherstatspkts256octetsto511octets);
3502 UPDATE_STAT64(tx_stat_gt1023,
3503 tx_stat_etherstatspkts512octetsto1023octets);
3504 UPDATE_STAT64(tx_stat_gt1518,
3505 tx_stat_etherstatspkts1024octetsto1522octets);
3506 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3507 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3508 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3509 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3510 UPDATE_STAT64(tx_stat_gterr,
3511 tx_stat_dot3statsinternalmactransmiterrors);
3512 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3513 }
3514
3515 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3516 {
3517 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3518 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3519
3520 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3521 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3522 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3523 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3524 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3525 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3526 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3527 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3528 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3529 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3530 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3531 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3532 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3533 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3534 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3535 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3536 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3537 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3538 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3539 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3540 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3541 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3542 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3543 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3544 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3545 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3546 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3547 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3548 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3549 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3550 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3551 }
3552
3553 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3554 {
3555 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3556 struct nig_stats *old = &(bp->port.old_nig_stats);
3557 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3558 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3559 struct regpair diff;
3560
3561 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3562 bnx2x_bmac_stats_update(bp);
3563
3564 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3565 bnx2x_emac_stats_update(bp);
3566
3567 else { /* unreached */
3568 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3569 return -1;
3570 }
3571
3572 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3573 new->brb_discard - old->brb_discard);
3574 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3575 new->brb_truncate - old->brb_truncate);
3576
3577 UPDATE_STAT64_NIG(egress_mac_pkt0,
3578 etherstatspkts1024octetsto1522octets);
3579 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3580
3581 memcpy(old, new, sizeof(struct nig_stats));
3582
3583 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3584 sizeof(struct mac_stx));
3585 estats->brb_drop_hi = pstats->brb_drop_hi;
3586 estats->brb_drop_lo = pstats->brb_drop_lo;
3587
3588 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3589
3590 return 0;
3591 }
3592
3593 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3594 {
3595 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3596 int cl_id = BP_CL_ID(bp);
3597 struct tstorm_per_port_stats *tport =
3598 &stats->tstorm_common.port_statistics;
3599 struct tstorm_per_client_stats *tclient =
3600 &stats->tstorm_common.client_statistics[cl_id];
3601 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3602 struct xstorm_per_client_stats *xclient =
3603 &stats->xstorm_common.client_statistics[cl_id];
3604 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3605 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3606 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3607 u32 diff;
3608
3609 /* are storm stats valid? */
3610 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3611 bp->stats_counter) {
3612 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3613 " tstorm counter (%d) != stats_counter (%d)\n",
3614 tclient->stats_counter, bp->stats_counter);
3615 return -1;
3616 }
3617 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3618 bp->stats_counter) {
3619 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3620 " xstorm counter (%d) != stats_counter (%d)\n",
3621 xclient->stats_counter, bp->stats_counter);
3622 return -2;
3623 }
3624
3625 fstats->total_bytes_received_hi =
3626 fstats->valid_bytes_received_hi =
3627 le32_to_cpu(tclient->total_rcv_bytes.hi);
3628 fstats->total_bytes_received_lo =
3629 fstats->valid_bytes_received_lo =
3630 le32_to_cpu(tclient->total_rcv_bytes.lo);
3631
3632 estats->error_bytes_received_hi =
3633 le32_to_cpu(tclient->rcv_error_bytes.hi);
3634 estats->error_bytes_received_lo =
3635 le32_to_cpu(tclient->rcv_error_bytes.lo);
3636 ADD_64(estats->error_bytes_received_hi,
3637 estats->rx_stat_ifhcinbadoctets_hi,
3638 estats->error_bytes_received_lo,
3639 estats->rx_stat_ifhcinbadoctets_lo);
3640
3641 ADD_64(fstats->total_bytes_received_hi,
3642 estats->error_bytes_received_hi,
3643 fstats->total_bytes_received_lo,
3644 estats->error_bytes_received_lo);
3645
3646 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3647 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3648 total_multicast_packets_received);
3649 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3650 total_broadcast_packets_received);
3651
3652 fstats->total_bytes_transmitted_hi =
3653 le32_to_cpu(xclient->total_sent_bytes.hi);
3654 fstats->total_bytes_transmitted_lo =
3655 le32_to_cpu(xclient->total_sent_bytes.lo);
3656
3657 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3658 total_unicast_packets_transmitted);
3659 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3660 total_multicast_packets_transmitted);
3661 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3662 total_broadcast_packets_transmitted);
3663
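/* copy everything between the host_func_stats_start/_end markers into
 * eth_stats; the 2*sizeof(u32) excludes the two marker words
 */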
3664 memcpy(estats, &(fstats->total_bytes_received_hi),
3665 sizeof(struct host_func_stats) - 2*sizeof(u32));
3666
3667 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3668 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3669 estats->brb_truncate_discard =
3670 le32_to_cpu(tport->brb_truncate_discard);
3671 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3672
3673 old_tclient->rcv_unicast_bytes.hi =
3674 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3675 old_tclient->rcv_unicast_bytes.lo =
3676 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3677 old_tclient->rcv_broadcast_bytes.hi =
3678 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3679 old_tclient->rcv_broadcast_bytes.lo =
3680 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3681 old_tclient->rcv_multicast_bytes.hi =
3682 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3683 old_tclient->rcv_multicast_bytes.lo =
3684 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3685 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3686
3687 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3688 old_tclient->packets_too_big_discard =
3689 le32_to_cpu(tclient->packets_too_big_discard);
3690 estats->no_buff_discard =
3691 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3692 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3693
3694 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3695 old_xclient->unicast_bytes_sent.hi =
3696 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3697 old_xclient->unicast_bytes_sent.lo =
3698 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3699 old_xclient->multicast_bytes_sent.hi =
3700 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3701 old_xclient->multicast_bytes_sent.lo =
3702 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3703 old_xclient->broadcast_bytes_sent.hi =
3704 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3705 old_xclient->broadcast_bytes_sent.lo =
3706 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3707
3708 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3709
3710 return 0;
3711 }
3712
3713 static void bnx2x_net_stats_update(struct bnx2x *bp)
3714 {
3715 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3716 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3717 struct net_device_stats *nstats = &bp->dev->stats;
3718
3719 nstats->rx_packets =
3720 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3721 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3722 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3723
3724 nstats->tx_packets =
3725 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3726 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3727 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3728
3729 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3730
3731 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3732
3733 nstats->rx_dropped = old_tclient->checksum_discard +
3734 estats->mac_discard;
3735 nstats->tx_dropped = 0;
3736
3737 nstats->multicast =
3738 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3739
3740 nstats->collisions =
3741 estats->tx_stat_dot3statssinglecollisionframes_lo +
3742 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3743 estats->tx_stat_dot3statslatecollisions_lo +
3744 estats->tx_stat_dot3statsexcessivecollisions_lo;
3745
3746 estats->jabber_packets_received =
3747 old_tclient->packets_too_big_discard +
3748 estats->rx_stat_dot3statsframestoolong_lo;
3749
3750 nstats->rx_length_errors =
3751 estats->rx_stat_etherstatsundersizepkts_lo +
3752 estats->jabber_packets_received;
3753 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3754 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3755 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3756 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3757 nstats->rx_missed_errors = estats->xxoverflow_discard;
3758
3759 nstats->rx_errors = nstats->rx_length_errors +
3760 nstats->rx_over_errors +
3761 nstats->rx_crc_errors +
3762 nstats->rx_frame_errors +
3763 nstats->rx_fifo_errors +
3764 nstats->rx_missed_errors;
3765
3766 nstats->tx_aborted_errors =
3767 estats->tx_stat_dot3statslatecollisions_lo +
3768 estats->tx_stat_dot3statsexcessivecollisions_lo;
3769 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3770 nstats->tx_fifo_errors = 0;
3771 nstats->tx_heartbeat_errors = 0;
3772 nstats->tx_window_errors = 0;
3773
3774 nstats->tx_errors = nstats->tx_aborted_errors +
3775 nstats->tx_carrier_errors;
3776 }
3777
3778 static void bnx2x_stats_update(struct bnx2x *bp)
3779 {
3780 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3781 int update = 0;
3782
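/* the DMAE engine writes DMAE_COMP_VAL into stats_comp when the copy
 * of the hardware statistics completes; anything else means the copy
 * is still in flight, so skip this round
 */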
3783 if (*stats_comp != DMAE_COMP_VAL)
3784 return;
3785
3786 if (bp->port.pmf)
3787 update = (bnx2x_hw_stats_update(bp) == 0);
3788
3789 update |= (bnx2x_storm_stats_update(bp) == 0);
3790
3791 if (update)
3792 bnx2x_net_stats_update(bp);
3793
3794 else {
3795 if (bp->stats_pending) {
3796 bp->stats_pending++;
3797 if (bp->stats_pending == 3) {
3798 BNX2X_ERR("storm stats were not updated for 3 times\n");
3799 bnx2x_panic();
3800 return;
3801 }
3802 }
3803 }
3804
3805 if (bp->msglevel & NETIF_MSG_TIMER) {
3806 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3807 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3808 struct net_device_stats *nstats = &bp->dev->stats;
3809 int i;
3810
3811 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3812 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3813 " tx pkt (%lx)\n",
3814 bnx2x_tx_avail(bp->fp),
3815 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3816 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3817 " rx pkt (%lx)\n",
3818 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3819 bp->fp->rx_comp_cons),
3820 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3821 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3822 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3823 estats->driver_xoff, estats->brb_drop_lo);
3824 printk(KERN_DEBUG "tstats: checksum_discard %u "
3825 "packets_too_big_discard %u no_buff_discard %u "
3826 "mac_discard %u mac_filter_discard %u "
3827 "xxovrflow_discard %u brb_truncate_discard %u "
3828 "ttl0_discard %u\n",
3829 old_tclient->checksum_discard,
3830 old_tclient->packets_too_big_discard,
3831 old_tclient->no_buff_discard, estats->mac_discard,
3832 estats->mac_filter_discard, estats->xxoverflow_discard,
3833 estats->brb_truncate_discard,
3834 old_tclient->ttl0_discard);
3835
3836 for_each_queue(bp, i) {
3837 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3838 bnx2x_fp(bp, i, tx_pkt),
3839 bnx2x_fp(bp, i, rx_pkt),
3840 bnx2x_fp(bp, i, rx_calls));
3841 }
3842 }
3843
3844 bnx2x_hw_stats_post(bp);
3845 bnx2x_storm_stats_post(bp);
3846 }
3847
3848 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3849 {
3850 struct dmae_command *dmae;
3851 u32 opcode;
3852 int loader_idx = PMF_DMAE_C(bp);
3853 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3854
3855 bp->executer_idx = 0;
3856
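/* build the common DMAE opcode: PCI -> GRC copy with completion
 * enabled, source/destination address counters reset, byte swapping
 * chosen by host endianness, and this function's port/VN encoded in
 */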
3857 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3858 DMAE_CMD_C_ENABLE |
3859 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3860 #ifdef __BIG_ENDIAN
3861 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3862 #else
3863 DMAE_CMD_ENDIANITY_DW_SWAP |
3864 #endif
3865 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3866 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3867
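/* when function stats exist as well, the port-stats command completes
 * into a DMAE "go" register so that it triggers the function-stats
 * command; otherwise it completes directly into stats_comp
 */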
3868 if (bp->port.port_stx) {
3869
3870 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3871 if (bp->func_stx)
3872 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3873 else
3874 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3875 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3876 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3877 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3878 dmae->dst_addr_hi = 0;
3879 dmae->len = sizeof(struct host_port_stats) >> 2;
3880 if (bp->func_stx) {
3881 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3882 dmae->comp_addr_hi = 0;
3883 dmae->comp_val = 1;
3884 } else {
3885 dmae->comp_addr_lo =
3886 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3887 dmae->comp_addr_hi =
3888 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3889 dmae->comp_val = DMAE_COMP_VAL;
3890
3891 *stats_comp = 0;
3892 }
3893 }
3894
3895 if (bp->func_stx) {
3896
3897 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3898 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3899 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3900 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3901 dmae->dst_addr_lo = bp->func_stx >> 2;
3902 dmae->dst_addr_hi = 0;
3903 dmae->len = sizeof(struct host_func_stats) >> 2;
3904 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3905 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3906 dmae->comp_val = DMAE_COMP_VAL;
3907
3908 *stats_comp = 0;
3909 }
3910 }
3911
3912 static void bnx2x_stats_stop(struct bnx2x *bp)
3913 {
3914 int update = 0;
3915
3916 bnx2x_stats_comp(bp);
3917
3918 if (bp->port.pmf)
3919 update = (bnx2x_hw_stats_update(bp) == 0);
3920
3921 update |= (bnx2x_storm_stats_update(bp) == 0);
3922
3923 if (update) {
3924 bnx2x_net_stats_update(bp);
3925
3926 if (bp->port.pmf)
3927 bnx2x_port_stats_stop(bp);
3928
3929 bnx2x_hw_stats_post(bp);
3930 bnx2x_stats_comp(bp);
3931 }
3932 }
3933
3934 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3935 {
3936 }
3937
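/* statistics state machine: rows are the current state (DISABLED or
 * ENABLED), columns the event (PMF change, link up, timer update,
 * stop); each cell gives the action to run and the next state
 */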
3938 static const struct {
3939 void (*action)(struct bnx2x *bp);
3940 enum bnx2x_stats_state next_state;
3941 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3942 /* state event */
3943 {
3944 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3945 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3946 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3947 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3948 },
3949 {
3950 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3951 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3952 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3953 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3954 }
3955 };
3956
3957 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3958 {
3959 enum bnx2x_stats_state state = bp->stats_state;
3960
3961 bnx2x_stats_stm[state][event].action(bp);
3962 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3963
3964 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3965 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3966 state, event, bp->stats_state);
3967 }
3968
3969 static void bnx2x_timer(unsigned long data)
3970 {
3971 struct bnx2x *bp = (struct bnx2x *) data;
3972
3973 if (!netif_running(bp->dev))
3974 return;
3975
3976 if (atomic_read(&bp->intr_sem) != 0)
3977 goto timer_restart;
3978
3979 if (poll) {
3980 struct bnx2x_fastpath *fp = &bp->fp[0];
3981 
3982 /* in poll mode the rings are serviced from the timer */
3983 bnx2x_tx_int(fp, 1000);
3984 bnx2x_rx_int(fp, 1000);
3985 }
3986
3987 if (!BP_NOMCP(bp)) {
3988 int func = BP_FUNC(bp);
3989 u32 drv_pulse;
3990 u32 mcp_pulse;
3991
3992 ++bp->fw_drv_pulse_wr_seq;
3993 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3994 /* TBD - add SYSTEM_TIME */
3995 drv_pulse = bp->fw_drv_pulse_wr_seq;
3996 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3997
3998 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3999 MCP_PULSE_SEQ_MASK);
4000 /* The delta between driver pulse and mcp response
4001 * should be 1 (before mcp response) or 0 (after mcp response)
4002 */
4003 if ((drv_pulse != mcp_pulse) &&
4004 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4005 /* someone lost a heartbeat... */
4006 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4007 drv_pulse, mcp_pulse);
4008 }
4009 }
4010
4011 if ((bp->state == BNX2X_STATE_OPEN) ||
4012 (bp->state == BNX2X_STATE_DISABLED))
4013 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4014
4015 timer_restart:
4016 mod_timer(&bp->timer, jiffies + bp->current_interval);
4017 }
4018
4019 /* end of Statistics */
4020
4021 /* nic init */
4022
4023 /*
4024 * nic init service functions
4025 */
4026
4027 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4028 {
4029 int port = BP_PORT(bp);
4030
4031 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4032 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4033 sizeof(struct ustorm_status_block)/4);
4034 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4035 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4036 sizeof(struct cstorm_status_block)/4);
4037 }
4038
4039 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4040 dma_addr_t mapping, int sb_id)
4041 {
4042 int port = BP_PORT(bp);
4043 int func = BP_FUNC(bp);
4044 int index;
4045 u64 section;
4046
4047 /* USTORM */
4048 section = ((u64)mapping) + offsetof(struct host_status_block,
4049 u_status_block);
4050 sb->u_status_block.status_block_id = sb_id;
4051
4052 REG_WR(bp, BAR_USTRORM_INTMEM +
4053 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4054 REG_WR(bp, BAR_USTRORM_INTMEM +
4055 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4056 U64_HI(section));
4057 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4058 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4059
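/* start with host coalescing disabled on every index (1 = disable);
 * bnx2x_update_coalesce() later re-enables the indices in use
 */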
4060 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4061 REG_WR16(bp, BAR_USTRORM_INTMEM +
4062 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4063
4064 /* CSTORM */
4065 section = ((u64)mapping) + offsetof(struct host_status_block,
4066 c_status_block);
4067 sb->c_status_block.status_block_id = sb_id;
4068
4069 REG_WR(bp, BAR_CSTRORM_INTMEM +
4070 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4071 REG_WR(bp, BAR_CSTRORM_INTMEM +
4072 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4073 U64_HI(section));
4074 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4075 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4076
4077 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4078 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4079 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4080
4081 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4082 }
4083
4084 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4085 {
4086 int func = BP_FUNC(bp);
4087
4088 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4089 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4090 sizeof(struct ustorm_def_status_block)/4);
4091 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4092 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4093 sizeof(struct cstorm_def_status_block)/4);
4094 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4095 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4096 sizeof(struct xstorm_def_status_block)/4);
4097 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4098 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4099 sizeof(struct tstorm_def_status_block)/4);
4100 }
4101
4102 static void bnx2x_init_def_sb(struct bnx2x *bp,
4103 struct host_def_status_block *def_sb,
4104 dma_addr_t mapping, int sb_id)
4105 {
4106 int port = BP_PORT(bp);
4107 int func = BP_FUNC(bp);
4108 int index, val, reg_offset;
4109 u64 section;
4110
4111 /* ATTN */
4112 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4113 atten_status_block);
4114 def_sb->atten_status_block.status_block_id = sb_id;
4115
4116 bp->attn_state = 0;
4117
4118 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4119 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4120
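/* cache the four AEU enable words of each attention group; the
 * registers are laid out as four consecutive words, 0x10 bytes
 * per group
 */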
4121 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4122 bp->attn_group[index].sig[0] = REG_RD(bp,
4123 reg_offset + 0x10*index);
4124 bp->attn_group[index].sig[1] = REG_RD(bp,
4125 reg_offset + 0x4 + 0x10*index);
4126 bp->attn_group[index].sig[2] = REG_RD(bp,
4127 reg_offset + 0x8 + 0x10*index);
4128 bp->attn_group[index].sig[3] = REG_RD(bp,
4129 reg_offset + 0xc + 0x10*index);
4130 }
4131
4132 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4133 HC_REG_ATTN_MSG0_ADDR_L);
4134
4135 REG_WR(bp, reg_offset, U64_LO(section));
4136 REG_WR(bp, reg_offset + 4, U64_HI(section));
4137
4138 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4139
4140 val = REG_RD(bp, reg_offset);
4141 val |= sb_id;
4142 REG_WR(bp, reg_offset, val);
4143
4144 /* USTORM */
4145 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4146 u_def_status_block);
4147 def_sb->u_def_status_block.status_block_id = sb_id;
4148
4149 REG_WR(bp, BAR_USTRORM_INTMEM +
4150 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4151 REG_WR(bp, BAR_USTRORM_INTMEM +
4152 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4153 U64_HI(section));
4154 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4155 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4156
4157 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4158 REG_WR16(bp, BAR_USTRORM_INTMEM +
4159 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4160
4161 /* CSTORM */
4162 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4163 c_def_status_block);
4164 def_sb->c_def_status_block.status_block_id = sb_id;
4165
4166 REG_WR(bp, BAR_CSTRORM_INTMEM +
4167 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4168 REG_WR(bp, BAR_CSTRORM_INTMEM +
4169 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4170 U64_HI(section));
4171 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4172 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4173
4174 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4175 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4176 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4177
4178 /* TSTORM */
4179 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4180 t_def_status_block);
4181 def_sb->t_def_status_block.status_block_id = sb_id;
4182
4183 REG_WR(bp, BAR_TSTRORM_INTMEM +
4184 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4185 REG_WR(bp, BAR_TSTRORM_INTMEM +
4186 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4187 U64_HI(section));
4188 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4189 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4190
4191 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4192 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4193 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4194
4195 /* XSTORM */
4196 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4197 x_def_status_block);
4198 def_sb->x_def_status_block.status_block_id = sb_id;
4199
4200 REG_WR(bp, BAR_XSTRORM_INTMEM +
4201 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4202 REG_WR(bp, BAR_XSTRORM_INTMEM +
4203 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4204 U64_HI(section));
4205 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4206 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4207
4208 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4209 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4210 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4211
4212 bp->stats_pending = 0;
4213 bp->set_mac_pending = 0;
4214
4215 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4216 }
4217
4218 static void bnx2x_update_coalesce(struct bnx2x *bp)
4219 {
4220 int port = BP_PORT(bp);
4221 int i;
4222
4223 for_each_queue(bp, i) {
4224 int sb_id = bp->fp[i].sb_id;
4225
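/* rx_ticks/tx_ticks are in usec; dividing by 12 apparently converts
 * them to host-coalescing timer units, and a value of 0 disables
 * coalescing for that index (hence the HC_DISABLE writes below)
 */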
4226 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4227 REG_WR8(bp, BAR_USTRORM_INTMEM +
4228 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4229 U_SB_ETH_RX_CQ_INDEX),
4230 bp->rx_ticks/12);
4231 REG_WR16(bp, BAR_USTRORM_INTMEM +
4232 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4233 U_SB_ETH_RX_CQ_INDEX),
4234 bp->rx_ticks ? 0 : 1);
4235 REG_WR16(bp, BAR_USTRORM_INTMEM +
4236 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4237 U_SB_ETH_RX_BD_INDEX),
4238 bp->rx_ticks ? 0 : 1);
4239
4240 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4241 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4242 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4243 C_SB_ETH_TX_CQ_INDEX),
4244 bp->tx_ticks/12);
4245 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4246 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4247 C_SB_ETH_TX_CQ_INDEX),
4248 bp->tx_ticks ? 0 : 1);
4249 }
4250 }
4251
4252 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4253 struct bnx2x_fastpath *fp, int last)
4254 {
4255 int i;
4256
4257 for (i = 0; i < last; i++) {
4258 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4259 struct sk_buff *skb = rx_buf->skb;
4260
4261 if (skb == NULL) {
4262 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4263 continue;
4264 }
4265
4266 if (fp->tpa_state[i] == BNX2X_TPA_START)
4267 pci_unmap_single(bp->pdev,
4268 pci_unmap_addr(rx_buf, mapping),
4269 bp->rx_buf_size,
4270 PCI_DMA_FROMDEVICE);
4271
4272 dev_kfree_skb(skb);
4273 rx_buf->skb = NULL;
4274 }
4275 }
4276
4277 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4278 {
4279 int func = BP_FUNC(bp);
4280 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4281 ETH_MAX_AGGREGATION_QUEUES_E1H;
4282 u16 ring_prod, cqe_ring_prod;
4283 int i, j;
4284
4285 bp->rx_buf_size = bp->dev->mtu;
4286 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4287 BCM_RX_ETH_PAYLOAD_ALIGN;
4288
4289 if (bp->flags & TPA_ENABLE_FLAG) {
4290 DP(NETIF_MSG_IFUP,
4291 "rx_buf_size %d effective_mtu %d\n",
4292 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4293
4294 for_each_queue(bp, j) {
4295 struct bnx2x_fastpath *fp = &bp->fp[j];
4296
4297 for (i = 0; i < max_agg_queues; i++) {
4298 fp->tpa_pool[i].skb =
4299 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4300 if (!fp->tpa_pool[i].skb) {
4301 BNX2X_ERR("Failed to allocate TPA "
4302 "skb pool for queue[%d] - "
4303 "disabling TPA on this "
4304 "queue!\n", j);
4305 bnx2x_free_tpa_pool(bp, fp, i);
4306 fp->disable_tpa = 1;
4307 break;
4308 }
4309 /* map to this queue's pool, not fp[0]'s */
4310 pci_unmap_addr_set(&fp->tpa_pool[i],
4311 mapping, 0);
4312 fp->tpa_state[i] = BNX2X_TPA_STOP;
4313 }
4314 }
4315 }
4316
4317 for_each_queue(bp, j) {
4318 struct bnx2x_fastpath *fp = &bp->fp[j];
4319
4320 fp->rx_bd_cons = 0;
4321 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4322 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4323
4324 /* "next page" elements initialization */
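/* the final entries of every ring page are reserved as a link element
 * holding the DMA address of the following page (hence the "- 2" for
 * the 8-byte BD/SGE rings and "- 1" for the 16-byte CQE ring), making
 * each ring a circular chain of pages
 */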
4325 /* SGE ring */
4326 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4327 struct eth_rx_sge *sge;
4328
4329 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4330 sge->addr_hi =
4331 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4332 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4333 sge->addr_lo =
4334 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4335 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4336 }
4337
4338 bnx2x_init_sge_ring_bit_mask(fp);
4339
4340 /* RX BD ring */
4341 for (i = 1; i <= NUM_RX_RINGS; i++) {
4342 struct eth_rx_bd *rx_bd;
4343
4344 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4345 rx_bd->addr_hi =
4346 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4347 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4348 rx_bd->addr_lo =
4349 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4350 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4351 }
4352
4353 /* CQ ring */
4354 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4355 struct eth_rx_cqe_next_page *nextpg;
4356
4357 nextpg = (struct eth_rx_cqe_next_page *)
4358 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4359 nextpg->addr_hi =
4360 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4361 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4362 nextpg->addr_lo =
4363 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4364 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4365 }
4366
4367 /* Allocate SGEs and initialize the ring elements */
4368 for (i = 0, ring_prod = 0;
4369 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4370
4371 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4372 BNX2X_ERR("was only able to allocate "
4373 "%d rx sges\n", i);
4374 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4375 /* Cleanup already allocated elements */
4376 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4377 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4378 fp->disable_tpa = 1;
4379 ring_prod = 0;
4380 break;
4381 }
4382 ring_prod = NEXT_SGE_IDX(ring_prod);
4383 }
4384 fp->rx_sge_prod = ring_prod;
4385
4386 /* Allocate BDs and initialize BD ring */
4387 fp->rx_comp_cons = 0;
4388 cqe_ring_prod = ring_prod = 0;
4389 for (i = 0; i < bp->rx_ring_size; i++) {
4390 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4391 BNX2X_ERR("was only able to allocate "
4392 "%d rx skbs\n", i);
4393 bp->eth_stats.rx_skb_alloc_failed++;
4394 break;
4395 }
4396 ring_prod = NEXT_RX_IDX(ring_prod);
4397 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4398 WARN_ON(ring_prod <= i);
4399 }
4400
4401 fp->rx_bd_prod = ring_prod;
4402 /* must not have more available CQEs than BDs */
4403 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4404 cqe_ring_prod);
4405 fp->rx_pkt = fp->rx_calls = 0;
4406
4407 /* Warning!
4408 * this will generate an interrupt (to the TSTORM);
4409 * it must only be done after the chip is initialized
4410 */
4411 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4412 fp->rx_sge_prod);
4413 if (j != 0)
4414 continue;
4415
4416 REG_WR(bp, BAR_USTRORM_INTMEM +
4417 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4418 U64_LO(fp->rx_comp_mapping));
4419 REG_WR(bp, BAR_USTRORM_INTMEM +
4420 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4421 U64_HI(fp->rx_comp_mapping));
4422 }
4423 }
4424
4425 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4426 {
4427 int i, j;
4428
4429 for_each_queue(bp, j) {
4430 struct bnx2x_fastpath *fp = &bp->fp[j];
4431
4432 for (i = 1; i <= NUM_TX_RINGS; i++) {
4433 struct eth_tx_bd *tx_bd =
4434 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4435
4436 tx_bd->addr_hi =
4437 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4438 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4439 tx_bd->addr_lo =
4440 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4441 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4442 }
4443
4444 fp->tx_pkt_prod = 0;
4445 fp->tx_pkt_cons = 0;
4446 fp->tx_bd_prod = 0;
4447 fp->tx_bd_cons = 0;
4448 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4449 fp->tx_pkt = 0;
4450 }
4451 }
4452
4453 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4454 {
4455 int func = BP_FUNC(bp);
4456
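/* the slow-path queue (SPQ) carries ramrod commands to the firmware:
 * MAX_SPQ_PENDING credits, a producer pointer that wraps at
 * spq_last_bd, and the page base plus producer index published to
 * the XSTORM below
 */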
4457 spin_lock_init(&bp->spq_lock);
4458
4459 bp->spq_left = MAX_SPQ_PENDING;
4460 bp->spq_prod_idx = 0;
4461 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4462 bp->spq_prod_bd = bp->spq;
4463 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4464
4465 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4466 U64_LO(bp->spq_mapping));
4467 REG_WR(bp,
4468 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4469 U64_HI(bp->spq_mapping));
4470
4471 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4472 bp->spq_prod_idx);
4473 }
4474
4475 static void bnx2x_init_context(struct bnx2x *bp)
4476 {
4477 int i;
4478
4479 for_each_queue(bp, i) {
4480 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4481 struct bnx2x_fastpath *fp = &bp->fp[i];
4482 u8 sb_id = FP_SB_ID(fp);
4483
4484 context->xstorm_st_context.tx_bd_page_base_hi =
4485 U64_HI(fp->tx_desc_mapping);
4486 context->xstorm_st_context.tx_bd_page_base_lo =
4487 U64_LO(fp->tx_desc_mapping);
4488 context->xstorm_st_context.db_data_addr_hi =
4489 U64_HI(fp->tx_prods_mapping);
4490 context->xstorm_st_context.db_data_addr_lo =
4491 U64_LO(fp->tx_prods_mapping);
4492 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4493 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4494
4495 context->ustorm_st_context.common.sb_index_numbers =
4496 BNX2X_RX_SB_INDEX_NUM;
4497 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4498 context->ustorm_st_context.common.status_block_id = sb_id;
4499 context->ustorm_st_context.common.flags =
4500 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4501 context->ustorm_st_context.common.mc_alignment_size =
4502 BCM_RX_ETH_PAYLOAD_ALIGN;
4503 context->ustorm_st_context.common.bd_buff_size =
4504 bp->rx_buf_size;
4505 context->ustorm_st_context.common.bd_page_base_hi =
4506 U64_HI(fp->rx_desc_mapping);
4507 context->ustorm_st_context.common.bd_page_base_lo =
4508 U64_LO(fp->rx_desc_mapping);
4509 if (!fp->disable_tpa) {
4510 context->ustorm_st_context.common.flags |=
4511 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4512 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4513 context->ustorm_st_context.common.sge_buff_size =
4514 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4515 context->ustorm_st_context.common.sge_page_base_hi =
4516 U64_HI(fp->rx_sge_mapping);
4517 context->ustorm_st_context.common.sge_page_base_lo =
4518 U64_LO(fp->rx_sge_mapping);
4519 }
4520
4521 context->cstorm_st_context.sb_index_number =
4522 C_SB_ETH_TX_CQ_INDEX;
4523 context->cstorm_st_context.status_block_id = sb_id;
4524
4525 context->xstorm_ag_context.cdu_reserved =
4526 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4527 CDU_REGION_NUMBER_XCM_AG,
4528 ETH_CONNECTION_TYPE);
4529 context->ustorm_ag_context.cdu_usage =
4530 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4531 CDU_REGION_NUMBER_UCM_AG,
4532 ETH_CONNECTION_TYPE);
4533 }
4534 }
4535
4536 static void bnx2x_init_ind_table(struct bnx2x *bp)
4537 {
4538 int func = BP_FUNC(bp);
4539 int i;
4540
4541 if (!is_multi(bp))
4542 return;
4543
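/* spread RSS hash results round-robin over the client IDs of the
 * active queues
 */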
4544 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4545 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4546 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4547 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4548 BP_CL_ID(bp) + (i % bp->num_queues));
4549 }
4550
4551 static void bnx2x_set_client_config(struct bnx2x *bp)
4552 {
4553 struct tstorm_eth_client_config tstorm_client = {0};
4554 int port = BP_PORT(bp);
4555 int i;
4556
4557 tstorm_client.mtu = bp->dev->mtu;
4558 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4559 tstorm_client.config_flags =
4560 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4561 #ifdef BCM_VLAN
4562 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4563 tstorm_client.config_flags |=
4564 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4565 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4566 }
4567 #endif
4568
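/* worst-case SGEs per aggregated packet: align the MTU up to a whole
 * number of SGE pages, then round the page count up to whole SGE
 * entries of PAGES_PER_SGE pages each
 */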
4569 if (bp->flags & TPA_ENABLE_FLAG) {
4570 tstorm_client.max_sges_for_packet =
4571 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4572 tstorm_client.max_sges_for_packet =
4573 ((tstorm_client.max_sges_for_packet +
4574 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4575 PAGES_PER_SGE_SHIFT;
4576
4577 tstorm_client.config_flags |=
4578 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4579 }
4580
4581 for_each_queue(bp, i) {
4582 REG_WR(bp, BAR_TSTRORM_INTMEM +
4583 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4584 ((u32 *)&tstorm_client)[0]);
4585 REG_WR(bp, BAR_TSTRORM_INTMEM +
4586 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4587 ((u32 *)&tstorm_client)[1]);
4588 }
4589
4590 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4591 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4592 }
4593
4594 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4595 {
4596 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4597 int mode = bp->rx_mode;
4598 int mask = (1 << BP_L_ID(bp));
4599 int func = BP_FUNC(bp);
4600 int i;
4601
4602 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4603
4604 switch (mode) {
4605 case BNX2X_RX_MODE_NONE: /* no Rx */
4606 tstorm_mac_filter.ucast_drop_all = mask;
4607 tstorm_mac_filter.mcast_drop_all = mask;
4608 tstorm_mac_filter.bcast_drop_all = mask;
4609 break;
4610 case BNX2X_RX_MODE_NORMAL:
4611 tstorm_mac_filter.bcast_accept_all = mask;
4612 break;
4613 case BNX2X_RX_MODE_ALLMULTI:
4614 tstorm_mac_filter.mcast_accept_all = mask;
4615 tstorm_mac_filter.bcast_accept_all = mask;
4616 break;
4617 case BNX2X_RX_MODE_PROMISC:
4618 tstorm_mac_filter.ucast_accept_all = mask;
4619 tstorm_mac_filter.mcast_accept_all = mask;
4620 tstorm_mac_filter.bcast_accept_all = mask;
4621 break;
4622 default:
4623 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4624 break;
4625 }
4626
4627 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4628 REG_WR(bp, BAR_TSTRORM_INTMEM +
4629 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4630 ((u32 *)&tstorm_mac_filter)[i]);
4631
4632 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4633 ((u32 *)&tstorm_mac_filter)[i]); */
4634 }
4635
4636 if (mode != BNX2X_RX_MODE_NONE)
4637 bnx2x_set_client_config(bp);
4638 }
4639
4640 static void bnx2x_init_internal_common(struct bnx2x *bp)
4641 {
4642 int i;
4643
4644 if (bp->flags & TPA_ENABLE_FLAG) {
4645 struct tstorm_eth_tpa_exist tpa = {0};
4646
4647 tpa.tpa_exist = 1;
4648
4649 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4650 ((u32 *)&tpa)[0]);
4651 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4652 ((u32 *)&tpa)[1]);
4653 }
4654
4655 /* Zero this manually as its initialization is
4656 currently missing in the initTool */
4657 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4658 REG_WR(bp, BAR_USTRORM_INTMEM +
4659 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4660 }
4661
4662 static void bnx2x_init_internal_port(struct bnx2x *bp)
4663 {
4664 int port = BP_PORT(bp);
4665
4666 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4667 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4668 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4669 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4670 }
4671
4672 static void bnx2x_init_internal_func(struct bnx2x *bp)
4673 {
4674 struct tstorm_eth_function_common_config tstorm_config = {0};
4675 struct stats_indication_flags stats_flags = {0};
4676 int port = BP_PORT(bp);
4677 int func = BP_FUNC(bp);
4678 int i;
4679 u16 max_agg_size;
4680
4681 if (is_multi(bp)) {
4682 tstorm_config.config_flags = MULTI_FLAGS;
4683 tstorm_config.rss_result_mask = MULTI_MASK;
4684 }
4685
4686 tstorm_config.leading_client_id = BP_L_ID(bp);
4687
4688 REG_WR(bp, BAR_TSTRORM_INTMEM +
4689 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4690 (*(u32 *)&tstorm_config));
4691
4692 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4693 bnx2x_set_storm_rx_mode(bp);
4694
4695 /* reset xstorm per client statistics */
4696 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4697 REG_WR(bp, BAR_XSTRORM_INTMEM +
4698 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4699 i*4, 0);
4700 }
4701 /* reset tstorm per client statistics */
4702 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4703 REG_WR(bp, BAR_TSTRORM_INTMEM +
4704 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4705 i*4, 0);
4706 }
4707
4708 /* Init statistics related context */
4709 stats_flags.collect_eth = 1;
4710
4711 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4712 ((u32 *)&stats_flags)[0]);
4713 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4714 ((u32 *)&stats_flags)[1]);
4715
4716 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4717 ((u32 *)&stats_flags)[0]);
4718 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4719 ((u32 *)&stats_flags)[1]);
4720
4721 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4722 ((u32 *)&stats_flags)[0]);
4723 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4724 ((u32 *)&stats_flags)[1]);
4725
4726 REG_WR(bp, BAR_XSTRORM_INTMEM +
4727 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4728 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4729 REG_WR(bp, BAR_XSTRORM_INTMEM +
4730 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4731 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4732
4733 REG_WR(bp, BAR_TSTRORM_INTMEM +
4734 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4735 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4736 REG_WR(bp, BAR_TSTRORM_INTMEM +
4737 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4738 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4739
4740 if (CHIP_IS_E1H(bp)) {
4741 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4742 IS_E1HMF(bp));
4743 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4744 IS_E1HMF(bp));
4745 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4746 IS_E1HMF(bp));
4747 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4748 IS_E1HMF(bp));
4749
4750 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4751 bp->e1hov);
4752 }
4753
4754 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4755 max_agg_size =
4756 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4757 SGE_PAGE_SIZE * PAGES_PER_SGE),
4758 (u32)0xffff);
4759 for_each_queue(bp, i) {
4760 struct bnx2x_fastpath *fp = &bp->fp[i];
4761
4762 REG_WR(bp, BAR_USTRORM_INTMEM +
4763 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4764 U64_LO(fp->rx_comp_mapping));
4765 REG_WR(bp, BAR_USTRORM_INTMEM +
4766 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4767 U64_HI(fp->rx_comp_mapping));
4768
4769 REG_WR16(bp, BAR_USTRORM_INTMEM +
4770 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4771 max_agg_size);
4772 }
4773 }
4774
4775 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4776 {
4777 switch (load_code) {
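/* deliberate fall-through: a COMMON load also performs the PORT and
 * FUNCTION init, and a PORT load also performs the FUNCTION init
 */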
4778 case FW_MSG_CODE_DRV_LOAD_COMMON:
4779 bnx2x_init_internal_common(bp);
4780 /* no break */
4781
4782 case FW_MSG_CODE_DRV_LOAD_PORT:
4783 bnx2x_init_internal_port(bp);
4784 /* no break */
4785
4786 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4787 bnx2x_init_internal_func(bp);
4788 break;
4789
4790 default:
4791 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4792 break;
4793 }
4794 }
4795
4796 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4797 {
4798 int i;
4799
4800 for_each_queue(bp, i) {
4801 struct bnx2x_fastpath *fp = &bp->fp[i];
4802
4803 fp->bp = bp;
4804 fp->state = BNX2X_FP_STATE_CLOSED;
4805 fp->index = i;
4806 fp->cl_id = BP_L_ID(bp) + i;
4807 fp->sb_id = fp->cl_id;
4808 DP(NETIF_MSG_IFUP,
4809 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4810 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4811 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4812 FP_SB_ID(fp));
4813 bnx2x_update_fpsb_idx(fp);
4814 }
4815
4816 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4817 DEF_SB_ID);
4818 bnx2x_update_dsb_idx(bp);
4819 bnx2x_update_coalesce(bp);
4820 bnx2x_init_rx_rings(bp);
4821 bnx2x_init_tx_ring(bp);
4822 bnx2x_init_sp_ring(bp);
4823 bnx2x_init_context(bp);
4824 bnx2x_init_internal(bp, load_code);
4825 bnx2x_init_ind_table(bp);
4826 bnx2x_stats_init(bp);
4827
4828 /* At this point, we are ready for interrupts */
4829 atomic_set(&bp->intr_sem, 0);
4830
4831 /* flush all before enabling interrupts */
4832 mb();
4833 mmiowb();
4834
4835 bnx2x_int_enable(bp);
4836 }
4837
4838 /* end of nic init */
4839
4840 /*
4841 * gzip service functions
4842 */
4843
4844 static int bnx2x_gunzip_init(struct bnx2x *bp)
4845 {
4846 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4847 &bp->gunzip_mapping);
4848 if (bp->gunzip_buf == NULL)
4849 goto gunzip_nomem1;
4850
4851 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4852 if (bp->strm == NULL)
4853 goto gunzip_nomem2;
4854
4855 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4856 GFP_KERNEL);
4857 if (bp->strm->workspace == NULL)
4858 goto gunzip_nomem3;
4859
4860 return 0;
4861
4862 gunzip_nomem3:
4863 kfree(bp->strm);
4864 bp->strm = NULL;
4865
4866 gunzip_nomem2:
4867 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4868 bp->gunzip_mapping);
4869 bp->gunzip_buf = NULL;
4870
4871 gunzip_nomem1:
4872 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4873 " decompression\n", bp->dev->name);
4874 return -ENOMEM;
4875 }
4876
4877 static void bnx2x_gunzip_end(struct bnx2x *bp)
4878 {
4879 kfree(bp->strm->workspace);
4880
4881 kfree(bp->strm);
4882 bp->strm = NULL;
4883
4884 if (bp->gunzip_buf) {
4885 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4886 bp->gunzip_mapping);
4887 bp->gunzip_buf = NULL;
4888 }
4889 }
4890
4891 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4892 {
4893 int n, rc;
4894
4895 /* check gzip header */
4896 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4897 return -EINVAL;
4898
4899 n = 10;
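/* a gzip member begins with a fixed 10-byte header; if the FNAME flag
 * is set, a NUL-terminated original file name follows and is skipped
 */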
4900
4901 #define FNAME 0x8
4902
4903 if (zbuf[3] & FNAME)
4904 while ((zbuf[n++] != 0) && (n < len));
4905
4906 bp->strm->next_in = zbuf + n;
4907 bp->strm->avail_in = len - n;
4908 bp->strm->next_out = bp->gunzip_buf;
4909 bp->strm->avail_out = FW_BUF_SIZE;
4910
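/* negative windowBits asks zlib for a raw deflate stream, since the
 * gzip wrapper was already parsed by hand above
 */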
4911 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4912 if (rc != Z_OK)
4913 return rc;
4914
4915 rc = zlib_inflate(bp->strm, Z_FINISH);
4916 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4917 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4918 bp->dev->name, bp->strm->msg);
4919
4920 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4921 if (bp->gunzip_outlen & 0x3)
4922 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4923 " gunzip_outlen (%d) not aligned\n",
4924 bp->dev->name, bp->gunzip_outlen);
4925 bp->gunzip_outlen >>= 2;
4926
4927 zlib_inflateEnd(bp->strm);
4928
4929 if (rc == Z_STREAM_END)
4930 return 0;
4931
4932 return rc;
4933 }
4934
4935 /* nic load/unload */
4936
4937 /*
4938 * General service functions
4939 */
4940
4941 /* send a NIG loopback debug packet */
4942 static void bnx2x_lb_pckt(struct bnx2x *bp)
4943 {
4944 u32 wb_write[3];
4945
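/* each DMAE write pushes 8 bytes of packet data plus a control word
 * into the NIG debug-packet interface; per the flags below, 0x20
 * marks start-of-packet and 0x10 end-of-packet
 */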
4946 /* Ethernet source and destination addresses */
4947 wb_write[0] = 0x55555555;
4948 wb_write[1] = 0x55555555;
4949 wb_write[2] = 0x20; /* SOP */
4950 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4951
4952 /* NON-IP protocol */
4953 wb_write[0] = 0x09000000;
4954 wb_write[1] = 0x55555555;
4955 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4956 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4957 }
4958
4959 /* Some of the internal memories are not directly readable from
4960 * the driver; to test them we send debug packets and check that
4961 * the NIG and PRS counters advance as expected
4962 */
4963 static int bnx2x_int_mem_test(struct bnx2x *bp)
4964 {
4965 int factor;
4966 int count, i;
4967 u32 val = 0;
4968
4969 if (CHIP_REV_IS_FPGA(bp))
4970 factor = 120;
4971 else if (CHIP_REV_IS_EMUL(bp))
4972 factor = 200;
4973 else
4974 factor = 1;
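/* emulation and FPGA platforms run much slower than real silicon,
 * so all poll timeouts below are scaled by this factor
 */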
4975
4976 DP(NETIF_MSG_HW, "start part1\n");
4977
4978 /* Disable inputs of parser neighbor blocks */
4979 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4980 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4981 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4982 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4983
4984 /* Write 0 to parser credits for CFC search request */
4985 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4986
4987 /* send Ethernet packet */
4988 bnx2x_lb_pckt(bp);
4989
4990 /* TODO: should the NIG statistics be reset here? */
4991 /* Wait until NIG register shows 1 packet of size 0x10 */
4992 count = 1000 * factor;
4993 while (count) {
4994
4995 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996 val = *bnx2x_sp(bp, wb_data[0]);
4997 if (val == 0x10)
4998 break;
4999
5000 msleep(10);
5001 count--;
5002 }
5003 if (val != 0x10) {
5004 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5005 return -1;
5006 }
5007
5008 /* Wait until PRS register shows 1 packet */
5009 count = 1000 * factor;
5010 while (count) {
5011 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5012 if (val == 1)
5013 break;
5014
5015 msleep(10);
5016 count--;
5017 }
5018 if (val != 0x1) {
5019 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5020 return -2;
5021 }
5022
5023 /* Reset and init BRB, PRS */
5024 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5025 msleep(50);
5026 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5027 msleep(50);
5028 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5029 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5030
5031 DP(NETIF_MSG_HW, "part2\n");
5032
5033 /* Disable inputs of parser neighbor blocks */
5034 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5035 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5036 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5037 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5038
5039 /* Write 0 to parser credits for CFC search request */
5040 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5041
5042 /* send 10 Ethernet packets */
5043 for (i = 0; i < 10; i++)
5044 bnx2x_lb_pckt(bp);
5045
5046 /* Wait until NIG register shows 10 + 1
5047 packets of size 11*0x10 = 0xb0 */
5048 count = 1000 * factor;
5049 while (count) {
5050
5051 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5052 val = *bnx2x_sp(bp, wb_data[0]);
5053 if (val == 0xb0)
5054 break;
5055
5056 msleep(10);
5057 count--;
5058 }
5059 if (val != 0xb0) {
5060 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5061 return -3;
5062 }
5063
5064 /* Wait until PRS register shows 2 packets */
5065 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5066 if (val != 2)
5067 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5068
5069 /* Write 1 to parser credits for CFC search request */
5070 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5071
5072 /* Wait until PRS register shows 3 packets */
5073 msleep(10 * factor);
5074 /* the PRS counter should now show all 3 packets */
5075 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5076 if (val != 3)
5077 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5078
5079 /* clear NIG EOP FIFO */
5080 for (i = 0; i < 11; i++)
5081 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5082 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5083 if (val != 1) {
5084 BNX2X_ERR("clear of NIG failed\n");
5085 return -4;
5086 }
5087
5088 /* Reset and init BRB, PRS, NIG */
5089 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5090 msleep(50);
5091 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5092 msleep(50);
5093 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5094 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5095 #ifndef BCM_ISCSI
5096 /* set NIC mode */
5097 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5098 #endif
5099
5100 /* Enable inputs of parser neighbor blocks */
5101 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5102 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5103 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5104 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5105
5106 DP(NETIF_MSG_HW, "done\n");
5107
5108 return 0; /* OK */
5109 }
5110
5111 static void enable_blocks_attention(struct bnx2x *bp)
5112 {
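/* writing 0 to a block's INT_MASK register unmasks all of its
 * attention interrupts; the commented-out SEM/MISC masks are
 * deliberately left masked
 */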
5113 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5114 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5115 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5116 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5117 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5118 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5119 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5120 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5121 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5122 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5123 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5124 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5125 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5126 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5127 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5128 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5129 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5130 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5131 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5132 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5133 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5134 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5135 if (CHIP_REV_IS_FPGA(bp))
5136 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5137 else
5138 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5139 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5140 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5141 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5142 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5143 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5144 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5145 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5146 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5147 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5148 }
5149
5150
5151 static void bnx2x_reset_common(struct bnx2x *bp)
5152 {
5153 /* reset_common */
5154 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5155 0xd3ffff7f);
5156 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5157 }
5158
5159 static int bnx2x_init_common(struct bnx2x *bp)
5160 {
5161 u32 val, i;
5162
5163 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5164
5165 bnx2x_reset_common(bp);
5166 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5167 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5168
5169 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5170 if (CHIP_IS_E1H(bp))
5171 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5172
5173 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5174 msleep(30);
5175 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5176
5177 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5178 if (CHIP_IS_E1(bp)) {
5179 /* enable HW interrupt from PXP on USDM overflow
5180 bit 16 on INT_MASK_0 */
5181 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5182 }
5183
5184 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5185 bnx2x_init_pxp(bp);
5186
5187 #ifdef __BIG_ENDIAN
5188 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5189 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5190 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5191 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5192 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5193
5194 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5195 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5196 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5197 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5198 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5199 #endif
5200
5201 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5202 #ifdef BCM_ISCSI
5203 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5204 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5205 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5206 #endif
5207
5208 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5209 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5210
5211 /* let the HW do its magic ... */
5212 msleep(100);
5213 /* finish PXP init */
5214 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5215 if (val != 1) {
5216 BNX2X_ERR("PXP2 CFG failed\n");
5217 return -EBUSY;
5218 }
5219 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5220 if (val != 1) {
5221 BNX2X_ERR("PXP2 RD_INIT failed\n");
5222 return -EBUSY;
5223 }
5224
5225 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5226 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5227
5228 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5229
5230 /* clean the DMAE memory */
5231 bp->dmae_ready = 1;
5232 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5233
5234 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5235 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5236 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5237 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5238
5239 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5240 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5241 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5242 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5243
5244 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5245 /* soft reset pulse */
5246 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5247 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5248
5249 #ifdef BCM_ISCSI
5250 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5251 #endif
5252
5253 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5254 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5255 if (!CHIP_REV_IS_SLOW(bp)) {
5256 /* enable hw interrupt from doorbell Q */
5257 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5258 }
5259
5260 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5261 if (CHIP_REV_IS_SLOW(bp)) {
5262 /* fix for emulation and FPGA for no pause */
5263 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5264 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5265 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5266 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5267 }
5268
5269 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5270 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5271 /* set NIC mode */
5272 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5273 if (CHIP_IS_E1H(bp))
5274 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5275
5276 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5277 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5278 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5279 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5280
5281 if (CHIP_IS_E1H(bp)) {
5282 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5283 STORM_INTMEM_SIZE_E1H/2);
5284 bnx2x_init_fill(bp,
5285 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5286 0, STORM_INTMEM_SIZE_E1H/2);
5287 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5288 STORM_INTMEM_SIZE_E1H/2);
5289 bnx2x_init_fill(bp,
5290 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5291 0, STORM_INTMEM_SIZE_E1H/2);
5292 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5293 STORM_INTMEM_SIZE_E1H/2);
5294 bnx2x_init_fill(bp,
5295 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5296 0, STORM_INTMEM_SIZE_E1H/2);
5297 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5298 STORM_INTMEM_SIZE_E1H/2);
5299 bnx2x_init_fill(bp,
5300 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5301 0, STORM_INTMEM_SIZE_E1H/2);
5302 } else { /* E1 */
5303 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5304 STORM_INTMEM_SIZE_E1);
5305 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5306 STORM_INTMEM_SIZE_E1);
5307 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5308 STORM_INTMEM_SIZE_E1);
5309 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5310 STORM_INTMEM_SIZE_E1);
5311 }
5312
5313 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5314 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5315 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5316 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5317
5318 /* sync semi rtc */
5319 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5320 0x80000000);
5321 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5322 0x80000000);
5323
5324 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5325 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5326 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5327
5328 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5329 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5330 REG_WR(bp, i, 0xc0cac01a);
5331 /* TODO: replace with something meaningful */
5332 }
5333 if (CHIP_IS_E1H(bp))
5334 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5335 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5336
5337 if (sizeof(union cdu_context) != 1024)
5338 /* we currently assume that a context is 1024 bytes */
5339 printk(KERN_ALERT PFX "please adjust the size of"
5340 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5341
5342 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5343 val = (4 << 24) + (0 << 12) + 1024;
5344 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5345 if (CHIP_IS_E1(bp)) {
5346 /* !!! fix pxp client credit until excel update */
5347 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5348 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5349 }
5350
5351 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5352 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5353
5354 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5355 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5356
5357 /* PXPCS COMMON comes here */
5358 /* Reset PCIE errors for debug */
5359 REG_WR(bp, 0x2814, 0xffffffff);
5360 REG_WR(bp, 0x3820, 0xffffffff);
5361
5362 /* EMAC0 COMMON comes here */
5363 /* EMAC1 COMMON comes here */
5364 /* DBU COMMON comes here */
5365 /* DBG COMMON comes here */
5366
5367 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5368 if (CHIP_IS_E1H(bp)) {
5369 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5370 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5371 }
5372
5373 if (CHIP_REV_IS_SLOW(bp))
5374 msleep(200);
5375
5376 /* finish CFC init */
5377 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5378 if (val != 1) {
5379 BNX2X_ERR("CFC LL_INIT failed\n");
5380 return -EBUSY;
5381 }
5382 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5383 if (val != 1) {
5384 BNX2X_ERR("CFC AC_INIT failed\n");
5385 return -EBUSY;
5386 }
5387 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5388 if (val != 1) {
5389 BNX2X_ERR("CFC CAM_INIT failed\n");
5390 return -EBUSY;
5391 }
5392 REG_WR(bp, CFC_REG_DEBUG0, 0);
5393
5394 /* read the NIG statistic
5395 to see if this is the first load since power-up */
5396 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5397 val = *bnx2x_sp(bp, wb_data[0]);
5398
5399 /* do internal memory self test */
5400 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5401 BNX2X_ERR("internal mem self test failed\n");
5402 return -EBUSY;
5403 }
5404
5405 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5406 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5407 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5408 /* Fan failure is indicated by SPIO 5 */
5409 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5410 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5411
5412 /* set to active low mode */
5413 val = REG_RD(bp, MISC_REG_SPIO_INT);
5414 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5415 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5416 REG_WR(bp, MISC_REG_SPIO_INT, val);
5417
5418 /* enable interrupt to signal the IGU */
5419 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5420 val |= (1 << MISC_REGISTERS_SPIO_5);
5421 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5422 break;
5423
5424 default:
5425 break;
5426 }
5427
5428 /* clear PXP2 attentions */
5429 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5430
5431 enable_blocks_attention(bp);
5432
5433 if (!BP_NOMCP(bp)) {
5434 bnx2x_acquire_phy_lock(bp);
5435 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5436 bnx2x_release_phy_lock(bp);
5437 } else
5438 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5439
5440 return 0;
5441 }
5442
5443 static int bnx2x_init_port(struct bnx2x *bp)
5444 {
5445 int port = BP_PORT(bp);
5446 u32 val;
5447
5448 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5449
5450 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5451
5452 /* Port PXP comes here */
5453 /* Port PXP2 comes here */
5454 #ifdef BCM_ISCSI
5455 /* Port0 1
5456 * Port1 385 */
5457 i++;
5458 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5459 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5460 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5461 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5462
5463 /* Port0 2
5464 * Port1 386 */
5465 i++;
5466 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5467 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5468 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5469 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5470
5471 /* Port0 3
5472 * Port1 387 */
5473 i++;
5474 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5475 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5476 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5477 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5478 #endif
5479 /* Port CMs come here */
5480
5481 /* Port QM comes here */
5482 #ifdef BCM_ISCSI
5483 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5484 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5485
5486 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5487 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5488 #endif
5489 /* Port DQ comes here */
5490 /* Port BRB1 comes here */
5491 /* Port PRS comes here */
5492 /* Port TSDM comes here */
5493 /* Port CSDM comes here */
5494 /* Port USDM comes here */
5495 /* Port XSDM comes here */
5496 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5497 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5498 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5499 port ? USEM_PORT1_END : USEM_PORT0_END);
5500 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5501 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5502 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5503 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5504 /* Port UPB comes here */
5505 /* Port XPB comes here */
5506
5507 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5508 port ? PBF_PORT1_END : PBF_PORT0_END);
5509
5510 	/* configure PBF to work without PAUSE, MTU 9000 */
5511 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5512
5513 /* update threshold */
5514 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5515 /* update init credit */
5516 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
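	/* Arithmetic sketch (an illustrative reading of the two writes
	 * above; the 553 and 22 terms are hardware pipeline constants we
	 * take on trust): threshold = 9040/16 = 565 units of 16 bytes,
	 * i.e. MTU 9000 plus header margin; initial credit
	 * = 565 + 553 - 22 = 1096 units.
	 */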
5517
5518 /* probe changes */
5519 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5520 msleep(5);
5521 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5522
5523 #ifdef BCM_ISCSI
5524 /* tell the searcher where the T2 table is */
5525 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5526
5527 wb_write[0] = U64_LO(bp->t2_mapping);
5528 wb_write[1] = U64_HI(bp->t2_mapping);
5529 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5530 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5531 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5532 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5533
5534 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5535 /* Port SRCH comes here */
5536 #endif
5537 /* Port CDU comes here */
5538 /* Port CFC comes here */
5539
5540 if (CHIP_IS_E1(bp)) {
5541 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5542 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5543 }
5544 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5545 port ? HC_PORT1_END : HC_PORT0_END);
5546
5547 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5548 MISC_AEU_PORT0_START,
5549 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5550 /* init aeu_mask_attn_func_0/1:
5551 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5552 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5553 * bits 4-7 are used for "per vn group attention" */
5554 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5555 (IS_E1HMF(bp) ? 0xF7 : 0x7));
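	/* Worked example of the two masks (matches the comment above):
	 * 0x07 = 0000 0111b - SF mode: only attention bits 0-2 pass;
	 * 0xF7 = 1111 0111b - MF mode: bits 0-2 as in SF, bit 3 masked,
	 * bits 4-7 open for the "per vn group attention" inputs.
	 */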
5556
5557 /* Port PXPCS comes here */
5558 /* Port EMAC0 comes here */
5559 /* Port EMAC1 comes here */
5560 /* Port DBU comes here */
5561 /* Port DBG comes here */
5562 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5563 port ? NIG_PORT1_END : NIG_PORT0_END);
5564
5565 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5566
5567 if (CHIP_IS_E1H(bp)) {
5568 u32 wsum;
5569 struct cmng_struct_per_port m_cmng_port;
5570 int vn;
5571
5572 /* 0x2 disable e1hov, 0x1 enable */
5573 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5574 (IS_E1HMF(bp) ? 0x1 : 0x2));
5575
5576 		/* Init RATE SHAPING and FAIRNESS contexts.
5577 		   Initialize as if there is a 10G link. */
5578 wsum = bnx2x_calc_vn_wsum(bp);
5579 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5580 if (IS_E1HMF(bp))
5581 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5582 bnx2x_init_vn_minmax(bp, 2*vn + port,
5583 wsum, 10000, &m_cmng_port);
5584 }
5585
5586 /* Port MCP comes here */
5587 /* Port DMAE comes here */
5588
5589 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5590 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5591 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5592 /* add SPIO 5 to group 0 */
5593 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5594 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5595 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5596 break;
5597
5598 default:
5599 break;
5600 }
5601
5602 bnx2x__link_reset(bp);
5603
5604 return 0;
5605 }
5606
5607 #define ILT_PER_FUNC (768/2)
5608 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5609 /* the physical address is shifted right 12 bits and a valid bit (1)
5610    is added as the 53rd bit;
5611    then, since this is a wide register(TM),
5612    we split it into two 32-bit writes
5613  */
5614 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5615 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5616 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5617 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
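
/* Worked example (illustrative, not used by the driver): for a DMA
 * address addr = 0x0000001234567000,
 *	(u64)addr >> 12    = 0x1234567, so
 *	ONCHIP_ADDR1(addr) = 0x01234567  (bits 43:12 of addr)
 *	ONCHIP_ADDR2(addr) = 0x00100000  (valid bit 20 | bits 63:44)
 * which places the valid bit at bit 52 (32 + 20) of the 64-bit entry.
 * Similarly PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405 - first and last
 * ILT line both set to 5.
 */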
5618
5619 #define CNIC_ILT_LINES 0
5620
5621 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5622 {
5623 int reg;
5624
5625 if (CHIP_IS_E1H(bp))
5626 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5627 else /* E1 */
5628 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5629
5630 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5631 }
5632
5633 static int bnx2x_init_func(struct bnx2x *bp)
5634 {
5635 int port = BP_PORT(bp);
5636 int func = BP_FUNC(bp);
5637 int i;
5638
5639 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5640
5641 i = FUNC_ILT_BASE(func);
5642
5643 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5644 if (CHIP_IS_E1H(bp)) {
5645 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5646 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5647 } else /* E1 */
5648 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5649 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5650
5651
5652 if (CHIP_IS_E1H(bp)) {
5653 for (i = 0; i < 9; i++)
5654 bnx2x_init_block(bp,
5655 cm_start[func][i], cm_end[func][i]);
5656
5657 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5658 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5659 }
5660
5661 /* HC init per function */
5662 if (CHIP_IS_E1H(bp)) {
5663 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5664
5665 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5666 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5667 }
5668 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5669
5670 if (CHIP_IS_E1H(bp))
5671 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5672
5673 /* Reset PCIE errors for debug */
5674 REG_WR(bp, 0x2114, 0xffffffff);
5675 REG_WR(bp, 0x2120, 0xffffffff);
5676
5677 return 0;
5678 }
5679
5680 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5681 {
5682 int i, rc = 0;
5683
5684 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5685 BP_FUNC(bp), load_code);
5686
5687 bp->dmae_ready = 0;
5688 mutex_init(&bp->dmae_mutex);
5689 bnx2x_gunzip_init(bp);
5690
5691 switch (load_code) {
5692 case FW_MSG_CODE_DRV_LOAD_COMMON:
5693 rc = bnx2x_init_common(bp);
5694 if (rc)
5695 goto init_hw_err;
5696 /* no break */
5697
5698 case FW_MSG_CODE_DRV_LOAD_PORT:
5699 bp->dmae_ready = 1;
5700 rc = bnx2x_init_port(bp);
5701 if (rc)
5702 goto init_hw_err;
5703 /* no break */
5704
5705 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5706 bp->dmae_ready = 1;
5707 rc = bnx2x_init_func(bp);
5708 if (rc)
5709 goto init_hw_err;
5710 break;
5711
5712 default:
5713 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5714 break;
5715 }
5716
5717 if (!BP_NOMCP(bp)) {
5718 int func = BP_FUNC(bp);
5719
5720 bp->fw_drv_pulse_wr_seq =
5721 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5722 DRV_PULSE_SEQ_MASK);
5723 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5724 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5725 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5726 } else
5727 bp->func_stx = 0;
5728
5729 /* this needs to be done before gunzip end */
5730 bnx2x_zero_def_sb(bp);
5731 for_each_queue(bp, i)
5732 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5733
5734 init_hw_err:
5735 bnx2x_gunzip_end(bp);
5736
5737 return rc;
5738 }
5739
5740 /* send the MCP a request, block until there is a reply */
5741 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5742 {
5743 int func = BP_FUNC(bp);
5744 u32 seq = ++bp->fw_seq;
5745 u32 rc = 0;
5746 u32 cnt = 1;
5747 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5748
5749 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5750 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5751
5752 do {
5753 		/* let the FW do its magic ... */
5754 msleep(delay);
5755
5756 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5757
5758 		/* Give the FW up to 2 seconds (200 * 10ms) */
5759 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5760
5761 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5762 cnt*delay, rc, seq);
5763
5764 /* is this a reply to our command? */
5765 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5766 rc &= FW_MSG_CODE_MASK;
5767
5768 } else {
5769 /* FW BUG! */
5770 BNX2X_ERR("FW failed to respond!\n");
5771 bnx2x_fw_dump(bp);
5772 rc = 0;
5773 }
5774
5775 return rc;
5776 }
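
/* Usage sketch (illustrative only, not wired into the driver): how a
 * caller is expected to consume bnx2x_fw_command() - post a request and
 * branch on the masked reply code.  The constants come from
 * bnx2x_fw_defs.h; the error policy below is an assumption, not the
 * driver's real one (see bnx2x_nic_load() for that).
 */
static inline int bnx2x_fw_load_req_sketch(struct bnx2x *bp)
{
	u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);

	if (!load_code)		/* no reply - MCP stuck or absent */
		return -EBUSY;
	if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
		return -EBUSY;	/* another function is in diag mode */

	return 0;		/* LOAD_COMMON/PORT/FUNCTION - go on */
}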
5777
5778 static void bnx2x_free_mem(struct bnx2x *bp)
5779 {
5780
5781 #define BNX2X_PCI_FREE(x, y, size) \
5782 do { \
5783 if (x) { \
5784 pci_free_consistent(bp->pdev, size, x, y); \
5785 x = NULL; \
5786 y = 0; \
5787 } \
5788 } while (0)
5789
5790 #define BNX2X_FREE(x) \
5791 do { \
5792 if (x) { \
5793 vfree(x); \
5794 x = NULL; \
5795 } \
5796 } while (0)
5797
5798 int i;
5799
5800 /* fastpath */
5801 for_each_queue(bp, i) {
5802
5803 /* Status blocks */
5804 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5805 bnx2x_fp(bp, i, status_blk_mapping),
5806 sizeof(struct host_status_block) +
5807 sizeof(struct eth_tx_db_data));
5808
5809 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5810 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5811 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5812 bnx2x_fp(bp, i, tx_desc_mapping),
5813 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5814
5815 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5816 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5817 bnx2x_fp(bp, i, rx_desc_mapping),
5818 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5819
5820 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5821 bnx2x_fp(bp, i, rx_comp_mapping),
5822 sizeof(struct eth_fast_path_rx_cqe) *
5823 NUM_RCQ_BD);
5824
5825 /* SGE ring */
5826 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5827 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5828 bnx2x_fp(bp, i, rx_sge_mapping),
5829 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5830 }
5831 /* end of fastpath */
5832
5833 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5834 sizeof(struct host_def_status_block));
5835
5836 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5837 sizeof(struct bnx2x_slowpath));
5838
5839 #ifdef BCM_ISCSI
5840 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5841 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5842 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5843 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5844 #endif
5845 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5846
5847 #undef BNX2X_PCI_FREE
5848 #undef BNX2X_FREE
5849 }
5850
5851 static int bnx2x_alloc_mem(struct bnx2x *bp)
5852 {
5853
5854 #define BNX2X_PCI_ALLOC(x, y, size) \
5855 do { \
5856 x = pci_alloc_consistent(bp->pdev, size, y); \
5857 if (x == NULL) \
5858 goto alloc_mem_err; \
5859 memset(x, 0, size); \
5860 } while (0)
5861
5862 #define BNX2X_ALLOC(x, size) \
5863 do { \
5864 x = vmalloc(size); \
5865 if (x == NULL) \
5866 goto alloc_mem_err; \
5867 memset(x, 0, size); \
5868 } while (0)
5869
5870 int i;
5871
5872 /* fastpath */
5873 for_each_queue(bp, i) {
5874 bnx2x_fp(bp, i, bp) = bp;
5875
5876 /* Status blocks */
5877 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5878 &bnx2x_fp(bp, i, status_blk_mapping),
5879 sizeof(struct host_status_block) +
5880 sizeof(struct eth_tx_db_data));
5881
5882 bnx2x_fp(bp, i, hw_tx_prods) =
5883 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5884
5885 bnx2x_fp(bp, i, tx_prods_mapping) =
5886 bnx2x_fp(bp, i, status_blk_mapping) +
5887 sizeof(struct host_status_block);
5888
5889 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5890 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5891 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5892 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5893 &bnx2x_fp(bp, i, tx_desc_mapping),
5894 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5895
5896 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5897 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5898 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5899 &bnx2x_fp(bp, i, rx_desc_mapping),
5900 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5901
5902 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5903 &bnx2x_fp(bp, i, rx_comp_mapping),
5904 sizeof(struct eth_fast_path_rx_cqe) *
5905 NUM_RCQ_BD);
5906
5907 /* SGE ring */
5908 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5909 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5910 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5911 &bnx2x_fp(bp, i, rx_sge_mapping),
5912 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5913 }
5914 /* end of fastpath */
5915
5916 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5917 sizeof(struct host_def_status_block));
5918
5919 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5920 sizeof(struct bnx2x_slowpath));
5921
5922 #ifdef BCM_ISCSI
5923 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5924
5925 /* Initialize T1 */
5926 for (i = 0; i < 64*1024; i += 64) {
5927 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5928 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5929 }
5930
5931 	/* allocate the searcher T2 table;
5932 	   we allocate 1/4 of the allocated number for T2
5933 	   (T2 is not entered into the ILT) */
5934 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5935
5936 /* Initialize T2 */
5937 for (i = 0; i < 16*1024; i += 64)
5938 		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5939
5940 /* now fixup the last line in the block to point to the next block */
5941 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
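	/* Free-list sketch: each 64-byte T2 element stores, at offset
	 * 56, the DMA address of the next element; with 16K/64 = 256
	 * elements, the write above makes the last one point back to
	 * t2_mapping, closing the ring the searcher walks.
	 */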
5942
5943 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5944 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5945
5946 /* QM queues (128*MAX_CONN) */
5947 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5948 #endif
5949
5950 /* Slow path ring */
5951 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5952
5953 return 0;
5954
5955 alloc_mem_err:
5956 bnx2x_free_mem(bp);
5957 return -ENOMEM;
5958
5959 #undef BNX2X_PCI_ALLOC
5960 #undef BNX2X_ALLOC
5961 }
5962
5963 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5964 {
5965 int i;
5966
5967 for_each_queue(bp, i) {
5968 struct bnx2x_fastpath *fp = &bp->fp[i];
5969
5970 u16 bd_cons = fp->tx_bd_cons;
5971 u16 sw_prod = fp->tx_pkt_prod;
5972 u16 sw_cons = fp->tx_pkt_cons;
5973
5974 while (sw_cons != sw_prod) {
5975 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5976 sw_cons++;
5977 }
5978 }
5979 }
5980
5981 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5982 {
5983 int i, j;
5984
5985 for_each_queue(bp, j) {
5986 struct bnx2x_fastpath *fp = &bp->fp[j];
5987
5988 for (i = 0; i < NUM_RX_BD; i++) {
5989 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5990 struct sk_buff *skb = rx_buf->skb;
5991
5992 if (skb == NULL)
5993 continue;
5994
5995 pci_unmap_single(bp->pdev,
5996 pci_unmap_addr(rx_buf, mapping),
5997 bp->rx_buf_size,
5998 PCI_DMA_FROMDEVICE);
5999
6000 rx_buf->skb = NULL;
6001 dev_kfree_skb(skb);
6002 }
6003 if (!fp->disable_tpa)
6004 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6005 ETH_MAX_AGGREGATION_QUEUES_E1 :
6006 ETH_MAX_AGGREGATION_QUEUES_E1H);
6007 }
6008 }
6009
6010 static void bnx2x_free_skbs(struct bnx2x *bp)
6011 {
6012 bnx2x_free_tx_skbs(bp);
6013 bnx2x_free_rx_skbs(bp);
6014 }
6015
6016 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6017 {
6018 int i, offset = 1;
6019
6020 free_irq(bp->msix_table[0].vector, bp->dev);
6021 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6022 bp->msix_table[0].vector);
6023
6024 for_each_queue(bp, i) {
6025 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6026 "state %x\n", i, bp->msix_table[i + offset].vector,
6027 bnx2x_fp(bp, i, state));
6028
6029 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6030 BNX2X_ERR("IRQ of fp #%d being freed while "
6031 "state != closed\n", i);
6032
6033 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6034 }
6035 }
6036
6037 static void bnx2x_free_irq(struct bnx2x *bp)
6038 {
6039 if (bp->flags & USING_MSIX_FLAG) {
6040 bnx2x_free_msix_irqs(bp);
6041 pci_disable_msix(bp->pdev);
6042 bp->flags &= ~USING_MSIX_FLAG;
6043
6044 } else
6045 free_irq(bp->pdev->irq, bp->dev);
6046 }
6047
6048 static int bnx2x_enable_msix(struct bnx2x *bp)
6049 {
6050 int i, rc, offset;
6051
6052 bp->msix_table[0].entry = 0;
6053 offset = 1;
6054 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6055
6056 for_each_queue(bp, i) {
6057 int igu_vec = offset + i + BP_L_ID(bp);
6058
6059 bp->msix_table[i + offset].entry = igu_vec;
6060 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6061 "(fastpath #%u)\n", i + offset, igu_vec, i);
6062 }
6063
6064 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6065 bp->num_queues + offset);
6066 if (rc) {
6067 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6068 return -1;
6069 }
6070 bp->flags |= USING_MSIX_FLAG;
6071
6072 return 0;
6073 }
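
/* Vector layout sketch (assuming BP_L_ID(bp) == 0 and four queues):
 *	msix_table[0].entry = 0	(slowpath)
 *	msix_table[1].entry = 1	(fastpath #0)
 *	...
 *	msix_table[4].entry = 4	(fastpath #3)
 * i.e. one IGU vector per status block, slowpath first; the same
 * offset-of-one indexing is used again in bnx2x_req_msix_irqs().
 */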
6074
6075 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6076 {
6077 int i, rc, offset = 1;
6078
6079 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6080 bp->dev->name, bp->dev);
6081 if (rc) {
6082 BNX2X_ERR("request sp irq failed\n");
6083 return -EBUSY;
6084 }
6085
6086 for_each_queue(bp, i) {
6087 rc = request_irq(bp->msix_table[i + offset].vector,
6088 bnx2x_msix_fp_int, 0,
6089 bp->dev->name, &bp->fp[i]);
6090 if (rc) {
6091 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6092 i + offset, -rc);
6093 bnx2x_free_msix_irqs(bp);
6094 return -EBUSY;
6095 }
6096
6097 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6098 }
6099
6100 return 0;
6101 }
6102
6103 static int bnx2x_req_irq(struct bnx2x *bp)
6104 {
6105 int rc;
6106
6107 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6108 bp->dev->name, bp->dev);
6109 if (!rc)
6110 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6111
6112 return rc;
6113 }
6114
6115 static void bnx2x_napi_enable(struct bnx2x *bp)
6116 {
6117 int i;
6118
6119 for_each_queue(bp, i)
6120 napi_enable(&bnx2x_fp(bp, i, napi));
6121 }
6122
6123 static void bnx2x_napi_disable(struct bnx2x *bp)
6124 {
6125 int i;
6126
6127 for_each_queue(bp, i)
6128 napi_disable(&bnx2x_fp(bp, i, napi));
6129 }
6130
6131 static void bnx2x_netif_start(struct bnx2x *bp)
6132 {
6133 if (atomic_dec_and_test(&bp->intr_sem)) {
6134 if (netif_running(bp->dev)) {
6135 if (bp->state == BNX2X_STATE_OPEN)
6136 netif_wake_queue(bp->dev);
6137 bnx2x_napi_enable(bp);
6138 bnx2x_int_enable(bp);
6139 }
6140 }
6141 }
6142
6143 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6144 {
6145 bnx2x_int_disable_sync(bp, disable_hw);
6146 bnx2x_napi_disable(bp);
6147 if (netif_running(bp->dev)) {
6148 netif_tx_disable(bp->dev);
6149 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6150 }
6151 }
6152
6153 /*
6154 * Init service functions
6155 */
6156
6157 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6158 {
6159 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6160 int port = BP_PORT(bp);
6161
6162 /* CAM allocation
6163 * unicasts 0-31:port0 32-63:port1
6164 * multicast 64-127:port0 128-191:port1
6165 */
6166 config->hdr.length_6b = 2;
6167 config->hdr.offset = port ? 32 : 0;
6168 config->hdr.client_id = BP_CL_ID(bp);
6169 config->hdr.reserved1 = 0;
6170
6171 /* primary MAC */
6172 config->config_table[0].cam_entry.msb_mac_addr =
6173 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6174 config->config_table[0].cam_entry.middle_mac_addr =
6175 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6176 config->config_table[0].cam_entry.lsb_mac_addr =
6177 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6178 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6179 if (set)
6180 config->config_table[0].target_table_entry.flags = 0;
6181 else
6182 CAM_INVALIDATE(config->config_table[0]);
6183 config->config_table[0].target_table_entry.client_id = 0;
6184 config->config_table[0].target_table_entry.vlan_id = 0;
6185
6186 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6187 (set ? "setting" : "clearing"),
6188 config->config_table[0].cam_entry.msb_mac_addr,
6189 config->config_table[0].cam_entry.middle_mac_addr,
6190 config->config_table[0].cam_entry.lsb_mac_addr);
6191
6192 /* broadcast */
6193 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6194 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6195 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6196 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6197 if (set)
6198 config->config_table[1].target_table_entry.flags =
6199 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6200 else
6201 CAM_INVALIDATE(config->config_table[1]);
6202 config->config_table[1].target_table_entry.client_id = 0;
6203 config->config_table[1].target_table_entry.vlan_id = 0;
6204
6205 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6206 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6207 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6208 }
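
/* Packing example for the CAM entry above (little-endian host assumed):
 * for dev_addr = 00:11:22:33:44:55, *(u16 *)&dev_addr[0] loads as
 * 0x1100 and swab16() turns it into msb_mac_addr = 0x0011; likewise
 * middle_mac_addr = 0x2233 and lsb_mac_addr = 0x4455.
 */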
6209
6210 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6211 {
6212 struct mac_configuration_cmd_e1h *config =
6213 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6214
6215 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6216 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6217 return;
6218 }
6219
6220 /* CAM allocation for E1H
6221 * unicasts: by func number
6222 * multicast: 20+FUNC*20, 20 each
6223 */
6224 config->hdr.length_6b = 1;
6225 config->hdr.offset = BP_FUNC(bp);
6226 config->hdr.client_id = BP_CL_ID(bp);
6227 config->hdr.reserved1 = 0;
6228
6229 /* primary MAC */
6230 config->config_table[0].msb_mac_addr =
6231 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6232 config->config_table[0].middle_mac_addr =
6233 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6234 config->config_table[0].lsb_mac_addr =
6235 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6236 config->config_table[0].client_id = BP_L_ID(bp);
6237 config->config_table[0].vlan_id = 0;
6238 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6239 if (set)
6240 config->config_table[0].flags = BP_PORT(bp);
6241 else
6242 config->config_table[0].flags =
6243 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6244
6245 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6246 (set ? "setting" : "clearing"),
6247 config->config_table[0].msb_mac_addr,
6248 config->config_table[0].middle_mac_addr,
6249 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6250
6251 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6252 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6253 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6254 }
6255
6256 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6257 int *state_p, int poll)
6258 {
6259 /* can take a while if any port is running */
6260 int cnt = 500;
6261
6262 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6263 poll ? "polling" : "waiting", state, idx);
6264
6265 might_sleep();
6266 while (cnt--) {
6267 if (poll) {
6268 bnx2x_rx_int(bp->fp, 10);
6269 			/* if the index is different from 0
6270 			 * the reply for some commands will
6271 			 * be on the non-default queue
6272 			 */
6273 if (idx)
6274 bnx2x_rx_int(&bp->fp[idx], 10);
6275 }
6276
6277 mb(); /* state is changed by bnx2x_sp_event() */
6278 if (*state_p == state)
6279 return 0;
6280
6281 msleep(1);
6282 }
6283
6284 /* timeout! */
6285 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6286 poll ? "polling" : "waiting", state, idx);
6287 #ifdef BNX2X_STOP_ON_ERROR
6288 bnx2x_panic();
6289 #endif
6290
6291 return -EBUSY;
6292 }
6293
6294 static int bnx2x_setup_leading(struct bnx2x *bp)
6295 {
6296 int rc;
6297
6298 /* reset IGU state */
6299 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6300
6301 /* SETUP ramrod */
6302 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6303
6304 /* Wait for completion */
6305 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6306
6307 return rc;
6308 }
6309
6310 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6311 {
6312 /* reset IGU state */
6313 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6314
6315 /* SETUP ramrod */
6316 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6317 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6318
6319 /* Wait for completion */
6320 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6321 &(bp->fp[index].state), 0);
6322 }
6323
6324 static int bnx2x_poll(struct napi_struct *napi, int budget);
6325 static void bnx2x_set_rx_mode(struct net_device *dev);
6326
6327 /* must be called with rtnl_lock */
6328 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6329 {
6330 u32 load_code;
6331 int i, rc = 0;
6332 #ifdef BNX2X_STOP_ON_ERROR
6333 if (unlikely(bp->panic))
6334 return -EPERM;
6335 #endif
6336
6337 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6338
6339 if (use_inta) {
6340 bp->num_queues = 1;
6341
6342 } else {
6343 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6344 /* user requested number */
6345 bp->num_queues = use_multi;
6346
6347 else if (use_multi)
6348 bp->num_queues = min_t(u32, num_online_cpus(),
6349 BP_MAX_QUEUES(bp));
6350 else
6351 bp->num_queues = 1;
6352
6353 DP(NETIF_MSG_IFUP,
6354 "set number of queues to %d\n", bp->num_queues);
6355
6356 		/* if we can't use MSI-X we only need one fp,
6357 		 * so try to enable MSI-X with the requested number of fps
6358 		 * and fall back to MSI or legacy INTx with one fp
6359 		 */
6360 rc = bnx2x_enable_msix(bp);
6361 if (rc) {
6362 /* failed to enable MSI-X */
6363 bp->num_queues = 1;
6364 if (use_multi)
6365 BNX2X_ERR("Multi requested but failed"
6366 " to enable MSI-X\n");
6367 }
6368 }
6369
6370 if (bnx2x_alloc_mem(bp))
6371 return -ENOMEM;
6372
6373 for_each_queue(bp, i)
6374 bnx2x_fp(bp, i, disable_tpa) =
6375 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6376
6377 for_each_queue(bp, i)
6378 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6379 bnx2x_poll, 128);
6380
6381 #ifdef BNX2X_STOP_ON_ERROR
6382 for_each_queue(bp, i) {
6383 struct bnx2x_fastpath *fp = &bp->fp[i];
6384
6385 fp->poll_no_work = 0;
6386 fp->poll_calls = 0;
6387 fp->poll_max_calls = 0;
6388 fp->poll_complete = 0;
6389 fp->poll_exit = 0;
6390 }
6391 #endif
6392 bnx2x_napi_enable(bp);
6393
6394 if (bp->flags & USING_MSIX_FLAG) {
6395 rc = bnx2x_req_msix_irqs(bp);
6396 if (rc) {
6397 pci_disable_msix(bp->pdev);
6398 goto load_error1;
6399 }
6400 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6401 } else {
6402 bnx2x_ack_int(bp);
6403 rc = bnx2x_req_irq(bp);
6404 if (rc) {
6405 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6406 goto load_error1;
6407 }
6408 }
6409
6410 	/* Send LOAD_REQUEST command to MCP.
6411 	   The MCP replies with the type of LOAD command:
6412 	   if this is the first port to be initialized,
6413 	   common blocks should be initialized as well; otherwise not.
6414 	 */
6415 if (!BP_NOMCP(bp)) {
6416 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6417 if (!load_code) {
6418 BNX2X_ERR("MCP response failure, aborting\n");
6419 rc = -EBUSY;
6420 goto load_error2;
6421 }
6422 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6423 rc = -EBUSY; /* other port in diagnostic mode */
6424 goto load_error2;
6425 }
6426
6427 } else {
6428 int port = BP_PORT(bp);
6429
6430 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6431 load_count[0], load_count[1], load_count[2]);
6432 load_count[0]++;
6433 load_count[1 + port]++;
6434 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6435 load_count[0], load_count[1], load_count[2]);
6436 if (load_count[0] == 1)
6437 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6438 else if (load_count[1 + port] == 1)
6439 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6440 else
6441 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6442 }
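	/* Bookkeeping example (no-MCP mode): starting from {0, 0, 0}, a
	 * first load on port 0 gives {1, 1, 0} -> LOAD_COMMON; a
	 * following load on port 1 gives {2, 1, 1} -> LOAD_PORT; any
	 * further load on either port only bumps the counters
	 * -> LOAD_FUNCTION.
	 */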
6443
6444 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6445 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6446 bp->port.pmf = 1;
6447 else
6448 bp->port.pmf = 0;
6449 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6450
6451 /* Initialize HW */
6452 rc = bnx2x_init_hw(bp, load_code);
6453 if (rc) {
6454 BNX2X_ERR("HW init failed, aborting\n");
6455 goto load_error2;
6456 }
6457
6458 /* Setup NIC internals and enable interrupts */
6459 bnx2x_nic_init(bp, load_code);
6460
6461 /* Send LOAD_DONE command to MCP */
6462 if (!BP_NOMCP(bp)) {
6463 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6464 if (!load_code) {
6465 BNX2X_ERR("MCP response failure, aborting\n");
6466 rc = -EBUSY;
6467 goto load_error3;
6468 }
6469 }
6470
6471 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6472
6473 rc = bnx2x_setup_leading(bp);
6474 if (rc) {
6475 BNX2X_ERR("Setup leading failed!\n");
6476 goto load_error3;
6477 }
6478
6479 if (CHIP_IS_E1H(bp))
6480 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6481 BNX2X_ERR("!!! mf_cfg function disabled\n");
6482 bp->state = BNX2X_STATE_DISABLED;
6483 }
6484
6485 if (bp->state == BNX2X_STATE_OPEN)
6486 for_each_nondefault_queue(bp, i) {
6487 rc = bnx2x_setup_multi(bp, i);
6488 if (rc)
6489 goto load_error3;
6490 }
6491
6492 if (CHIP_IS_E1(bp))
6493 bnx2x_set_mac_addr_e1(bp, 1);
6494 else
6495 bnx2x_set_mac_addr_e1h(bp, 1);
6496
6497 if (bp->port.pmf)
6498 bnx2x_initial_phy_init(bp);
6499
6500 /* Start fast path */
6501 switch (load_mode) {
6502 case LOAD_NORMAL:
6503 		/* Tx queue should only be re-enabled */
6504 netif_wake_queue(bp->dev);
6505 /* Initialize the receive filter. */
6506 bnx2x_set_rx_mode(bp->dev);
6507 break;
6508
6509 case LOAD_OPEN:
6510 netif_start_queue(bp->dev);
6511 /* Initialize the receive filter. */
6512 bnx2x_set_rx_mode(bp->dev);
6513 break;
6514
6515 case LOAD_DIAG:
6516 /* Initialize the receive filter. */
6517 bnx2x_set_rx_mode(bp->dev);
6518 bp->state = BNX2X_STATE_DIAG;
6519 break;
6520
6521 default:
6522 break;
6523 }
6524
6525 if (!bp->port.pmf)
6526 bnx2x__link_status_update(bp);
6527
6528 /* start the timer */
6529 mod_timer(&bp->timer, jiffies + bp->current_interval);
6530
6531
6532 return 0;
6533
6534 load_error3:
6535 bnx2x_int_disable_sync(bp, 1);
6536 if (!BP_NOMCP(bp)) {
6537 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6538 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6539 }
6540 bp->port.pmf = 0;
6541 /* Free SKBs, SGEs, TPA pool and driver internals */
6542 bnx2x_free_skbs(bp);
6543 for_each_queue(bp, i)
6544 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6545 load_error2:
6546 /* Release IRQs */
6547 bnx2x_free_irq(bp);
6548 load_error1:
6549 bnx2x_napi_disable(bp);
6550 bnx2x_free_mem(bp);
6551
6552 /* TBD we really need to reset the chip
6553 if we want to recover from this */
6554 return rc;
6555 }
6556
6557 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6558 {
6559 int rc;
6560
6561 /* halt the connection */
6562 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6563 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6564
6565 /* Wait for completion */
6566 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6567 &(bp->fp[index].state), 1);
6568 if (rc) /* timeout */
6569 return rc;
6570
6571 /* delete cfc entry */
6572 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6573
6574 /* Wait for completion */
6575 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6576 &(bp->fp[index].state), 1);
6577 return rc;
6578 }
6579
6580 static int bnx2x_stop_leading(struct bnx2x *bp)
6581 {
6582 u16 dsb_sp_prod_idx;
6583 /* if the other port is handling traffic,
6584 this can take a lot of time */
6585 int cnt = 500;
6586 int rc;
6587
6588 might_sleep();
6589
6590 /* Send HALT ramrod */
6591 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6592 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6593
6594 /* Wait for completion */
6595 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6596 &(bp->fp[0].state), 1);
6597 if (rc) /* timeout */
6598 return rc;
6599
6600 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6601
6602 /* Send PORT_DELETE ramrod */
6603 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6604
6605 	/* Wait for the completion to arrive on the default status block;
6606 	   we are going to reset the chip anyway,
6607 	   so there is not much to do if this times out
6608 	 */
6609 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6610 if (!cnt) {
6611 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6612 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6613 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6614 #ifdef BNX2X_STOP_ON_ERROR
6615 bnx2x_panic();
6616 #else
6617 rc = -EBUSY;
6618 #endif
6619 break;
6620 }
6621 cnt--;
6622 msleep(1);
6623 }
6624 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6625 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6626
6627 return rc;
6628 }
6629
6630 static void bnx2x_reset_func(struct bnx2x *bp)
6631 {
6632 int port = BP_PORT(bp);
6633 int func = BP_FUNC(bp);
6634 int base, i;
6635
6636 /* Configure IGU */
6637 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6638 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6639
6640 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6641
6642 /* Clear ILT */
6643 base = FUNC_ILT_BASE(func);
6644 for (i = base; i < base + ILT_PER_FUNC; i++)
6645 bnx2x_ilt_wr(bp, i, 0);
6646 }
6647
6648 static void bnx2x_reset_port(struct bnx2x *bp)
6649 {
6650 int port = BP_PORT(bp);
6651 u32 val;
6652
6653 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6654
6655 /* Do not rcv packets to BRB */
6656 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6657 /* Do not direct rcv packets that are not for MCP to the BRB */
6658 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6659 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6660
6661 /* Configure AEU */
6662 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6663
6664 msleep(100);
6665 /* Check for BRB port occupancy */
6666 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6667 if (val)
6668 DP(NETIF_MSG_IFDOWN,
6669 "BRB1 is not empty %d blocks are occupied\n", val);
6670
6671 /* TODO: Close Doorbell port? */
6672 }
6673
6674 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6675 {
6676 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6677 BP_FUNC(bp), reset_code);
6678
6679 switch (reset_code) {
6680 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6681 bnx2x_reset_port(bp);
6682 bnx2x_reset_func(bp);
6683 bnx2x_reset_common(bp);
6684 break;
6685
6686 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6687 bnx2x_reset_port(bp);
6688 bnx2x_reset_func(bp);
6689 break;
6690
6691 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6692 bnx2x_reset_func(bp);
6693 break;
6694
6695 default:
6696 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6697 break;
6698 }
6699 }
6700
6701 /* must be called with rtnl_lock */
6702 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6703 {
6704 int port = BP_PORT(bp);
6705 u32 reset_code = 0;
6706 int i, cnt, rc;
6707
6708 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6709
6710 bp->rx_mode = BNX2X_RX_MODE_NONE;
6711 bnx2x_set_storm_rx_mode(bp);
6712
6713 bnx2x_netif_stop(bp, 1);
6714
6715 del_timer_sync(&bp->timer);
6716 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6717 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6718 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6719
6720 /* Release IRQs */
6721 bnx2x_free_irq(bp);
6722
6723 /* Wait until tx fast path tasks complete */
6724 for_each_queue(bp, i) {
6725 struct bnx2x_fastpath *fp = &bp->fp[i];
6726
6727 cnt = 1000;
6728 smp_rmb();
6729 while (bnx2x_has_tx_work(fp)) {
6730
6731 bnx2x_tx_int(fp, 1000);
6732 if (!cnt) {
6733 BNX2X_ERR("timeout waiting for queue[%d]\n",
6734 i);
6735 #ifdef BNX2X_STOP_ON_ERROR
6736 bnx2x_panic();
6737 return -EBUSY;
6738 #else
6739 break;
6740 #endif
6741 }
6742 cnt--;
6743 msleep(1);
6744 smp_rmb();
6745 }
6746 }
6747 /* Give HW time to discard old tx messages */
6748 msleep(1);
6749
6750 if (CHIP_IS_E1(bp)) {
6751 struct mac_configuration_cmd *config =
6752 bnx2x_sp(bp, mcast_config);
6753
6754 bnx2x_set_mac_addr_e1(bp, 0);
6755
6756 for (i = 0; i < config->hdr.length_6b; i++)
6757 CAM_INVALIDATE(config->config_table[i]);
6758
6759 config->hdr.length_6b = i;
6760 if (CHIP_REV_IS_SLOW(bp))
6761 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6762 else
6763 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6764 config->hdr.client_id = BP_CL_ID(bp);
6765 config->hdr.reserved1 = 0;
6766
6767 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6768 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6769 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6770
6771 } else { /* E1H */
6772 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6773
6774 bnx2x_set_mac_addr_e1h(bp, 0);
6775
6776 for (i = 0; i < MC_HASH_SIZE; i++)
6777 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6778 }
6779
6780 if (unload_mode == UNLOAD_NORMAL)
6781 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6782
6783 else if (bp->flags & NO_WOL_FLAG) {
6784 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6785 if (CHIP_IS_E1H(bp))
6786 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6787
6788 } else if (bp->wol) {
6789 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6790 u8 *mac_addr = bp->dev->dev_addr;
6791 u32 val;
6792 		/* The MAC address is written to entries 1-4 to
6793 		   preserve entry 0, which is used by the PMF */
6794 u8 entry = (BP_E1HVN(bp) + 1)*8;
6795
6796 val = (mac_addr[0] << 8) | mac_addr[1];
6797 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6798
6799 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6800 (mac_addr[4] << 8) | mac_addr[5];
6801 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
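		/* Packing example: for MAC 00:11:22:33:44:55 the two words
		 * written above are 0x00000011 and 0x22334455; with
		 * BP_E1HVN(bp) == 0 they land at MAC_MATCH entry offset 8
		 * (entries 1-4, sparing entry 0).
		 */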
6802
6803 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6804
6805 } else
6806 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6807
6808 	/* Close the multi and leading connections;
6809 	   completions for ramrods are collected in a synchronous way */
6810 for_each_nondefault_queue(bp, i)
6811 if (bnx2x_stop_multi(bp, i))
6812 goto unload_error;
6813
6814 rc = bnx2x_stop_leading(bp);
6815 if (rc) {
6816 BNX2X_ERR("Stop leading failed!\n");
6817 #ifdef BNX2X_STOP_ON_ERROR
6818 return -EBUSY;
6819 #else
6820 goto unload_error;
6821 #endif
6822 }
6823
6824 unload_error:
6825 if (!BP_NOMCP(bp))
6826 reset_code = bnx2x_fw_command(bp, reset_code);
6827 else {
6828 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6829 load_count[0], load_count[1], load_count[2]);
6830 load_count[0]--;
6831 load_count[1 + port]--;
6832 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6833 load_count[0], load_count[1], load_count[2]);
6834 if (load_count[0] == 0)
6835 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6836 else if (load_count[1 + port] == 0)
6837 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6838 else
6839 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6840 }
6841
6842 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6843 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6844 bnx2x__link_reset(bp);
6845
6846 /* Reset the chip */
6847 bnx2x_reset_chip(bp, reset_code);
6848
6849 /* Report UNLOAD_DONE to MCP */
6850 if (!BP_NOMCP(bp))
6851 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6852 bp->port.pmf = 0;
6853
6854 /* Free SKBs, SGEs, TPA pool and driver internals */
6855 bnx2x_free_skbs(bp);
6856 for_each_queue(bp, i)
6857 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6858 bnx2x_free_mem(bp);
6859
6860 bp->state = BNX2X_STATE_CLOSED;
6861
6862 netif_carrier_off(bp->dev);
6863
6864 return 0;
6865 }
6866
6867 static void bnx2x_reset_task(struct work_struct *work)
6868 {
6869 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6870
6871 #ifdef BNX2X_STOP_ON_ERROR
6872 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6873 " so reset not done to allow debug dump,\n"
6874 KERN_ERR " you will need to reboot when done\n");
6875 return;
6876 #endif
6877
6878 rtnl_lock();
6879
6880 if (!netif_running(bp->dev))
6881 goto reset_task_exit;
6882
6883 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6884 bnx2x_nic_load(bp, LOAD_NORMAL);
6885
6886 reset_task_exit:
6887 rtnl_unlock();
6888 }
6889
6890 /* end of nic load/unload */
6891
6892 /* ethtool_ops */
6893
6894 /*
6895 * Init service functions
6896 */
6897
6898 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6899 {
6900 u32 val;
6901
6902 /* Check if there is any driver already loaded */
6903 val = REG_RD(bp, MISC_REG_UNPREPARED);
6904 if (val == 0x1) {
6905 /* Check if it is the UNDI driver
6906 * UNDI driver initializes CID offset for normal bell to 0x7
6907 */
6908 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6909 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6910 if (val == 0x7) {
6911 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6912 /* save our func */
6913 int func = BP_FUNC(bp);
6914 u32 swap_en;
6915 u32 swap_val;
6916
6917 /* clear the UNDI indication */
6918 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6919
6920 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6921
6922 /* try unload UNDI on port 0 */
6923 bp->func = 0;
6924 bp->fw_seq =
6925 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6926 DRV_MSG_SEQ_NUMBER_MASK);
6927 reset_code = bnx2x_fw_command(bp, reset_code);
6928
6929 /* if UNDI is loaded on the other port */
6930 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6931
6932 /* send "DONE" for previous unload */
6933 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6934
6935 /* unload UNDI on port 1 */
6936 bp->func = 1;
6937 bp->fw_seq =
6938 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6939 DRV_MSG_SEQ_NUMBER_MASK);
6940 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6941
6942 bnx2x_fw_command(bp, reset_code);
6943 }
6944
6945 /* now it's safe to release the lock */
6946 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6947
6948 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6949 HC_REG_CONFIG_0), 0x1000);
6950
6951 /* close input traffic and wait for it */
6952 /* Do not rcv packets to BRB */
6953 REG_WR(bp,
6954 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6955 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6956 /* Do not direct rcv packets that are not for MCP to
6957 * the BRB */
6958 REG_WR(bp,
6959 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6960 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6961 /* clear AEU */
6962 REG_WR(bp,
6963 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6964 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6965 msleep(10);
6966
6967 /* save NIG port swap info */
6968 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6969 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6970 /* reset device */
6971 REG_WR(bp,
6972 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6973 0xd3ffffff);
6974 REG_WR(bp,
6975 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6976 0x1403);
6977 /* take the NIG out of reset and restore swap values */
6978 REG_WR(bp,
6979 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6980 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6981 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6982 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6983
6984 /* send unload done to the MCP */
6985 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6986
6987 /* restore our func and fw_seq */
6988 bp->func = func;
6989 bp->fw_seq =
6990 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6991 DRV_MSG_SEQ_NUMBER_MASK);
6992
6993 } else
6994 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6995 }
6996 }
6997
6998 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6999 {
7000 u32 val, val2, val3, val4, id;
7001 u16 pmc;
7002
7003 /* Get the chip revision id and number. */
7004 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7005 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7006 id = ((val & 0xffff) << 16);
7007 val = REG_RD(bp, MISC_REG_CHIP_REV);
7008 id |= ((val & 0xf) << 12);
7009 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7010 id |= ((val & 0xff) << 4);
7011 val = REG_RD(bp, MISC_REG_BOND_ID);
7012 id |= (val & 0xf);
7013 bp->common.chip_id = id;
7014 bp->link_params.chip_id = bp->common.chip_id;
7015 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
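	/* Packing example (hypothetical register values): chip_num 0x164e,
	 * rev 0x0, metal 0x00, bond_id 0x0 compose to chip_id 0x164e0000,
	 * matching the num:31-16 rev:15-12 metal:11-4 bond:3-0 layout
	 * described above.
	 */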
7016
7017 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7018 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7019 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7020 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7021 bp->common.flash_size, bp->common.flash_size);
7022
7023 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7024 bp->link_params.shmem_base = bp->common.shmem_base;
7025 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7026
7027 if (!bp->common.shmem_base ||
7028 (bp->common.shmem_base < 0xA0000) ||
7029 (bp->common.shmem_base >= 0xC0000)) {
7030 BNX2X_DEV_INFO("MCP not active\n");
7031 bp->flags |= NO_MCP_FLAG;
7032 return;
7033 }
7034
7035 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7036 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7037 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7038 BNX2X_ERR("BAD MCP validity signature\n");
7039
7040 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7041 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7042
7043 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7044 bp->common.hw_config, bp->common.board);
7045
7046 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7047 SHARED_HW_CFG_LED_MODE_MASK) >>
7048 SHARED_HW_CFG_LED_MODE_SHIFT);
7049
7050 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7051 bp->common.bc_ver = val;
7052 BNX2X_DEV_INFO("bc_ver %X\n", val);
7053 if (val < BNX2X_BC_VER) {
7054 		/* for now only warn;
7055 		 * later we might need to enforce this */
7056 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7057 " please upgrade BC\n", BNX2X_BC_VER, val);
7058 }
7059
7060 if (BP_E1HVN(bp) == 0) {
7061 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7062 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7063 } else {
7064 /* no WOL capability for E1HVN != 0 */
7065 bp->flags |= NO_WOL_FLAG;
7066 }
7067 BNX2X_DEV_INFO("%sWoL capable\n",
7068 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7069
7070 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7071 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7072 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7073 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7074
7075 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7076 val, val2, val3, val4);
7077 }
7078
7079 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7080 u32 switch_cfg)
7081 {
7082 int port = BP_PORT(bp);
7083 u32 ext_phy_type;
7084
7085 switch (switch_cfg) {
7086 case SWITCH_CFG_1G:
7087 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7088
7089 ext_phy_type =
7090 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7091 switch (ext_phy_type) {
7092 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7093 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7094 ext_phy_type);
7095
7096 bp->port.supported |= (SUPPORTED_10baseT_Half |
7097 SUPPORTED_10baseT_Full |
7098 SUPPORTED_100baseT_Half |
7099 SUPPORTED_100baseT_Full |
7100 SUPPORTED_1000baseT_Full |
7101 SUPPORTED_2500baseX_Full |
7102 SUPPORTED_TP |
7103 SUPPORTED_FIBRE |
7104 SUPPORTED_Autoneg |
7105 SUPPORTED_Pause |
7106 SUPPORTED_Asym_Pause);
7107 break;
7108
7109 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7110 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7111 ext_phy_type);
7112
7113 bp->port.supported |= (SUPPORTED_10baseT_Half |
7114 SUPPORTED_10baseT_Full |
7115 SUPPORTED_100baseT_Half |
7116 SUPPORTED_100baseT_Full |
7117 SUPPORTED_1000baseT_Full |
7118 SUPPORTED_TP |
7119 SUPPORTED_FIBRE |
7120 SUPPORTED_Autoneg |
7121 SUPPORTED_Pause |
7122 SUPPORTED_Asym_Pause);
7123 break;
7124
7125 default:
7126 BNX2X_ERR("NVRAM config error. "
7127 "BAD SerDes ext_phy_config 0x%x\n",
7128 bp->link_params.ext_phy_config);
7129 return;
7130 }
7131
7132 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7133 port*0x10);
7134 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7135 break;
7136
7137 case SWITCH_CFG_10G:
7138 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7139
7140 ext_phy_type =
7141 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7142 switch (ext_phy_type) {
7143 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7144 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7145 ext_phy_type);
7146
7147 bp->port.supported |= (SUPPORTED_10baseT_Half |
7148 SUPPORTED_10baseT_Full |
7149 SUPPORTED_100baseT_Half |
7150 SUPPORTED_100baseT_Full |
7151 SUPPORTED_1000baseT_Full |
7152 SUPPORTED_2500baseX_Full |
7153 SUPPORTED_10000baseT_Full |
7154 SUPPORTED_TP |
7155 SUPPORTED_FIBRE |
7156 SUPPORTED_Autoneg |
7157 SUPPORTED_Pause |
7158 SUPPORTED_Asym_Pause);
7159 break;
7160
7161 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7162 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7163 ext_phy_type);
7164
7165 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7166 SUPPORTED_FIBRE |
7167 SUPPORTED_Pause |
7168 SUPPORTED_Asym_Pause);
7169 break;
7170
7171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7172 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7173 ext_phy_type);
7174
7175 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7176 SUPPORTED_1000baseT_Full |
7177 SUPPORTED_FIBRE |
7178 SUPPORTED_Pause |
7179 SUPPORTED_Asym_Pause);
7180 break;
7181
7182 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7183 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7184 ext_phy_type);
7185
7186 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7187 SUPPORTED_1000baseT_Full |
7188 SUPPORTED_FIBRE |
7189 SUPPORTED_Autoneg |
7190 SUPPORTED_Pause |
7191 SUPPORTED_Asym_Pause);
7192 break;
7193
7194 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7195 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7196 ext_phy_type);
7197
7198 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7199 SUPPORTED_2500baseX_Full |
7200 SUPPORTED_1000baseT_Full |
7201 SUPPORTED_FIBRE |
7202 SUPPORTED_Autoneg |
7203 SUPPORTED_Pause |
7204 SUPPORTED_Asym_Pause);
7205 break;
7206
7207 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7208 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7209 ext_phy_type);
7210
7211 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7212 SUPPORTED_TP |
7213 SUPPORTED_Autoneg |
7214 SUPPORTED_Pause |
7215 SUPPORTED_Asym_Pause);
7216 break;
7217
7218 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7219 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7220 bp->link_params.ext_phy_config);
7221 break;
7222
7223 default:
7224 BNX2X_ERR("NVRAM config error. "
7225 "BAD XGXS ext_phy_config 0x%x\n",
7226 bp->link_params.ext_phy_config);
7227 return;
7228 }
7229
7230 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7231 port*0x18);
7232 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7233
7234 break;
7235
7236 default:
7237 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7238 bp->port.link_config);
7239 return;
7240 }
7241 bp->link_params.phy_addr = bp->port.phy_addr;
7242
7243 /* mask what we support according to speed_cap_mask */
7244 if (!(bp->link_params.speed_cap_mask &
7245 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7246 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7247
7248 if (!(bp->link_params.speed_cap_mask &
7249 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7250 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7251
7252 if (!(bp->link_params.speed_cap_mask &
7253 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7254 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7255
7256 if (!(bp->link_params.speed_cap_mask &
7257 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7258 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7259
7260 if (!(bp->link_params.speed_cap_mask &
7261 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7262 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7263 SUPPORTED_1000baseT_Full);
7264
7265 if (!(bp->link_params.speed_cap_mask &
7266 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7267 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7268
7269 if (!(bp->link_params.speed_cap_mask &
7270 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7271 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7272
7273 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7274 }
7275
7276 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7277 {
7278 bp->link_params.req_duplex = DUPLEX_FULL;
7279
7280 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7281 case PORT_FEATURE_LINK_SPEED_AUTO:
7282 if (bp->port.supported & SUPPORTED_Autoneg) {
7283 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7284 bp->port.advertising = bp->port.supported;
7285 } else {
7286 u32 ext_phy_type =
7287 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7288
7289 if ((ext_phy_type ==
7290 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7291 (ext_phy_type ==
7292 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7293 /* force 10G, no AN */
7294 bp->link_params.req_line_speed = SPEED_10000;
7295 bp->port.advertising =
7296 (ADVERTISED_10000baseT_Full |
7297 ADVERTISED_FIBRE);
7298 break;
7299 }
7300 BNX2X_ERR("NVRAM config error. "
7301 "Invalid link_config 0x%x"
7302 " Autoneg not supported\n",
7303 bp->port.link_config);
7304 return;
7305 }
7306 break;
7307
7308 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7309 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7310 bp->link_params.req_line_speed = SPEED_10;
7311 bp->port.advertising = (ADVERTISED_10baseT_Full |
7312 ADVERTISED_TP);
7313 } else {
7314 BNX2X_ERR("NVRAM config error. "
7315 "Invalid link_config 0x%x"
7316 " speed_cap_mask 0x%x\n",
7317 bp->port.link_config,
7318 bp->link_params.speed_cap_mask);
7319 return;
7320 }
7321 break;
7322
7323 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7324 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7325 bp->link_params.req_line_speed = SPEED_10;
7326 bp->link_params.req_duplex = DUPLEX_HALF;
7327 bp->port.advertising = (ADVERTISED_10baseT_Half |
7328 ADVERTISED_TP);
7329 } else {
7330 BNX2X_ERR("NVRAM config error. "
7331 "Invalid link_config 0x%x"
7332 " speed_cap_mask 0x%x\n",
7333 bp->port.link_config,
7334 bp->link_params.speed_cap_mask);
7335 return;
7336 }
7337 break;
7338
7339 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7340 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7341 bp->link_params.req_line_speed = SPEED_100;
7342 bp->port.advertising = (ADVERTISED_100baseT_Full |
7343 ADVERTISED_TP);
7344 } else {
7345 BNX2X_ERR("NVRAM config error. "
7346 "Invalid link_config 0x%x"
7347 " speed_cap_mask 0x%x\n",
7348 bp->port.link_config,
7349 bp->link_params.speed_cap_mask);
7350 return;
7351 }
7352 break;
7353
7354 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7355 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7356 bp->link_params.req_line_speed = SPEED_100;
7357 bp->link_params.req_duplex = DUPLEX_HALF;
7358 bp->port.advertising = (ADVERTISED_100baseT_Half |
7359 ADVERTISED_TP);
7360 } else {
7361 BNX2X_ERR("NVRAM config error. "
7362 "Invalid link_config 0x%x"
7363 " speed_cap_mask 0x%x\n",
7364 bp->port.link_config,
7365 bp->link_params.speed_cap_mask);
7366 return;
7367 }
7368 break;
7369
7370 case PORT_FEATURE_LINK_SPEED_1G:
7371 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7372 bp->link_params.req_line_speed = SPEED_1000;
7373 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7374 ADVERTISED_TP);
7375 } else {
7376 BNX2X_ERR("NVRAM config error. "
7377 "Invalid link_config 0x%x"
7378 " speed_cap_mask 0x%x\n",
7379 bp->port.link_config,
7380 bp->link_params.speed_cap_mask);
7381 return;
7382 }
7383 break;
7384
7385 case PORT_FEATURE_LINK_SPEED_2_5G:
7386 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7387 bp->link_params.req_line_speed = SPEED_2500;
7388 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7389 ADVERTISED_TP);
7390 } else {
7391 BNX2X_ERR("NVRAM config error. "
7392 "Invalid link_config 0x%x"
7393 " speed_cap_mask 0x%x\n",
7394 bp->port.link_config,
7395 bp->link_params.speed_cap_mask);
7396 return;
7397 }
7398 break;
7399
7400 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7401 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7402 case PORT_FEATURE_LINK_SPEED_10G_KR:
7403 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7404 bp->link_params.req_line_speed = SPEED_10000;
7405 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7406 ADVERTISED_FIBRE);
7407 } else {
7408 BNX2X_ERR("NVRAM config error. "
7409 "Invalid link_config 0x%x"
7410 " speed_cap_mask 0x%x\n",
7411 bp->port.link_config,
7412 bp->link_params.speed_cap_mask);
7413 return;
7414 }
7415 break;
7416
7417 default:
7418 BNX2X_ERR("NVRAM config error. "
7419 "BAD link speed link_config 0x%x\n",
7420 bp->port.link_config);
7421 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7422 bp->port.advertising = bp->port.supported;
7423 break;
7424 }
7425
7426 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7427 PORT_FEATURE_FLOW_CONTROL_MASK);
7428 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7429 !(bp->port.supported & SUPPORTED_Autoneg))
7430 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7431
7432 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7433 " advertising 0x%x\n",
7434 bp->link_params.req_line_speed,
7435 bp->link_params.req_duplex,
7436 bp->link_params.req_flow_ctrl, bp->port.advertising);
7437 }
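
/* Example (illustrative only): a link_config whose speed field is
 * PORT_FEATURE_LINK_SPEED_1G, on a port whose supported mask includes
 * SUPPORTED_1000baseT_Full, resolves to req_line_speed = SPEED_1000,
 * req_duplex = DUPLEX_FULL and advertising = ADVERTISED_1000baseT_Full |
 * ADVERTISED_TP.  If the capability bit is missing, the NVRAM config
 * error is logged and the function returns with the request incomplete.
 */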
7438
7439 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7440 {
7441 int port = BP_PORT(bp);
7442 u32 val, val2;
7443
7444 bp->link_params.bp = bp;
7445 bp->link_params.port = port;
7446
7447 bp->link_params.serdes_config =
7448 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7449 bp->link_params.lane_config =
7450 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7451 bp->link_params.ext_phy_config =
7452 SHMEM_RD(bp,
7453 dev_info.port_hw_config[port].external_phy_config);
7454 bp->link_params.speed_cap_mask =
7455 SHMEM_RD(bp,
7456 dev_info.port_hw_config[port].speed_capability_mask);
7457
7458 bp->port.link_config =
7459 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7460
7461 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7462 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7463 " link_config 0x%08x\n",
7464 bp->link_params.serdes_config,
7465 bp->link_params.lane_config,
7466 bp->link_params.ext_phy_config,
7467 bp->link_params.speed_cap_mask, bp->port.link_config);
7468
7469 bp->link_params.switch_cfg = (bp->port.link_config &
7470 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7471 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7472
7473 bnx2x_link_settings_requested(bp);
7474
7475 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7476 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7477 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7478 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7479 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7480 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7481 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7482 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7483 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7484 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7485 }
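
/* Example (illustrative only, hypothetical values): mac_upper holds the two
 * most significant MAC bytes in its low 16 bits and mac_lower the remaining
 * four, so val2 = 0x00001a2b with val = 0x3c4d5e6f yields the station
 * address 1a:2b:3c:4d:5e:6f after the byte extraction above.
 */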
7486
7487 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7488 {
7489 int func = BP_FUNC(bp);
7490 u32 val, val2;
7491 int rc = 0;
7492
7493 bnx2x_get_common_hwinfo(bp);
7494
7495 bp->e1hov = 0;
7496 bp->e1hmf = 0;
7497 if (CHIP_IS_E1H(bp)) {
7498 bp->mf_config =
7499 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7500
7501 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7502 FUNC_MF_CFG_E1HOV_TAG_MASK);
7503 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7504
7505 bp->e1hov = val;
7506 bp->e1hmf = 1;
7507 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7508 "(0x%04x)\n",
7509 func, bp->e1hov, bp->e1hov);
7510 } else {
7511 BNX2X_DEV_INFO("Single function mode\n");
7512 if (BP_E1HVN(bp)) {
7513 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7514 " aborting\n", func);
7515 rc = -EPERM;
7516 }
7517 }
7518 }
7519
7520 if (!BP_NOMCP(bp)) {
7521 bnx2x_get_port_hwinfo(bp);
7522
7523 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7524 DRV_MSG_SEQ_NUMBER_MASK);
7525 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7526 }
7527
7528 if (IS_E1HMF(bp)) {
7529 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7530 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7531 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7532 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7533 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7534 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7535 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7536 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7537 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7538 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7539 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7540 ETH_ALEN);
7541 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7542 ETH_ALEN);
7543 }
7544
7545 return rc;
7546 }
7547
7548 if (BP_NOMCP(bp)) {
7549 /* only supposed to happen on emulation/FPGA */
7550 		BNX2X_ERR("warning: random MAC workaround active\n");
7551 random_ether_addr(bp->dev->dev_addr);
7552 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7553 }
7554
7555 return rc;
7556 }
7557
7558 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7559 {
7560 int func = BP_FUNC(bp);
7561 int rc;
7562
7563 /* Disable interrupt handling until HW is initialized */
7564 atomic_set(&bp->intr_sem, 1);
7565
7566 mutex_init(&bp->port.phy_mutex);
7567
7568 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7569 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7570
7571 rc = bnx2x_get_hwinfo(bp);
7572
7573 	/* need to reset the chip if UNDI was active */
7574 if (!BP_NOMCP(bp))
7575 bnx2x_undi_unload(bp);
7576
7577 if (CHIP_REV_IS_FPGA(bp))
7578 printk(KERN_ERR PFX "FPGA detected\n");
7579
7580 if (BP_NOMCP(bp) && (func == 0))
7581 printk(KERN_ERR PFX
7582 "MCP disabled, must load devices in order!\n");
7583
7584 /* Set TPA flags */
7585 if (disable_tpa) {
7586 bp->flags &= ~TPA_ENABLE_FLAG;
7587 bp->dev->features &= ~NETIF_F_LRO;
7588 } else {
7589 bp->flags |= TPA_ENABLE_FLAG;
7590 bp->dev->features |= NETIF_F_LRO;
7591 }
7592
7593
7594 bp->tx_ring_size = MAX_TX_AVAIL;
7595 bp->rx_ring_size = MAX_RX_AVAIL;
7596
7597 bp->rx_csum = 1;
7598 bp->rx_offset = 0;
7599
7600 bp->tx_ticks = 50;
7601 bp->rx_ticks = 25;
7602
7603 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7604 bp->current_interval = (poll ? poll : bp->timer_interval);
7605
7606 init_timer(&bp->timer);
7607 bp->timer.expires = jiffies + bp->current_interval;
7608 bp->timer.data = (unsigned long) bp;
7609 bp->timer.function = bnx2x_timer;
7610
7611 return rc;
7612 }
7613
7614 /*
7615 * ethtool service functions
7616 */
7617
7618 /* All ethtool functions called with rtnl_lock */
7619
7620 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7621 {
7622 struct bnx2x *bp = netdev_priv(dev);
7623
7624 cmd->supported = bp->port.supported;
7625 cmd->advertising = bp->port.advertising;
7626
7627 if (netif_carrier_ok(dev)) {
7628 cmd->speed = bp->link_vars.line_speed;
7629 cmd->duplex = bp->link_vars.duplex;
7630 } else {
7631 cmd->speed = bp->link_params.req_line_speed;
7632 cmd->duplex = bp->link_params.req_duplex;
7633 }
7634 if (IS_E1HMF(bp)) {
7635 u16 vn_max_rate;
7636
7637 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7638 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7639 if (vn_max_rate < cmd->speed)
7640 cmd->speed = vn_max_rate;
7641 }
7642
7643 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7644 u32 ext_phy_type =
7645 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7646
7647 switch (ext_phy_type) {
7648 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7649 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7650 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7652 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7653 cmd->port = PORT_FIBRE;
7654 break;
7655
7656 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7657 cmd->port = PORT_TP;
7658 break;
7659
7660 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7661 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7662 bp->link_params.ext_phy_config);
7663 break;
7664
7665 default:
7666 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7667 bp->link_params.ext_phy_config);
7668 break;
7669 }
7670 } else
7671 cmd->port = PORT_TP;
7672
7673 cmd->phy_address = bp->port.phy_addr;
7674 cmd->transceiver = XCVR_INTERNAL;
7675
7676 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7677 cmd->autoneg = AUTONEG_ENABLE;
7678 else
7679 cmd->autoneg = AUTONEG_DISABLE;
7680
7681 cmd->maxtxpkt = 0;
7682 cmd->maxrxpkt = 0;
7683
7684 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7685 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7686 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7687 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7688 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7689 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7690 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7691
7692 return 0;
7693 }
7694
7695 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7696 {
7697 struct bnx2x *bp = netdev_priv(dev);
7698 u32 advertising;
7699
7700 if (IS_E1HMF(bp))
7701 return 0;
7702
7703 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7704 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7705 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7706 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7707 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7708 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7709 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7710
7711 if (cmd->autoneg == AUTONEG_ENABLE) {
7712 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7713 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7714 return -EINVAL;
7715 }
7716
7717 /* advertise the requested speed and duplex if supported */
7718 cmd->advertising &= bp->port.supported;
7719
7720 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7721 bp->link_params.req_duplex = DUPLEX_FULL;
7722 bp->port.advertising |= (ADVERTISED_Autoneg |
7723 cmd->advertising);
7724
7725 } else { /* forced speed */
7726 /* advertise the requested speed and duplex if supported */
7727 switch (cmd->speed) {
7728 case SPEED_10:
7729 if (cmd->duplex == DUPLEX_FULL) {
7730 if (!(bp->port.supported &
7731 SUPPORTED_10baseT_Full)) {
7732 DP(NETIF_MSG_LINK,
7733 "10M full not supported\n");
7734 return -EINVAL;
7735 }
7736
7737 advertising = (ADVERTISED_10baseT_Full |
7738 ADVERTISED_TP);
7739 } else {
7740 if (!(bp->port.supported &
7741 SUPPORTED_10baseT_Half)) {
7742 DP(NETIF_MSG_LINK,
7743 "10M half not supported\n");
7744 return -EINVAL;
7745 }
7746
7747 advertising = (ADVERTISED_10baseT_Half |
7748 ADVERTISED_TP);
7749 }
7750 break;
7751
7752 case SPEED_100:
7753 if (cmd->duplex == DUPLEX_FULL) {
7754 if (!(bp->port.supported &
7755 SUPPORTED_100baseT_Full)) {
7756 DP(NETIF_MSG_LINK,
7757 "100M full not supported\n");
7758 return -EINVAL;
7759 }
7760
7761 advertising = (ADVERTISED_100baseT_Full |
7762 ADVERTISED_TP);
7763 } else {
7764 if (!(bp->port.supported &
7765 SUPPORTED_100baseT_Half)) {
7766 DP(NETIF_MSG_LINK,
7767 "100M half not supported\n");
7768 return -EINVAL;
7769 }
7770
7771 advertising = (ADVERTISED_100baseT_Half |
7772 ADVERTISED_TP);
7773 }
7774 break;
7775
7776 case SPEED_1000:
7777 if (cmd->duplex != DUPLEX_FULL) {
7778 DP(NETIF_MSG_LINK, "1G half not supported\n");
7779 return -EINVAL;
7780 }
7781
7782 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7783 DP(NETIF_MSG_LINK, "1G full not supported\n");
7784 return -EINVAL;
7785 }
7786
7787 advertising = (ADVERTISED_1000baseT_Full |
7788 ADVERTISED_TP);
7789 break;
7790
7791 case SPEED_2500:
7792 if (cmd->duplex != DUPLEX_FULL) {
7793 DP(NETIF_MSG_LINK,
7794 "2.5G half not supported\n");
7795 return -EINVAL;
7796 }
7797
7798 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7799 DP(NETIF_MSG_LINK,
7800 "2.5G full not supported\n");
7801 return -EINVAL;
7802 }
7803
7804 advertising = (ADVERTISED_2500baseX_Full |
7805 ADVERTISED_TP);
7806 break;
7807
7808 case SPEED_10000:
7809 if (cmd->duplex != DUPLEX_FULL) {
7810 DP(NETIF_MSG_LINK, "10G half not supported\n");
7811 return -EINVAL;
7812 }
7813
7814 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7815 DP(NETIF_MSG_LINK, "10G full not supported\n");
7816 return -EINVAL;
7817 }
7818
7819 advertising = (ADVERTISED_10000baseT_Full |
7820 ADVERTISED_FIBRE);
7821 break;
7822
7823 default:
7824 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7825 return -EINVAL;
7826 }
7827
7828 bp->link_params.req_line_speed = cmd->speed;
7829 bp->link_params.req_duplex = cmd->duplex;
7830 bp->port.advertising = advertising;
7831 }
7832
7833 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7834 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7835 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7836 bp->port.advertising);
7837
7838 if (netif_running(dev)) {
7839 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7840 bnx2x_link_set(bp);
7841 }
7842
7843 return 0;
7844 }
7845
7846 #define PHY_FW_VER_LEN 10
7847
7848 static void bnx2x_get_drvinfo(struct net_device *dev,
7849 struct ethtool_drvinfo *info)
7850 {
7851 struct bnx2x *bp = netdev_priv(dev);
7852 u8 phy_fw_ver[PHY_FW_VER_LEN];
7853
7854 strcpy(info->driver, DRV_MODULE_NAME);
7855 strcpy(info->version, DRV_MODULE_VERSION);
7856
7857 phy_fw_ver[0] = '\0';
7858 if (bp->port.pmf) {
7859 bnx2x_acquire_phy_lock(bp);
7860 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7861 (bp->state != BNX2X_STATE_CLOSED),
7862 phy_fw_ver, PHY_FW_VER_LEN);
7863 bnx2x_release_phy_lock(bp);
7864 }
7865
7866 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7867 (bp->common.bc_ver & 0xff0000) >> 16,
7868 (bp->common.bc_ver & 0xff00) >> 8,
7869 (bp->common.bc_ver & 0xff),
7870 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7871 strcpy(info->bus_info, pci_name(bp->pdev));
7872 info->n_stats = BNX2X_NUM_STATS;
7873 info->testinfo_len = BNX2X_NUM_TESTS;
7874 info->eedump_len = bp->common.flash_size;
7875 info->regdump_len = 0;
7876 }
7877
7878 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7879 {
7880 struct bnx2x *bp = netdev_priv(dev);
7881
7882 if (bp->flags & NO_WOL_FLAG) {
7883 wol->supported = 0;
7884 wol->wolopts = 0;
7885 } else {
7886 wol->supported = WAKE_MAGIC;
7887 if (bp->wol)
7888 wol->wolopts = WAKE_MAGIC;
7889 else
7890 wol->wolopts = 0;
7891 }
7892 memset(&wol->sopass, 0, sizeof(wol->sopass));
7893 }
7894
7895 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7896 {
7897 struct bnx2x *bp = netdev_priv(dev);
7898
7899 if (wol->wolopts & ~WAKE_MAGIC)
7900 return -EINVAL;
7901
7902 if (wol->wolopts & WAKE_MAGIC) {
7903 if (bp->flags & NO_WOL_FLAG)
7904 return -EINVAL;
7905
7906 bp->wol = 1;
7907 } else
7908 bp->wol = 0;
7909
7910 return 0;
7911 }
7912
7913 static u32 bnx2x_get_msglevel(struct net_device *dev)
7914 {
7915 struct bnx2x *bp = netdev_priv(dev);
7916
7917 return bp->msglevel;
7918 }
7919
7920 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7921 {
7922 struct bnx2x *bp = netdev_priv(dev);
7923
7924 if (capable(CAP_NET_ADMIN))
7925 bp->msglevel = level;
7926 }
7927
7928 static int bnx2x_nway_reset(struct net_device *dev)
7929 {
7930 struct bnx2x *bp = netdev_priv(dev);
7931
7932 if (!bp->port.pmf)
7933 return 0;
7934
7935 if (netif_running(dev)) {
7936 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7937 bnx2x_link_set(bp);
7938 }
7939
7940 return 0;
7941 }
7942
7943 static int bnx2x_get_eeprom_len(struct net_device *dev)
7944 {
7945 struct bnx2x *bp = netdev_priv(dev);
7946
7947 return bp->common.flash_size;
7948 }
7949
7950 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7951 {
7952 int port = BP_PORT(bp);
7953 int count, i;
7954 u32 val = 0;
7955
7956 /* adjust timeout for emulation/FPGA */
7957 count = NVRAM_TIMEOUT_COUNT;
7958 if (CHIP_REV_IS_SLOW(bp))
7959 count *= 100;
7960
7961 /* request access to nvram interface */
7962 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7963 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7964
7965 for (i = 0; i < count*10; i++) {
7966 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7967 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7968 break;
7969
7970 udelay(5);
7971 }
7972
7973 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7974 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7975 return -EBUSY;
7976 }
7977
7978 return 0;
7979 }
7980
7981 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7982 {
7983 int port = BP_PORT(bp);
7984 int count, i;
7985 u32 val = 0;
7986
7987 /* adjust timeout for emulation/FPGA */
7988 count = NVRAM_TIMEOUT_COUNT;
7989 if (CHIP_REV_IS_SLOW(bp))
7990 count *= 100;
7991
7992 /* relinquish nvram interface */
7993 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7994 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7995
7996 for (i = 0; i < count*10; i++) {
7997 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7998 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7999 break;
8000
8001 udelay(5);
8002 }
8003
8004 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8005 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8006 return -EBUSY;
8007 }
8008
8009 return 0;
8010 }
8011
8012 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8013 {
8014 u32 val;
8015
8016 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8017
8018 /* enable both bits, even on read */
8019 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8020 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8021 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8022 }
8023
8024 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8025 {
8026 u32 val;
8027
8028 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8029
8030 /* disable both bits, even after read */
8031 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8032 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8033 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8034 }
8035
8036 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8037 u32 cmd_flags)
8038 {
8039 int count, i, rc;
8040 u32 val;
8041
8042 /* build the command word */
8043 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8044
8045 /* need to clear DONE bit separately */
8046 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8047
8048 /* address of the NVRAM to read from */
8049 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8050 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8051
8052 /* issue a read command */
8053 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8054
8055 /* adjust timeout for emulation/FPGA */
8056 count = NVRAM_TIMEOUT_COUNT;
8057 if (CHIP_REV_IS_SLOW(bp))
8058 count *= 100;
8059
8060 /* wait for completion */
8061 *ret_val = 0;
8062 rc = -EBUSY;
8063 for (i = 0; i < count; i++) {
8064 udelay(5);
8065 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8066
8067 if (val & MCPR_NVM_COMMAND_DONE) {
8068 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8069 			/* we read nvram data in cpu order
8070 			 * but ethtool sees it as an array of bytes;
8071 			 * converting to big-endian does the work */
8072 val = cpu_to_be32(val);
8073 *ret_val = val;
8074 rc = 0;
8075 break;
8076 }
8077 }
8078
8079 return rc;
8080 }
8081
8082 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8083 int buf_size)
8084 {
8085 int rc;
8086 u32 cmd_flags;
8087 u32 val;
8088
8089 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8090 DP(BNX2X_MSG_NVM,
8091 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8092 offset, buf_size);
8093 return -EINVAL;
8094 }
8095
8096 if (offset + buf_size > bp->common.flash_size) {
8097 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8098 " buf_size (0x%x) > flash_size (0x%x)\n",
8099 offset, buf_size, bp->common.flash_size);
8100 return -EINVAL;
8101 }
8102
8103 /* request access to nvram interface */
8104 rc = bnx2x_acquire_nvram_lock(bp);
8105 if (rc)
8106 return rc;
8107
8108 /* enable access to nvram interface */
8109 bnx2x_enable_nvram_access(bp);
8110
8111 /* read the first word(s) */
8112 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8113 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8114 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8115 memcpy(ret_buf, &val, 4);
8116
8117 /* advance to the next dword */
8118 offset += sizeof(u32);
8119 ret_buf += sizeof(u32);
8120 buf_size -= sizeof(u32);
8121 cmd_flags = 0;
8122 }
8123
8124 if (rc == 0) {
8125 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8126 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8127 memcpy(ret_buf, &val, 4);
8128 }
8129
8130 /* disable access to nvram interface */
8131 bnx2x_disable_nvram_access(bp);
8132 bnx2x_release_nvram_lock(bp);
8133
8134 return rc;
8135 }
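
/* Illustrative sketch (hypothetical helper, guarded out so it is never
 * compiled): how a caller might use bnx2x_nvram_read() above to fetch and
 * check the NVRAM magic dword, mirroring what bnx2x_test_nvram() does
 * further below.
 */
#if 0
static int bnx2x_example_check_magic(struct bnx2x *bp)
{
	__be32 magic;
	int rc;

	/* offset and length must both be dword aligned */
	rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic));
	if (rc)
		return rc;

	/* the read helper returns the data in big-endian byte order */
	if (be32_to_cpu(magic) != 0x669955aa)
		return -ENODEV;

	return 0;
}
#endif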
8136
8137 static int bnx2x_get_eeprom(struct net_device *dev,
8138 struct ethtool_eeprom *eeprom, u8 *eebuf)
8139 {
8140 struct bnx2x *bp = netdev_priv(dev);
8141 int rc;
8142
8143 if (!netif_running(dev))
8144 return -EAGAIN;
8145
8146 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8147 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8148 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8149 eeprom->len, eeprom->len);
8150
8151 /* parameters already validated in ethtool_get_eeprom */
8152
8153 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8154
8155 return rc;
8156 }
8157
8158 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8159 u32 cmd_flags)
8160 {
8161 int count, i, rc;
8162
8163 /* build the command word */
8164 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8165
8166 /* need to clear DONE bit separately */
8167 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8168
8169 /* write the data */
8170 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8171
8172 /* address of the NVRAM to write to */
8173 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8174 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8175
8176 /* issue the write command */
8177 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8178
8179 /* adjust timeout for emulation/FPGA */
8180 count = NVRAM_TIMEOUT_COUNT;
8181 if (CHIP_REV_IS_SLOW(bp))
8182 count *= 100;
8183
8184 /* wait for completion */
8185 rc = -EBUSY;
8186 for (i = 0; i < count; i++) {
8187 udelay(5);
8188 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8189 if (val & MCPR_NVM_COMMAND_DONE) {
8190 rc = 0;
8191 break;
8192 }
8193 }
8194
8195 return rc;
8196 }
8197
8198 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
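
/* Example (illustrative only): BYTE_OFFSET() gives the bit position of a
 * byte within its containing dword, e.g. BYTE_OFFSET(0x102) = 8 * 2 = 16;
 * the matching dword address is (offset & ~0x03) = 0x100.
 */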
8199
8200 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8201 int buf_size)
8202 {
8203 int rc;
8204 u32 cmd_flags;
8205 u32 align_offset;
8206 u32 val;
8207
8208 if (offset + buf_size > bp->common.flash_size) {
8209 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8210 " buf_size (0x%x) > flash_size (0x%x)\n",
8211 offset, buf_size, bp->common.flash_size);
8212 return -EINVAL;
8213 }
8214
8215 /* request access to nvram interface */
8216 rc = bnx2x_acquire_nvram_lock(bp);
8217 if (rc)
8218 return rc;
8219
8220 /* enable access to nvram interface */
8221 bnx2x_enable_nvram_access(bp);
8222
8223 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8224 align_offset = (offset & ~0x03);
8225 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8226
8227 if (rc == 0) {
8228 val &= ~(0xff << BYTE_OFFSET(offset));
8229 val |= (*data_buf << BYTE_OFFSET(offset));
8230
8231 /* nvram data is returned as an array of bytes
8232 * convert it back to cpu order */
8233 val = be32_to_cpu(val);
8234
8235 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8236 cmd_flags);
8237 }
8238
8239 /* disable access to nvram interface */
8240 bnx2x_disable_nvram_access(bp);
8241 bnx2x_release_nvram_lock(bp);
8242
8243 return rc;
8244 }
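
/* Worked example (illustrative only): writing the single byte 0xab at
 * offset 0x102 reads the dword at align_offset 0x100, clears (0xff << 16),
 * merges in (0xab << 16), undoes the big-endian conversion performed by
 * the read helper, and writes the dword back with FIRST | LAST set.
 */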
8245
8246 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8247 int buf_size)
8248 {
8249 int rc;
8250 u32 cmd_flags;
8251 u32 val;
8252 u32 written_so_far;
8253
8254 if (buf_size == 1) /* ethtool */
8255 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8256
8257 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8258 DP(BNX2X_MSG_NVM,
8259 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8260 offset, buf_size);
8261 return -EINVAL;
8262 }
8263
8264 if (offset + buf_size > bp->common.flash_size) {
8265 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8266 " buf_size (0x%x) > flash_size (0x%x)\n",
8267 offset, buf_size, bp->common.flash_size);
8268 return -EINVAL;
8269 }
8270
8271 /* request access to nvram interface */
8272 rc = bnx2x_acquire_nvram_lock(bp);
8273 if (rc)
8274 return rc;
8275
8276 /* enable access to nvram interface */
8277 bnx2x_enable_nvram_access(bp);
8278
8279 written_so_far = 0;
8280 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8281 while ((written_so_far < buf_size) && (rc == 0)) {
8282 if (written_so_far == (buf_size - sizeof(u32)))
8283 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8284 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8285 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8286 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8287 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8288
8289 memcpy(&val, data_buf, 4);
8290
8291 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8292
8293 /* advance to the next dword */
8294 offset += sizeof(u32);
8295 data_buf += sizeof(u32);
8296 written_so_far += sizeof(u32);
8297 cmd_flags = 0;
8298 }
8299
8300 /* disable access to nvram interface */
8301 bnx2x_disable_nvram_access(bp);
8302 bnx2x_release_nvram_lock(bp);
8303
8304 return rc;
8305 }
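
/* Example (illustrative only): for a two-dword write contained in one
 * NVRAM page, dword 0 is issued with MCPR_NVM_COMMAND_FIRST and dword 1
 * with MCPR_NVM_COMMAND_LAST.  A write that crosses a page boundary
 * re-asserts LAST on the dword that closes a page and FIRST on the dword
 * that opens the next, per the conditions in the loop above (the final
 * dword always takes LAST).
 */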
8306
8307 static int bnx2x_set_eeprom(struct net_device *dev,
8308 struct ethtool_eeprom *eeprom, u8 *eebuf)
8309 {
8310 struct bnx2x *bp = netdev_priv(dev);
8311 int rc;
8312
8313 if (!netif_running(dev))
8314 return -EAGAIN;
8315
8316 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8317 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8318 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8319 eeprom->len, eeprom->len);
8320
8321 /* parameters already validated in ethtool_set_eeprom */
8322
8323 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8324 if (eeprom->magic == 0x00504859)
8325 if (bp->port.pmf) {
8326
8327 bnx2x_acquire_phy_lock(bp);
8328 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8329 bp->link_params.ext_phy_config,
8330 (bp->state != BNX2X_STATE_CLOSED),
8331 eebuf, eeprom->len);
8332 if ((bp->state == BNX2X_STATE_OPEN) ||
8333 (bp->state == BNX2X_STATE_DISABLED)) {
8334 rc |= bnx2x_link_reset(&bp->link_params,
8335 &bp->link_vars);
8336 rc |= bnx2x_phy_init(&bp->link_params,
8337 &bp->link_vars);
8338 }
8339 bnx2x_release_phy_lock(bp);
8340
8341 } else /* Only the PMF can access the PHY */
8342 return -EINVAL;
8343 else
8344 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8345
8346 return rc;
8347 }
8348
8349 static int bnx2x_get_coalesce(struct net_device *dev,
8350 struct ethtool_coalesce *coal)
8351 {
8352 struct bnx2x *bp = netdev_priv(dev);
8353
8354 memset(coal, 0, sizeof(struct ethtool_coalesce));
8355
8356 coal->rx_coalesce_usecs = bp->rx_ticks;
8357 coal->tx_coalesce_usecs = bp->tx_ticks;
8358
8359 return 0;
8360 }
8361
8362 static int bnx2x_set_coalesce(struct net_device *dev,
8363 struct ethtool_coalesce *coal)
8364 {
8365 struct bnx2x *bp = netdev_priv(dev);
8366
8367 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8368 if (bp->rx_ticks > 3000)
8369 bp->rx_ticks = 3000;
8370
8371 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8372 if (bp->tx_ticks > 0x3000)
8373 bp->tx_ticks = 0x3000;
8374
8375 if (netif_running(dev))
8376 bnx2x_update_coalesce(bp);
8377
8378 return 0;
8379 }
8380
8381 static void bnx2x_get_ringparam(struct net_device *dev,
8382 struct ethtool_ringparam *ering)
8383 {
8384 struct bnx2x *bp = netdev_priv(dev);
8385
8386 ering->rx_max_pending = MAX_RX_AVAIL;
8387 ering->rx_mini_max_pending = 0;
8388 ering->rx_jumbo_max_pending = 0;
8389
8390 ering->rx_pending = bp->rx_ring_size;
8391 ering->rx_mini_pending = 0;
8392 ering->rx_jumbo_pending = 0;
8393
8394 ering->tx_max_pending = MAX_TX_AVAIL;
8395 ering->tx_pending = bp->tx_ring_size;
8396 }
8397
8398 static int bnx2x_set_ringparam(struct net_device *dev,
8399 struct ethtool_ringparam *ering)
8400 {
8401 struct bnx2x *bp = netdev_priv(dev);
8402 int rc = 0;
8403
8404 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8405 (ering->tx_pending > MAX_TX_AVAIL) ||
8406 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8407 return -EINVAL;
8408
8409 bp->rx_ring_size = ering->rx_pending;
8410 bp->tx_ring_size = ering->tx_pending;
8411
8412 if (netif_running(dev)) {
8413 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8414 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8415 }
8416
8417 return rc;
8418 }
8419
8420 static void bnx2x_get_pauseparam(struct net_device *dev,
8421 struct ethtool_pauseparam *epause)
8422 {
8423 struct bnx2x *bp = netdev_priv(dev);
8424
8425 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8426 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8427
8428 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8429 BNX2X_FLOW_CTRL_RX);
8430 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8431 BNX2X_FLOW_CTRL_TX);
8432
8433 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8434 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8435 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8436 }
8437
8438 static int bnx2x_set_pauseparam(struct net_device *dev,
8439 struct ethtool_pauseparam *epause)
8440 {
8441 struct bnx2x *bp = netdev_priv(dev);
8442
8443 if (IS_E1HMF(bp))
8444 return 0;
8445
8446 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8447 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8448 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8449
8450 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8451
8452 if (epause->rx_pause)
8453 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8454
8455 if (epause->tx_pause)
8456 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8457
8458 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8459 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8460
8461 if (epause->autoneg) {
8462 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8463 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8464 return -EINVAL;
8465 }
8466
8467 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8468 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8469 }
8470
8471 DP(NETIF_MSG_LINK,
8472 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8473
8474 if (netif_running(dev)) {
8475 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8476 bnx2x_link_set(bp);
8477 }
8478
8479 return 0;
8480 }
8481
8482 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8483 {
8484 struct bnx2x *bp = netdev_priv(dev);
8485 int changed = 0;
8486 int rc = 0;
8487
8488 /* TPA requires Rx CSUM offloading */
8489 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8490 if (!(dev->features & NETIF_F_LRO)) {
8491 dev->features |= NETIF_F_LRO;
8492 bp->flags |= TPA_ENABLE_FLAG;
8493 changed = 1;
8494 }
8495
8496 } else if (dev->features & NETIF_F_LRO) {
8497 dev->features &= ~NETIF_F_LRO;
8498 bp->flags &= ~TPA_ENABLE_FLAG;
8499 changed = 1;
8500 }
8501
8502 if (changed && netif_running(dev)) {
8503 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8504 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8505 }
8506
8507 return rc;
8508 }
8509
8510 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8511 {
8512 struct bnx2x *bp = netdev_priv(dev);
8513
8514 return bp->rx_csum;
8515 }
8516
8517 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8518 {
8519 struct bnx2x *bp = netdev_priv(dev);
8520 int rc = 0;
8521
8522 bp->rx_csum = data;
8523
8524 	/* Disable TPA when Rx CSUM is disabled. Otherwise all
8525 	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
8526 if (!data) {
8527 u32 flags = ethtool_op_get_flags(dev);
8528
8529 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8530 }
8531
8532 return rc;
8533 }
8534
8535 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8536 {
8537 if (data) {
8538 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8539 dev->features |= NETIF_F_TSO6;
8540 } else {
8541 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8542 dev->features &= ~NETIF_F_TSO6;
8543 }
8544
8545 return 0;
8546 }
8547
8548 static const struct {
8549 char string[ETH_GSTRING_LEN];
8550 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8551 { "register_test (offline)" },
8552 { "memory_test (offline)" },
8553 { "loopback_test (offline)" },
8554 { "nvram_test (online)" },
8555 { "interrupt_test (online)" },
8556 { "link_test (online)" },
8557 { "idle check (online)" },
8558 { "MC errors (online)" }
8559 };
8560
8561 static int bnx2x_self_test_count(struct net_device *dev)
8562 {
8563 return BNX2X_NUM_TESTS;
8564 }
8565
8566 static int bnx2x_test_registers(struct bnx2x *bp)
8567 {
8568 int idx, i, rc = -ENODEV;
8569 u32 wr_val = 0;
8570 int port = BP_PORT(bp);
8571 static const struct {
8572 u32 offset0;
8573 u32 offset1;
8574 u32 mask;
8575 } reg_tbl[] = {
8576 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8577 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8578 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8579 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8580 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8581 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8582 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8583 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8584 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8585 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8586 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8587 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8588 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8589 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8590 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8591 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8592 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8593 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8594 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8595 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8596 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8597 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8598 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8599 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8600 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8601 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8602 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8603 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8604 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8605 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8606 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8607 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8608 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8609 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8610 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8611 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8612 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8613 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8614
8615 { 0xffffffff, 0, 0x00000000 }
8616 };
8617
8618 if (!netif_running(bp->dev))
8619 return rc;
8620
8621 /* Repeat the test twice:
8622 First by writing 0x00000000, second by writing 0xffffffff */
8623 for (idx = 0; idx < 2; idx++) {
8624
8625 switch (idx) {
8626 case 0:
8627 wr_val = 0;
8628 break;
8629 case 1:
8630 wr_val = 0xffffffff;
8631 break;
8632 }
8633
8634 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8635 u32 offset, mask, save_val, val;
8636
8637 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8638 mask = reg_tbl[i].mask;
8639
8640 save_val = REG_RD(bp, offset);
8641
8642 REG_WR(bp, offset, wr_val);
8643 val = REG_RD(bp, offset);
8644
8645 /* Restore the original register's value */
8646 REG_WR(bp, offset, save_val);
8647
8648 			/* verify the value is as expected */
8649 if ((val & mask) != (wr_val & mask))
8650 goto test_reg_exit;
8651 }
8652 }
8653
8654 rc = 0;
8655
8656 test_reg_exit:
8657 return rc;
8658 }
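
/* Example (illustrative only): each reg_tbl row expands per port as
 * offset = offset0 + port * offset1, so for port 1 the row
 * { HC_REG_AGG_INT_0, 4, 0x000003ff } exercises HC_REG_AGG_INT_0 + 4,
 * and only the bits in mask are compared after the write/read-back.
 */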
8659
8660 static int bnx2x_test_memory(struct bnx2x *bp)
8661 {
8662 int i, j, rc = -ENODEV;
8663 u32 val;
8664 static const struct {
8665 u32 offset;
8666 int size;
8667 } mem_tbl[] = {
8668 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8669 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8670 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8671 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8672 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8673 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8674 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8675
8676 { 0xffffffff, 0 }
8677 };
8678 static const struct {
8679 char *name;
8680 u32 offset;
8681 u32 e1_mask;
8682 u32 e1h_mask;
8683 } prty_tbl[] = {
8684 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8685 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8686 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8687 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8688 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8689 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8690
8691 { NULL, 0xffffffff, 0, 0 }
8692 };
8693
8694 if (!netif_running(bp->dev))
8695 return rc;
8696
8697 /* Go through all the memories */
8698 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8699 for (j = 0; j < mem_tbl[i].size; j++)
8700 REG_RD(bp, mem_tbl[i].offset + j*4);
8701
8702 /* Check the parity status */
8703 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8704 val = REG_RD(bp, prty_tbl[i].offset);
8705 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8706 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8707 DP(NETIF_MSG_HW,
8708 "%s is 0x%x\n", prty_tbl[i].name, val);
8709 goto test_mem_exit;
8710 }
8711 }
8712
8713 rc = 0;
8714
8715 test_mem_exit:
8716 return rc;
8717 }
8718
8719 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8720 {
8721 int cnt = 1000;
8722
8723 if (link_up)
8724 while (bnx2x_link_test(bp) && cnt--)
8725 msleep(10);
8726 }
8727
8728 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8729 {
8730 unsigned int pkt_size, num_pkts, i;
8731 struct sk_buff *skb;
8732 unsigned char *packet;
8733 struct bnx2x_fastpath *fp = &bp->fp[0];
8734 u16 tx_start_idx, tx_idx;
8735 u16 rx_start_idx, rx_idx;
8736 u16 pkt_prod;
8737 struct sw_tx_bd *tx_buf;
8738 struct eth_tx_bd *tx_bd;
8739 dma_addr_t mapping;
8740 union eth_rx_cqe *cqe;
8741 u8 cqe_fp_flags;
8742 struct sw_rx_bd *rx_buf;
8743 u16 len;
8744 int rc = -ENODEV;
8745
8746 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8747 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8748 bnx2x_acquire_phy_lock(bp);
8749 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8750 bnx2x_release_phy_lock(bp);
8751
8752 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8753 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8754 bnx2x_acquire_phy_lock(bp);
8755 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8756 bnx2x_release_phy_lock(bp);
8757 /* wait until link state is restored */
8758 bnx2x_wait_for_link(bp, link_up);
8759
8760 } else
8761 return -EINVAL;
8762
8763 pkt_size = 1514;
8764 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8765 if (!skb) {
8766 rc = -ENOMEM;
8767 goto test_loopback_exit;
8768 }
8769 packet = skb_put(skb, pkt_size);
8770 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8771 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8772 for (i = ETH_HLEN; i < pkt_size; i++)
8773 packet[i] = (unsigned char) (i & 0xff);
8774
8775 num_pkts = 0;
8776 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8777 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8778
8779 pkt_prod = fp->tx_pkt_prod++;
8780 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8781 tx_buf->first_bd = fp->tx_bd_prod;
8782 tx_buf->skb = skb;
8783
8784 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8785 mapping = pci_map_single(bp->pdev, skb->data,
8786 skb_headlen(skb), PCI_DMA_TODEVICE);
8787 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8788 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8789 tx_bd->nbd = cpu_to_le16(1);
8790 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8791 tx_bd->vlan = cpu_to_le16(pkt_prod);
8792 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8793 ETH_TX_BD_FLAGS_END_BD);
8794 tx_bd->general_data = ((UNICAST_ADDRESS <<
8795 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8796
8797 wmb();
8798
8799 fp->hw_tx_prods->bds_prod =
8800 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8801 mb(); /* FW restriction: must not reorder writing nbd and packets */
8802 fp->hw_tx_prods->packets_prod =
8803 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8804 DOORBELL(bp, FP_IDX(fp), 0);
8805
8806 mmiowb();
8807
8808 num_pkts++;
8809 fp->tx_bd_prod++;
8810 bp->dev->trans_start = jiffies;
8811
8812 udelay(100);
8813
8814 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8815 if (tx_idx != tx_start_idx + num_pkts)
8816 goto test_loopback_exit;
8817
8818 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8819 if (rx_idx != rx_start_idx + num_pkts)
8820 goto test_loopback_exit;
8821
8822 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8823 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8824 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8825 goto test_loopback_rx_exit;
8826
8827 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8828 if (len != pkt_size)
8829 goto test_loopback_rx_exit;
8830
8831 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8832 skb = rx_buf->skb;
8833 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8834 for (i = ETH_HLEN; i < pkt_size; i++)
8835 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8836 goto test_loopback_rx_exit;
8837
8838 rc = 0;
8839
8840 test_loopback_rx_exit:
8841
8842 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8843 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8844 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8845 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8846
8847 /* Update producers */
8848 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8849 fp->rx_sge_prod);
8850
8851 test_loopback_exit:
8852 bp->link_params.loopback_mode = LOOPBACK_NONE;
8853
8854 return rc;
8855 }
8856
8857 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8858 {
8859 int rc = 0;
8860
8861 if (!netif_running(bp->dev))
8862 return BNX2X_LOOPBACK_FAILED;
8863
8864 bnx2x_netif_stop(bp, 1);
8865
8866 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8867 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8868 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8869 }
8870
8871 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8872 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8873 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8874 }
8875
8876 bnx2x_netif_start(bp);
8877
8878 return rc;
8879 }
8880
8881 #define CRC32_RESIDUAL 0xdebb20e3
8882
8883 static int bnx2x_test_nvram(struct bnx2x *bp)
8884 {
8885 static const struct {
8886 int offset;
8887 int size;
8888 } nvram_tbl[] = {
8889 { 0, 0x14 }, /* bootstrap */
8890 { 0x14, 0xec }, /* dir */
8891 { 0x100, 0x350 }, /* manuf_info */
8892 { 0x450, 0xf0 }, /* feature_info */
8893 { 0x640, 0x64 }, /* upgrade_key_info */
8894 { 0x6a4, 0x64 },
8895 { 0x708, 0x70 }, /* manuf_key_info */
8896 { 0x778, 0x70 },
8897 { 0, 0 }
8898 };
8899 u32 buf[0x350 / 4];
8900 u8 *data = (u8 *)buf;
8901 int i, rc;
8902 u32 magic, csum;
8903
8904 rc = bnx2x_nvram_read(bp, 0, data, 4);
8905 if (rc) {
8906 		DP(NETIF_MSG_PROBE, "magic value read failed (rc -%d)\n", -rc);
8907 goto test_nvram_exit;
8908 }
8909
8910 magic = be32_to_cpu(buf[0]);
8911 if (magic != 0x669955aa) {
8912 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8913 rc = -ENODEV;
8914 goto test_nvram_exit;
8915 }
8916
8917 for (i = 0; nvram_tbl[i].size; i++) {
8918
8919 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8920 nvram_tbl[i].size);
8921 if (rc) {
8922 DP(NETIF_MSG_PROBE,
8923 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8924 goto test_nvram_exit;
8925 }
8926
8927 csum = ether_crc_le(nvram_tbl[i].size, data);
8928 if (csum != CRC32_RESIDUAL) {
8929 DP(NETIF_MSG_PROBE,
8930 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8931 rc = -ENODEV;
8932 goto test_nvram_exit;
8933 }
8934 }
8935
8936 test_nvram_exit:
8937 return rc;
8938 }
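
/* Note (an assumption, not stated in the code): each nvram_tbl region
 * appears to carry its own CRC-32 in its final dword, so running
 * ether_crc_le() over the whole region, stored CRC included, yields the
 * fixed CRC32_RESIDUAL for an intact block; that is why no per-region
 * expected checksum needs to be kept here.
 */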
8939
8940 static int bnx2x_test_intr(struct bnx2x *bp)
8941 {
8942 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8943 int i, rc;
8944
8945 if (!netif_running(bp->dev))
8946 return -ENODEV;
8947
8948 config->hdr.length_6b = 0;
8949 if (CHIP_IS_E1(bp))
8950 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8951 else
8952 config->hdr.offset = BP_FUNC(bp);
8953 config->hdr.client_id = BP_CL_ID(bp);
8954 config->hdr.reserved1 = 0;
8955
8956 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8957 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8958 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8959 if (rc == 0) {
8960 bp->set_mac_pending++;
8961 for (i = 0; i < 10; i++) {
8962 if (!bp->set_mac_pending)
8963 break;
8964 msleep_interruptible(10);
8965 }
8966 if (i == 10)
8967 rc = -ENODEV;
8968 }
8969
8970 return rc;
8971 }
8972
8973 static void bnx2x_self_test(struct net_device *dev,
8974 struct ethtool_test *etest, u64 *buf)
8975 {
8976 struct bnx2x *bp = netdev_priv(dev);
8977
8978 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8979
8980 if (!netif_running(dev))
8981 return;
8982
8983 /* offline tests are not supported in MF mode */
8984 if (IS_E1HMF(bp))
8985 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8986
8987 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8988 u8 link_up;
8989
8990 link_up = bp->link_vars.link_up;
8991 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8992 bnx2x_nic_load(bp, LOAD_DIAG);
8993 /* wait until link state is restored */
8994 bnx2x_wait_for_link(bp, link_up);
8995
8996 if (bnx2x_test_registers(bp) != 0) {
8997 buf[0] = 1;
8998 etest->flags |= ETH_TEST_FL_FAILED;
8999 }
9000 if (bnx2x_test_memory(bp) != 0) {
9001 buf[1] = 1;
9002 etest->flags |= ETH_TEST_FL_FAILED;
9003 }
9004 buf[2] = bnx2x_test_loopback(bp, link_up);
9005 if (buf[2] != 0)
9006 etest->flags |= ETH_TEST_FL_FAILED;
9007
9008 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9009 bnx2x_nic_load(bp, LOAD_NORMAL);
9010 /* wait until link state is restored */
9011 bnx2x_wait_for_link(bp, link_up);
9012 }
9013 if (bnx2x_test_nvram(bp) != 0) {
9014 buf[3] = 1;
9015 etest->flags |= ETH_TEST_FL_FAILED;
9016 }
9017 if (bnx2x_test_intr(bp) != 0) {
9018 buf[4] = 1;
9019 etest->flags |= ETH_TEST_FL_FAILED;
9020 }
9021 if (bp->port.pmf)
9022 if (bnx2x_link_test(bp) != 0) {
9023 buf[5] = 1;
9024 etest->flags |= ETH_TEST_FL_FAILED;
9025 }
9026 buf[7] = bnx2x_mc_assert(bp);
9027 if (buf[7] != 0)
9028 etest->flags |= ETH_TEST_FL_FAILED;
9029
9030 #ifdef BNX2X_EXTRA_DEBUG
9031 bnx2x_panic_dump(bp);
9032 #endif
9033 }
9034
9035 static const struct {
9036 long offset;
9037 int size;
9038 u32 flags;
9039 #define STATS_FLAGS_PORT 1
9040 #define STATS_FLAGS_FUNC 2
9041 u8 string[ETH_GSTRING_LEN];
9042 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9043 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9044 8, STATS_FLAGS_FUNC, "rx_bytes" },
9045 { STATS_OFFSET32(error_bytes_received_hi),
9046 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9047 { STATS_OFFSET32(total_bytes_transmitted_hi),
9048 8, STATS_FLAGS_FUNC, "tx_bytes" },
9049 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9050 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9051 { STATS_OFFSET32(total_unicast_packets_received_hi),
9052 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9053 { STATS_OFFSET32(total_multicast_packets_received_hi),
9054 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9055 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9056 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9057 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9058 8, STATS_FLAGS_FUNC, "tx_packets" },
9059 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9060 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9061 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9062 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9063 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9064 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9065 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9066 8, STATS_FLAGS_PORT, "rx_align_errors" },
9067 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9068 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9069 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9070 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9071 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9072 8, STATS_FLAGS_PORT, "tx_deferred" },
9073 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9074 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9075 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9076 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9077 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9078 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9079 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9080 8, STATS_FLAGS_PORT, "rx_fragments" },
9081 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9082 8, STATS_FLAGS_PORT, "rx_jabbers" },
9083 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9084 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9085 { STATS_OFFSET32(jabber_packets_received),
9086 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9087 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9088 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9089 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9090 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9091 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9092 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9093 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9094 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9095 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9096 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9097 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9098 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9099 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9100 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9101 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9102 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9103 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9104 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9105 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9106 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9107 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9108 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9109 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9110 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9111 { STATS_OFFSET32(mac_filter_discard),
9112 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9113 { STATS_OFFSET32(no_buff_discard),
9114 4, STATS_FLAGS_FUNC, "rx_discards" },
9115 { STATS_OFFSET32(xxoverflow_discard),
9116 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9117 { STATS_OFFSET32(brb_drop_hi),
9118 8, STATS_FLAGS_PORT, "brb_discard" },
9119 { STATS_OFFSET32(brb_truncate_hi),
9120 8, STATS_FLAGS_PORT, "brb_truncate" },
9121 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9122 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9123 { STATS_OFFSET32(rx_skb_alloc_failed),
9124 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9125 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9126 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9127 };
9128
9129 #define IS_NOT_E1HMF_STAT(bp, i) \
9130 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
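
/* Note (an assumption about intent): the STATS_FLAGS_PORT counters come
 * from the shared MAC and aggregate every function on the port, so in E1H
 * multi-function mode the macro above hides them and only the
 * per-function (STATS_FLAGS_FUNC) counters are reported to ethtool.
 */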
9131
9132 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9133 {
9134 struct bnx2x *bp = netdev_priv(dev);
9135 int i, j;
9136
9137 switch (stringset) {
9138 case ETH_SS_STATS:
9139 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9140 if (IS_NOT_E1HMF_STAT(bp, i))
9141 continue;
9142 strcpy(buf + j*ETH_GSTRING_LEN,
9143 bnx2x_stats_arr[i].string);
9144 j++;
9145 }
9146 break;
9147
9148 case ETH_SS_TEST:
9149 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9150 break;
9151 }
9152 }
9153
9154 static int bnx2x_get_stats_count(struct net_device *dev)
9155 {
9156 struct bnx2x *bp = netdev_priv(dev);
9157 int i, num_stats = 0;
9158
9159 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9160 if (IS_NOT_E1HMF_STAT(bp, i))
9161 continue;
9162 num_stats++;
9163 }
9164 return num_stats;
9165 }
9166
9167 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9168 struct ethtool_stats *stats, u64 *buf)
9169 {
9170 struct bnx2x *bp = netdev_priv(dev);
9171 u32 *hw_stats = (u32 *)&bp->eth_stats;
9172 int i, j;
9173
9174 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9175 if (IS_NOT_E1HMF_STAT(bp, i))
9176 continue;
9177
9178 if (bnx2x_stats_arr[i].size == 0) {
9179 /* skip this counter */
9180 buf[j] = 0;
9181 j++;
9182 continue;
9183 }
9184 if (bnx2x_stats_arr[i].size == 4) {
9185 /* 4-byte counter */
9186 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9187 j++;
9188 continue;
9189 }
9190 /* 8-byte counter */
9191 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9192 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9193 j++;
9194 }
9195 }
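
/* Example (illustrative only, assuming HILO_U64(hi, lo) expands to
 * (((u64)(hi) << 32) | (lo)), as its use with the *_hi offsets suggests):
 * an 8-byte counter stored as a {hi, lo} pair of u32s at offset k is
 * reported as ((u64)hw_stats[k] << 32) | hw_stats[k + 1].
 */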
9196
9197 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9198 {
9199 struct bnx2x *bp = netdev_priv(dev);
9200 int port = BP_PORT(bp);
9201 int i;
9202
9203 if (!netif_running(dev))
9204 return 0;
9205
9206 if (!bp->port.pmf)
9207 return 0;
9208
9209 if (data == 0)
9210 data = 2;
9211
9212 for (i = 0; i < (data * 2); i++) {
9213 if ((i % 2) == 0)
9214 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9215 bp->link_params.hw_led_mode,
9216 bp->link_params.chip_id);
9217 else
9218 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9219 bp->link_params.hw_led_mode,
9220 bp->link_params.chip_id);
9221
9222 msleep_interruptible(500);
9223 if (signal_pending(current))
9224 break;
9225 }
9226
9227 if (bp->link_vars.link_up)
9228 bnx2x_set_led(bp, port, LED_MODE_OPER,
9229 bp->link_vars.line_speed,
9230 bp->link_params.hw_led_mode,
9231 bp->link_params.chip_id);
9232
9233 return 0;
9234 }
9235
9236 static struct ethtool_ops bnx2x_ethtool_ops = {
9237 .get_settings = bnx2x_get_settings,
9238 .set_settings = bnx2x_set_settings,
9239 .get_drvinfo = bnx2x_get_drvinfo,
9240 .get_wol = bnx2x_get_wol,
9241 .set_wol = bnx2x_set_wol,
9242 .get_msglevel = bnx2x_get_msglevel,
9243 .set_msglevel = bnx2x_set_msglevel,
9244 .nway_reset = bnx2x_nway_reset,
9245 .get_link = ethtool_op_get_link,
9246 .get_eeprom_len = bnx2x_get_eeprom_len,
9247 .get_eeprom = bnx2x_get_eeprom,
9248 .set_eeprom = bnx2x_set_eeprom,
9249 .get_coalesce = bnx2x_get_coalesce,
9250 .set_coalesce = bnx2x_set_coalesce,
9251 .get_ringparam = bnx2x_get_ringparam,
9252 .set_ringparam = bnx2x_set_ringparam,
9253 .get_pauseparam = bnx2x_get_pauseparam,
9254 .set_pauseparam = bnx2x_set_pauseparam,
9255 .get_rx_csum = bnx2x_get_rx_csum,
9256 .set_rx_csum = bnx2x_set_rx_csum,
9257 .get_tx_csum = ethtool_op_get_tx_csum,
9258 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9259 .set_flags = bnx2x_set_flags,
9260 .get_flags = ethtool_op_get_flags,
9261 .get_sg = ethtool_op_get_sg,
9262 .set_sg = ethtool_op_set_sg,
9263 .get_tso = ethtool_op_get_tso,
9264 .set_tso = bnx2x_set_tso,
9265 .self_test_count = bnx2x_self_test_count,
9266 .self_test = bnx2x_self_test,
9267 .get_strings = bnx2x_get_strings,
9268 .phys_id = bnx2x_phys_id,
9269 .get_stats_count = bnx2x_get_stats_count,
9270 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9271 };
9272
9273 /* end of ethtool_ops */
9274
9275 /****************************************************************************
9276 * General service functions
9277 ****************************************************************************/
9278
9279 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9280 {
9281 u16 pmcsr;
9282
9283 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9284
9285 switch (state) {
9286 case PCI_D0:
9287 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9288 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9289 PCI_PM_CTRL_PME_STATUS));
9290
9291 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9292 /* delay required during transition out of D3hot */
9293 msleep(20);
9294 break;
9295
9296 case PCI_D3hot:
9297 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
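		/* 3 is the PM_CTRL state encoding for D3hot */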
9298 pmcsr |= 3;
9299
9300 if (bp->wol)
9301 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9302
9303 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9304 pmcsr);
9305
9306 /* No more memory access after this point until
9307 * device is brought back to D0.
9308 */
9309 break;
9310
9311 default:
9312 return -EINVAL;
9313 }
9314 return 0;
9315 }
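
/* Usage sketch (illustrative, hypothetical caller): wake the device
 * before touching its registers, then put it back to sleep:
 *
 *	bnx2x_set_power_state(bp, PCI_D0);
 *	... register access ...
 *	bnx2x_set_power_state(bp, PCI_D3hot);
 */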
9316
9317 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9318 {
9319 u16 rx_cons_sb;
9320
9321 /* Tell compiler that status block fields can change */
9322 barrier();
9323 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
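	/* the last RCQ entry in each page is a "next page" pointer, not a
	 * real completion descriptor, so step over it
	 */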
9324 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9325 rx_cons_sb++;
9326 return (fp->rx_comp_cons != rx_cons_sb);
9327 }
9328
9329 /*
9330 * net_device service functions
9331 */
9332
9333 static int bnx2x_poll(struct napi_struct *napi, int budget)
9334 {
9335 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9336 napi);
9337 struct bnx2x *bp = fp->bp;
9338 int work_done = 0;
9339
9340 #ifdef BNX2X_STOP_ON_ERROR
9341 if (unlikely(bp->panic))
9342 goto poll_panic;
9343 #endif
9344
9345 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9346 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9347 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9348
9349 bnx2x_update_fpsb_idx(fp);
9350
9351 if (bnx2x_has_tx_work(fp))
9352 bnx2x_tx_int(fp, budget);
9353
9354 if (bnx2x_has_rx_work(fp))
9355 work_done = bnx2x_rx_int(fp, budget);
9356 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9357
9358 /* must not complete if we consumed full budget */
9359 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9360
9361 #ifdef BNX2X_STOP_ON_ERROR
9362 poll_panic:
9363 #endif
9364 netif_rx_complete(napi);
9365
9366 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9367 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9368 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9369 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9370 }
9371 return work_done;
9372 }
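
/* The poll routine above follows the standard NAPI contract: only
 * complete and re-enable interrupts when less than the full budget was
 * consumed. A minimal skeleton (illustrative only, with hypothetical
 * helpers) looks like:
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = example_rx_work(budget);
 *
 *		if (work_done < budget) {
 *			netif_rx_complete(napi);
 *			example_enable_irq();
 *		}
 *		return work_done;
 *	}
 */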
9373
9374
9375 /* we split the first BD into a headers BD and a data BD
9376 * to ease the pain of our fellow microcode engineers;
9377 * we use one mapping for both BDs.
9378 * So far this has only been observed to happen
9379 * in Other Operating Systems(TM)
9380 */
9381 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9382 struct bnx2x_fastpath *fp,
9383 struct eth_tx_bd **tx_bd, u16 hlen,
9384 u16 bd_prod, int nbd)
9385 {
9386 struct eth_tx_bd *h_tx_bd = *tx_bd;
9387 struct eth_tx_bd *d_tx_bd;
9388 dma_addr_t mapping;
9389 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9390
9391 /* first fix first BD */
9392 h_tx_bd->nbd = cpu_to_le16(nbd);
9393 h_tx_bd->nbytes = cpu_to_le16(hlen);
9394
9395 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9396 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9397 h_tx_bd->addr_lo, h_tx_bd->nbd);
9398
9399 /* now get a new data BD
9400 * (after the pbd) and fill it */
9401 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9402 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9403
9404 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9405 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9406
9407 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9408 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9409 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9410 d_tx_bd->vlan = 0;
9411 /* this marks the BD as one that has no individual mapping;
9412 * the FW ignores this flag in a BD not marked start
9413 */
9414 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9415 DP(NETIF_MSG_TX_QUEUED,
9416 "TSO split data size is %d (%x:%x)\n",
9417 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9418
9419 /* update tx_bd for marking the last BD flag */
9420 *tx_bd = d_tx_bd;
9421
9422 return bd_prod;
9423 }
9424
9425 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9426 {
9427 if (fix > 0)
9428 csum = (u16) ~csum_fold(csum_sub(csum,
9429 csum_partial(t_header - fix, fix, 0)));
9430
9431 else if (fix < 0)
9432 csum = (u16) ~csum_fold(csum_add(csum,
9433 csum_partial(t_header, -fix, 0)));
9434
9435 return swab16(csum);
9436 }
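
/* The arithmetic above, in words (illustrative): if the stack's partial
 * checksum started 'fix' bytes before the transport header (fix > 0),
 * those extra bytes are subtracted back out; if it started 'fix' bytes
 * inside it (fix < 0), the missing leading bytes are added in.
 * csum_fold() then collapses the 32-bit sum to 16 bits and swab16()
 * gives the byte order the parsing BD expects.
 */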
9437
9438 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9439 {
9440 u32 rc;
9441
9442 if (skb->ip_summed != CHECKSUM_PARTIAL)
9443 rc = XMIT_PLAIN;
9444
9445 else {
9446 if (skb->protocol == htons(ETH_P_IPV6)) {
9447 rc = XMIT_CSUM_V6;
9448 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9449 rc |= XMIT_CSUM_TCP;
9450
9451 } else {
9452 rc = XMIT_CSUM_V4;
9453 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9454 rc |= XMIT_CSUM_TCP;
9455 }
9456 }
9457
9458 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9459 rc |= XMIT_GSO_V4;
9460
9461 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9462 rc |= XMIT_GSO_V6;
9463
9464 return rc;
9465 }
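
/* Example (illustrative): a CHECKSUM_PARTIAL TSO IPv4/TCP skb yields
 * xmit_type == (XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4), while an
 * skb that needs no HW checksum yields XMIT_PLAIN.
 */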
9466
9467 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9468 /* check if packet requires linearization (packet is too fragmented) */
9469 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9470 u32 xmit_type)
9471 {
9472 int to_copy = 0;
9473 int hlen = 0;
9474 int first_bd_sz = 0;
9475
9476 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9477 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9478
9479 if (xmit_type & XMIT_GSO) {
9480 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9481 /* Check if LSO packet needs to be copied:
9482 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9483 int wnd_size = MAX_FETCH_BD - 3;
9484 /* Number of windows to check */
9485 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9486 int wnd_idx = 0;
9487 int frag_idx = 0;
9488 u32 wnd_sum = 0;
9489
9490 /* Headers length */
9491 hlen = (int)(skb_transport_header(skb) - skb->data) +
9492 tcp_hdrlen(skb);
9493
9494 /* Amount of data (w/o headers) on linear part of SKB */
9495 first_bd_sz = skb_headlen(skb) - hlen;
9496
9497 wnd_sum = first_bd_sz;
9498
9499 /* Calculate the first sum - it's special */
9500 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9501 wnd_sum +=
9502 skb_shinfo(skb)->frags[frag_idx].size;
9503
9504 /* If there was data on linear skb data - check it */
9505 if (first_bd_sz > 0) {
9506 if (unlikely(wnd_sum < lso_mss)) {
9507 to_copy = 1;
9508 goto exit_lbl;
9509 }
9510
9511 wnd_sum -= first_bd_sz;
9512 }
9513
9514 /* Others are easier: run through the frag list and
9515 check all windows */
9516 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9517 wnd_sum +=
9518 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9519
9520 if (unlikely(wnd_sum < lso_mss)) {
9521 to_copy = 1;
9522 break;
9523 }
9524 wnd_sum -=
9525 skb_shinfo(skb)->frags[wnd_idx].size;
9526 }
9527
9528 } else {
9529 /* a non-LSO packet that is too fragmented must
9530 always be linearized */
9531 to_copy = 1;
9532 }
9533 }
9534
9535 exit_lbl:
9536 if (unlikely(to_copy))
9537 DP(NETIF_MSG_TX_QUEUED,
9538 "Linearization IS REQUIRED for %s packet. "
9539 "num_frags %d hlen %d first_bd_sz %d\n",
9540 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9541 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9542
9543 return to_copy;
9544 }
9545 #endif
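
/* Worked example for the window check above (hypothetical numbers,
 * assuming MAX_FETCH_BD is 13 so a window spans 10 BDs): an LSO skb
 * with 100 bytes of payload in its linear part and nine 200-byte frags
 * puts 1900 bytes in the first window; if gso_size (the MSS) exceeds
 * that, a single MSS would need more BDs than the FW can fetch and the
 * skb must be linearized.
 */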
9546
9547 /* called with netif_tx_lock
9548 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9549 * netif_wake_queue()
9550 */
9551 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9552 {
9553 struct bnx2x *bp = netdev_priv(dev);
9554 struct bnx2x_fastpath *fp;
9555 struct sw_tx_bd *tx_buf;
9556 struct eth_tx_bd *tx_bd;
9557 struct eth_tx_parse_bd *pbd = NULL;
9558 u16 pkt_prod, bd_prod;
9559 int nbd, fp_index;
9560 dma_addr_t mapping;
9561 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9562 int vlan_off = (bp->e1hov ? 4 : 0);
9563 int i;
9564 u8 hlen = 0;
9565
9566 #ifdef BNX2X_STOP_ON_ERROR
9567 if (unlikely(bp->panic))
9568 return NETDEV_TX_BUSY;
9569 #endif
9570
9571 fp_index = (smp_processor_id() % bp->num_queues);
9572 fp = &bp->fp[fp_index];
9573
9574 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9575 bp->eth_stats.driver_xoff++;
9576 netif_stop_queue(dev);
9577 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9578 return NETDEV_TX_BUSY;
9579 }
9580
9581 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9582 " gso type %x xmit_type %x\n",
9583 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9584 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9585
9586 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9587 /* First, check if we need to linearize the skb
9588 (due to FW restrictions) */
9589 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9590 /* Statistics of linearization */
9591 bp->lin_cnt++;
9592 if (skb_linearize(skb) != 0) {
9593 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9594 "silently dropping this SKB\n");
9595 dev_kfree_skb_any(skb);
9596 return NETDEV_TX_OK;
9597 }
9598 }
9599 #endif
9600
9601 /*
9602 Please read carefully. First we use one BD which we mark as start,
9603 then for TSO or xsum we have a parsing info BD,
9604 and only then we have the rest of the TSO BDs.
9605 (don't forget to mark the last one as last,
9606 and to unmap only AFTER you write to the BD ...)
9607 And above all, all pbd sizes are in words - NOT DWORDS!
9608 */
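
	/* Illustrative BD layout for a TSO packet (not to scale):
	 *
	 *   [start BD: headers] [parsing BD] [data BD: rest of the linear
	 *    part, split off by bnx2x_tx_split()] [frag BD] ... [last BD]
	 */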
9609
9610 pkt_prod = fp->tx_pkt_prod++;
9611 bd_prod = TX_BD(fp->tx_bd_prod);
9612
9613 /* get a tx_buf and first BD */
9614 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9615 tx_bd = &fp->tx_desc_ring[bd_prod];
9616
9617 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9618 tx_bd->general_data = (UNICAST_ADDRESS <<
9619 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9620 /* header nbd */
9621 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9622
9623 /* remember the first BD of the packet */
9624 tx_buf->first_bd = fp->tx_bd_prod;
9625 tx_buf->skb = skb;
9626
9627 DP(NETIF_MSG_TX_QUEUED,
9628 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9629 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9630
9631 #ifdef BCM_VLAN
9632 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9633 (bp->flags & HW_VLAN_TX_FLAG)) {
9634 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9635 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9636 vlan_off += 4;
9637 } else
9638 #endif
9639 tx_bd->vlan = cpu_to_le16(pkt_prod);
9640
9641 if (xmit_type) {
9642 /* turn on parsing and get a BD */
9643 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9644 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9645
9646 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9647 }
9648
9649 if (xmit_type & XMIT_CSUM) {
9650 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9651
9652 /* for now NS flag is not used in Linux */
9653 pbd->global_data = (hlen |
9654 ((skb->protocol == htons(ETH_P_8021Q)) <<
9655 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9656
9657 pbd->ip_hlen = (skb_transport_header(skb) -
9658 skb_network_header(skb)) / 2;
9659
9660 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9661
9662 pbd->total_hlen = cpu_to_le16(hlen);
9663 hlen = hlen*2 - vlan_off;
9664
9665 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9666
9667 if (xmit_type & XMIT_CSUM_V4)
9668 tx_bd->bd_flags.as_bitfield |=
9669 ETH_TX_BD_FLAGS_IP_CSUM;
9670 else
9671 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9672
9673 if (xmit_type & XMIT_CSUM_TCP) {
9674 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9675
9676 } else {
9677 s8 fix = SKB_CS_OFF(skb); /* signed! */
9678
9679 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9680 pbd->cs_offset = fix / 2;
9681
9682 DP(NETIF_MSG_TX_QUEUED,
9683 "hlen %d offset %d fix %d csum before fix %x\n",
9684 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9685 SKB_CS(skb));
9686
9687 /* HW bug: fixup the CSUM */
9688 pbd->tcp_pseudo_csum =
9689 bnx2x_csum_fix(skb_transport_header(skb),
9690 SKB_CS(skb), fix);
9691
9692 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9693 pbd->tcp_pseudo_csum);
9694 }
9695 }
9696
9697 mapping = pci_map_single(bp->pdev, skb->data,
9698 skb_headlen(skb), PCI_DMA_TODEVICE);
9699
9700 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9701 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9702 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9703 tx_bd->nbd = cpu_to_le16(nbd);
9704 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9705
9706 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9707 " nbytes %d flags %x vlan %x\n",
9708 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9709 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9710 le16_to_cpu(tx_bd->vlan));
9711
9712 if (xmit_type & XMIT_GSO) {
9713
9714 DP(NETIF_MSG_TX_QUEUED,
9715 "TSO packet len %d hlen %d total len %d tso size %d\n",
9716 skb->len, hlen, skb_headlen(skb),
9717 skb_shinfo(skb)->gso_size);
9718
9719 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9720
9721 if (unlikely(skb_headlen(skb) > hlen))
9722 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9723 bd_prod, ++nbd);
9724
9725 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9726 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9727 pbd->tcp_flags = pbd_tcp_flags(skb);
9728
9729 if (xmit_type & XMIT_GSO_V4) {
9730 pbd->ip_id = swab16(ip_hdr(skb)->id);
9731 pbd->tcp_pseudo_csum =
9732 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9733 ip_hdr(skb)->daddr,
9734 0, IPPROTO_TCP, 0));
9735
9736 } else
9737 pbd->tcp_pseudo_csum =
9738 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9739 &ipv6_hdr(skb)->daddr,
9740 0, IPPROTO_TCP, 0));
9741
9742 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9743 }
9744
9745 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9746 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9747
9748 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9749 tx_bd = &fp->tx_desc_ring[bd_prod];
9750
9751 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9752 frag->size, PCI_DMA_TODEVICE);
9753
9754 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9755 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9756 tx_bd->nbytes = cpu_to_le16(frag->size);
9757 tx_bd->vlan = cpu_to_le16(pkt_prod);
9758 tx_bd->bd_flags.as_bitfield = 0;
9759
9760 DP(NETIF_MSG_TX_QUEUED,
9761 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9762 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9763 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9764 }
9765
9766 /* now at last mark the BD as the last BD */
9767 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9768
9769 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9770 tx_bd, tx_bd->bd_flags.as_bitfield);
9771
9772 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9773
9774 /* now send a tx doorbell, counting the next BD
9775 * if the packet contains or ends with it
9776 */
9777 if (TX_BD_POFF(bd_prod) < nbd)
9778 nbd++;
9779
9780 if (pbd)
9781 DP(NETIF_MSG_TX_QUEUED,
9782 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9783 " tcp_flags %x xsum %x seq %u hlen %u\n",
9784 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9785 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9786 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9787
9788 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9789
9790 /*
9791 * Make sure that the BD data is updated before updating the producer
9792 * since FW might read the BD right after the producer is updated.
9793 * This is only applicable for weak-ordered memory model archs such
9794 * as IA-64. The following barrier is also mandatory since the FW
9795 * assumes packets always have BDs.
9796 */
9797 wmb();
9798
9799 fp->hw_tx_prods->bds_prod =
9800 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9801 mb(); /* FW restriction: must not reorder writing nbd and packets */
9802 fp->hw_tx_prods->packets_prod =
9803 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9804 DOORBELL(bp, FP_IDX(fp), 0);
9805
9806 mmiowb();
9807
9808 fp->tx_bd_prod += nbd;
9809 dev->trans_start = jiffies;
9810
9811 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9812 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9813 if we put Tx into XOFF state. */
9814 smp_mb();
9815 netif_stop_queue(dev);
9816 bp->eth_stats.driver_xoff++;
9817 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9818 netif_wake_queue(dev);
9819 }
9820 fp->tx_pkt++;
9821
9822 return NETDEV_TX_OK;
9823 }
9824
9825 /* called with rtnl_lock */
9826 static int bnx2x_open(struct net_device *dev)
9827 {
9828 struct bnx2x *bp = netdev_priv(dev);
9829
9830 bnx2x_set_power_state(bp, PCI_D0);
9831
9832 return bnx2x_nic_load(bp, LOAD_OPEN);
9833 }
9834
9835 /* called with rtnl_lock */
9836 static int bnx2x_close(struct net_device *dev)
9837 {
9838 struct bnx2x *bp = netdev_priv(dev);
9839
9840 /* Unload the driver, release IRQs */
9841 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9842 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9843 if (!CHIP_REV_IS_SLOW(bp))
9844 bnx2x_set_power_state(bp, PCI_D3hot);
9845
9846 return 0;
9847 }
9848
9849 /* called with netif_tx_lock from set_multicast */
9850 static void bnx2x_set_rx_mode(struct net_device *dev)
9851 {
9852 struct bnx2x *bp = netdev_priv(dev);
9853 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9854 int port = BP_PORT(bp);
9855
9856 if (bp->state != BNX2X_STATE_OPEN) {
9857 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9858 return;
9859 }
9860
9861 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9862
9863 if (dev->flags & IFF_PROMISC)
9864 rx_mode = BNX2X_RX_MODE_PROMISC;
9865
9866 else if ((dev->flags & IFF_ALLMULTI) ||
9867 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9868 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9869
9870 else { /* some multicasts */
9871 if (CHIP_IS_E1(bp)) {
9872 int i, old, offset;
9873 struct dev_mc_list *mclist;
9874 struct mac_configuration_cmd *config =
9875 bnx2x_sp(bp, mcast_config);
9876
9877 for (i = 0, mclist = dev->mc_list;
9878 mclist && (i < dev->mc_count);
9879 i++, mclist = mclist->next) {
9880
9881 config->config_table[i].
9882 cam_entry.msb_mac_addr =
9883 swab16(*(u16 *)&mclist->dmi_addr[0]);
9884 config->config_table[i].
9885 cam_entry.middle_mac_addr =
9886 swab16(*(u16 *)&mclist->dmi_addr[2]);
9887 config->config_table[i].
9888 cam_entry.lsb_mac_addr =
9889 swab16(*(u16 *)&mclist->dmi_addr[4]);
9890 config->config_table[i].cam_entry.flags =
9891 cpu_to_le16(port);
9892 config->config_table[i].
9893 target_table_entry.flags = 0;
9894 config->config_table[i].
9895 target_table_entry.client_id = 0;
9896 config->config_table[i].
9897 target_table_entry.vlan_id = 0;
9898
9899 DP(NETIF_MSG_IFUP,
9900 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9901 config->config_table[i].
9902 cam_entry.msb_mac_addr,
9903 config->config_table[i].
9904 cam_entry.middle_mac_addr,
9905 config->config_table[i].
9906 cam_entry.lsb_mac_addr);
9907 }
9908 old = config->hdr.length_6b;
9909 if (old > i) {
9910 for (; i < old; i++) {
9911 if (CAM_IS_INVALID(config->
9912 config_table[i])) {
9913 /* already invalidated */
9914 break;
9915 }
9916 /* invalidate */
9917 CAM_INVALIDATE(config->
9918 config_table[i]);
9919 }
9920 }
9921
9922 if (CHIP_REV_IS_SLOW(bp))
9923 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9924 else
9925 offset = BNX2X_MAX_MULTICAST*(1 + port);
9926
9927 config->hdr.length_6b = i;
9928 config->hdr.offset = offset;
9929 config->hdr.client_id = BP_CL_ID(bp);
9930 config->hdr.reserved1 = 0;
9931
9932 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9933 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9934 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9935 0);
9936 } else { /* E1H */
9937 /* Accept one or more multicasts */
9938 struct dev_mc_list *mclist;
9939 u32 mc_filter[MC_HASH_SIZE];
9940 u32 crc, bit, regidx;
9941 int i;
9942
9943 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9944
9945 for (i = 0, mclist = dev->mc_list;
9946 mclist && (i < dev->mc_count);
9947 i++, mclist = mclist->next) {
9948
9949 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9950 mclist->dmi_addr);
9951
9952 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9953 bit = (crc >> 24) & 0xff;
9954 regidx = bit >> 5;
9955 bit &= 0x1f;
9956 mc_filter[regidx] |= (1 << bit);
9957 }
9958
9959 for (i = 0; i < MC_HASH_SIZE; i++)
9960 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9961 mc_filter[i]);
9962 }
9963 }
9964
9965 bp->rx_mode = rx_mode;
9966 bnx2x_set_storm_rx_mode(bp);
9967 }
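
/* Sketch of the E1H multicast hash used above (illustrative only): the
 * top byte of the little-endian CRC32C of the MAC selects one of 256
 * bits spread across the MC_HASH_SIZE filter registers:
 */
static inline void example_mc_hash_set(u32 *mc_filter, const u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;

	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}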
9968
9969 /* called with rtnl_lock */
9970 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9971 {
9972 struct sockaddr *addr = p;
9973 struct bnx2x *bp = netdev_priv(dev);
9974
9975 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9976 return -EINVAL;
9977
9978 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9979 if (netif_running(dev)) {
9980 if (CHIP_IS_E1(bp))
9981 bnx2x_set_mac_addr_e1(bp, 1);
9982 else
9983 bnx2x_set_mac_addr_e1h(bp, 1);
9984 }
9985
9986 return 0;
9987 }
9988
9989 /* called with rtnl_lock */
9990 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9991 {
9992 struct mii_ioctl_data *data = if_mii(ifr);
9993 struct bnx2x *bp = netdev_priv(dev);
9994 int port = BP_PORT(bp);
9995 int err;
9996
9997 switch (cmd) {
9998 case SIOCGMIIPHY:
9999 data->phy_id = bp->port.phy_addr;
10000
10001 /* fallthrough */
10002
10003 case SIOCGMIIREG: {
10004 u16 mii_regval;
10005
10006 if (!netif_running(dev))
10007 return -EAGAIN;
10008
10009 mutex_lock(&bp->port.phy_mutex);
10010 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10011 DEFAULT_PHY_DEV_ADDR,
10012 (data->reg_num & 0x1f), &mii_regval);
10013 data->val_out = mii_regval;
10014 mutex_unlock(&bp->port.phy_mutex);
10015 return err;
10016 }
10017
10018 case SIOCSMIIREG:
10019 if (!capable(CAP_NET_ADMIN))
10020 return -EPERM;
10021
10022 if (!netif_running(dev))
10023 return -EAGAIN;
10024
10025 mutex_lock(&bp->port.phy_mutex);
10026 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10027 DEFAULT_PHY_DEV_ADDR,
10028 (data->reg_num & 0x1f), data->val_in);
10029 mutex_unlock(&bp->port.phy_mutex);
10030 return err;
10031
10032 default:
10033 /* do nothing */
10034 break;
10035 }
10036
10037 return -EOPNOTSUPP;
10038 }
10039
10040 /* called with rtnl_lock */
10041 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10042 {
10043 struct bnx2x *bp = netdev_priv(dev);
10044 int rc = 0;
10045
10046 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10047 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10048 return -EINVAL;
10049
10050 /* This does not race with packet allocation
10051 * because the actual alloc size is
10052 * only updated as part of load
10053 */
10054 dev->mtu = new_mtu;
10055
10056 if (netif_running(dev)) {
10057 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10058 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10059 }
10060
10061 return rc;
10062 }
10063
10064 static void bnx2x_tx_timeout(struct net_device *dev)
10065 {
10066 struct bnx2x *bp = netdev_priv(dev);
10067
10068 #ifdef BNX2X_STOP_ON_ERROR
10069 if (!bp->panic)
10070 bnx2x_panic();
10071 #endif
10072 /* This allows the netif to be shut down gracefully before resetting */
10073 schedule_work(&bp->reset_task);
10074 }
10075
10076 #ifdef BCM_VLAN
10077 /* called with rtnl_lock */
10078 static void bnx2x_vlan_rx_register(struct net_device *dev,
10079 struct vlan_group *vlgrp)
10080 {
10081 struct bnx2x *bp = netdev_priv(dev);
10082
10083 bp->vlgrp = vlgrp;
10084
10085 /* Set flags according to the required capabilities */
10086 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10087
10088 if (dev->features & NETIF_F_HW_VLAN_TX)
10089 bp->flags |= HW_VLAN_TX_FLAG;
10090
10091 if (dev->features & NETIF_F_HW_VLAN_RX)
10092 bp->flags |= HW_VLAN_RX_FLAG;
10093
10094 if (netif_running(dev))
10095 bnx2x_set_client_config(bp);
10096 }
10097
10098 #endif
10099
10100 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10101 static void poll_bnx2x(struct net_device *dev)
10102 {
10103 struct bnx2x *bp = netdev_priv(dev);
10104
10105 disable_irq(bp->pdev->irq);
10106 bnx2x_interrupt(bp->pdev->irq, dev);
10107 enable_irq(bp->pdev->irq);
10108 }
10109 #endif
10110
10111 static const struct net_device_ops bnx2x_netdev_ops = {
10112 .ndo_open = bnx2x_open,
10113 .ndo_stop = bnx2x_close,
10114 .ndo_start_xmit = bnx2x_start_xmit,
10115 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10116 .ndo_set_mac_address = bnx2x_change_mac_addr,
10117 .ndo_validate_addr = eth_validate_addr,
10118 .ndo_do_ioctl = bnx2x_ioctl,
10119 .ndo_change_mtu = bnx2x_change_mtu,
10120 .ndo_tx_timeout = bnx2x_tx_timeout,
10121 #ifdef BCM_VLAN
10122 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10123 #endif
10124 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10125 .ndo_poll_controller = poll_bnx2x,
10126 #endif
10127 };
10128
10129
10130 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10131 struct net_device *dev)
10132 {
10133 struct bnx2x *bp;
10134 int rc;
10135
10136 SET_NETDEV_DEV(dev, &pdev->dev);
10137 bp = netdev_priv(dev);
10138
10139 bp->dev = dev;
10140 bp->pdev = pdev;
10141 bp->flags = 0;
10142 bp->func = PCI_FUNC(pdev->devfn);
10143
10144 rc = pci_enable_device(pdev);
10145 if (rc) {
10146 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10147 goto err_out;
10148 }
10149
10150 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10151 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10152 " aborting\n");
10153 rc = -ENODEV;
10154 goto err_out_disable;
10155 }
10156
10157 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10158 printk(KERN_ERR PFX "Cannot find second PCI device"
10159 " base address, aborting\n");
10160 rc = -ENODEV;
10161 goto err_out_disable;
10162 }
10163
10164 if (atomic_read(&pdev->enable_cnt) == 1) {
10165 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10166 if (rc) {
10167 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10168 " aborting\n");
10169 goto err_out_disable;
10170 }
10171
10172 pci_set_master(pdev);
10173 pci_save_state(pdev);
10174 }
10175
10176 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10177 if (bp->pm_cap == 0) {
10178 printk(KERN_ERR PFX "Cannot find power management"
10179 " capability, aborting\n");
10180 rc = -EIO;
10181 goto err_out_release;
10182 }
10183
10184 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10185 if (bp->pcie_cap == 0) {
10186 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10187 " aborting\n");
10188 rc = -EIO;
10189 goto err_out_release;
10190 }
10191
10192 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10193 bp->flags |= USING_DAC_FLAG;
10194 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10195 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10196 " failed, aborting\n");
10197 rc = -EIO;
10198 goto err_out_release;
10199 }
10200
10201 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10202 printk(KERN_ERR PFX "System does not support DMA,"
10203 " aborting\n");
10204 rc = -EIO;
10205 goto err_out_release;
10206 }
10207
10208 dev->mem_start = pci_resource_start(pdev, 0);
10209 dev->base_addr = dev->mem_start;
10210 dev->mem_end = pci_resource_end(pdev, 0);
10211
10212 dev->irq = pdev->irq;
10213
10214 bp->regview = pci_ioremap_bar(pdev, 0);
10215 if (!bp->regview) {
10216 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10217 rc = -ENOMEM;
10218 goto err_out_release;
10219 }
10220
10221 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10222 min_t(u64, BNX2X_DB_SIZE,
10223 pci_resource_len(pdev, 2)));
10224 if (!bp->doorbells) {
10225 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10226 rc = -ENOMEM;
10227 goto err_out_unmap;
10228 }
10229
10230 bnx2x_set_power_state(bp, PCI_D0);
10231
10232 /* clean indirect addresses */
10233 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10234 PCICFG_VENDOR_ID_OFFSET);
10235 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10236 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10237 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10238 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10239
10240 dev->watchdog_timeo = TX_TIMEOUT;
10241
10242 dev->netdev_ops = &bnx2x_netdev_ops;
10243 dev->ethtool_ops = &bnx2x_ethtool_ops;
10244 dev->features |= NETIF_F_SG;
10245 dev->features |= NETIF_F_HW_CSUM;
10246 if (bp->flags & USING_DAC_FLAG)
10247 dev->features |= NETIF_F_HIGHDMA;
10248 #ifdef BCM_VLAN
10249 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10250 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10251 #endif
10252 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10253 dev->features |= NETIF_F_TSO6;
10254
10255 return 0;
10256
10257 err_out_unmap:
10258 if (bp->regview) {
10259 iounmap(bp->regview);
10260 bp->regview = NULL;
10261 }
10262 if (bp->doorbells) {
10263 iounmap(bp->doorbells);
10264 bp->doorbells = NULL;
10265 }
10266
10267 err_out_release:
10268 if (atomic_read(&pdev->enable_cnt) == 1)
10269 pci_release_regions(pdev);
10270
10271 err_out_disable:
10272 pci_disable_device(pdev);
10273 pci_set_drvdata(pdev, NULL);
10274
10275 err_out:
10276 return rc;
10277 }
10278
10279 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10280 {
10281 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10282
10283 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10284 return val;
10285 }
10286
10287 /* return value of 1=2.5GHz 2=5GHz */
10288 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10289 {
10290 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10291
10292 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10293 return val;
10294 }
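
/* Example (illustrative): the probe message below combines the two
 * helpers above, so width 8 and speed 2 print as "PCI-E x8 5GHz (Gen2)".
 */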
10295
10296 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10297 const struct pci_device_id *ent)
10298 {
10299 static int version_printed;
10300 struct net_device *dev = NULL;
10301 struct bnx2x *bp;
10302 int rc;
10303
10304 if (version_printed++ == 0)
10305 printk(KERN_INFO "%s", version);
10306
10307 /* dev zeroed in init_etherdev */
10308 dev = alloc_etherdev(sizeof(*bp));
10309 if (!dev) {
10310 printk(KERN_ERR PFX "Cannot allocate net device\n");
10311 return -ENOMEM;
10312 }
10313
10314 bp = netdev_priv(dev);
10315 bp->msglevel = debug;
10316
10317 rc = bnx2x_init_dev(pdev, dev);
10318 if (rc < 0) {
10319 free_netdev(dev);
10320 return rc;
10321 }
10322
10323 pci_set_drvdata(pdev, dev);
10324
10325 rc = bnx2x_init_bp(bp);
10326 if (rc)
10327 goto init_one_exit;
10328
10329 rc = register_netdev(dev);
10330 if (rc) {
10331 dev_err(&pdev->dev, "Cannot register net device\n");
10332 goto init_one_exit;
10333 }
10334
10335 netif_carrier_off(dev);
10336
10337 bp->common.name = board_info[ent->driver_data].name;
10338 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10339 " IRQ %d, ", dev->name, bp->common.name,
10340 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10341 bnx2x_get_pcie_width(bp),
10342 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10343 dev->base_addr, bp->pdev->irq);
10344 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10345 return 0;
10346
10347 init_one_exit:
10348 if (bp->regview)
10349 iounmap(bp->regview);
10350
10351 if (bp->doorbells)
10352 iounmap(bp->doorbells);
10353
10354 free_netdev(dev);
10355
10356 if (atomic_read(&pdev->enable_cnt) == 1)
10357 pci_release_regions(pdev);
10358
10359 pci_disable_device(pdev);
10360 pci_set_drvdata(pdev, NULL);
10361
10362 return rc;
10363 }
10364
10365 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10366 {
10367 struct net_device *dev = pci_get_drvdata(pdev);
10368 struct bnx2x *bp;
10369
10370 if (!dev) {
10371 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10372 return;
10373 }
10374 bp = netdev_priv(dev);
10375
10376 unregister_netdev(dev);
10377
10378 if (bp->regview)
10379 iounmap(bp->regview);
10380
10381 if (bp->doorbells)
10382 iounmap(bp->doorbells);
10383
10384 free_netdev(dev);
10385
10386 if (atomic_read(&pdev->enable_cnt) == 1)
10387 pci_release_regions(pdev);
10388
10389 pci_disable_device(pdev);
10390 pci_set_drvdata(pdev, NULL);
10391 }
10392
10393 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10394 {
10395 struct net_device *dev = pci_get_drvdata(pdev);
10396 struct bnx2x *bp;
10397
10398 if (!dev) {
10399 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10400 return -ENODEV;
10401 }
10402 bp = netdev_priv(dev);
10403
10404 rtnl_lock();
10405
10406 pci_save_state(pdev);
10407
10408 if (!netif_running(dev)) {
10409 rtnl_unlock();
10410 return 0;
10411 }
10412
10413 netif_device_detach(dev);
10414
10415 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10416
10417 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10418
10419 rtnl_unlock();
10420
10421 return 0;
10422 }
10423
10424 static int bnx2x_resume(struct pci_dev *pdev)
10425 {
10426 struct net_device *dev = pci_get_drvdata(pdev);
10427 struct bnx2x *bp;
10428 int rc;
10429
10430 if (!dev) {
10431 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10432 return -ENODEV;
10433 }
10434 bp = netdev_priv(dev);
10435
10436 rtnl_lock();
10437
10438 pci_restore_state(pdev);
10439
10440 if (!netif_running(dev)) {
10441 rtnl_unlock();
10442 return 0;
10443 }
10444
10445 bnx2x_set_power_state(bp, PCI_D0);
10446 netif_device_attach(dev);
10447
10448 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10449
10450 rtnl_unlock();
10451
10452 return rc;
10453 }
10454
10455 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10456 {
10457 int i;
10458
10459 bp->state = BNX2X_STATE_ERROR;
10460
10461 bp->rx_mode = BNX2X_RX_MODE_NONE;
10462
10463 bnx2x_netif_stop(bp, 0);
10464
10465 del_timer_sync(&bp->timer);
10466 bp->stats_state = STATS_STATE_DISABLED;
10467 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10468
10469 /* Release IRQs */
10470 bnx2x_free_irq(bp);
10471
10472 if (CHIP_IS_E1(bp)) {
10473 struct mac_configuration_cmd *config =
10474 bnx2x_sp(bp, mcast_config);
10475
10476 for (i = 0; i < config->hdr.length_6b; i++)
10477 CAM_INVALIDATE(config->config_table[i]);
10478 }
10479
10480 /* Free SKBs, SGEs, TPA pool and driver internals */
10481 bnx2x_free_skbs(bp);
10482 for_each_queue(bp, i)
10483 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10484 bnx2x_free_mem(bp);
10485
10486 bp->state = BNX2X_STATE_CLOSED;
10487
10488 netif_carrier_off(bp->dev);
10489
10490 return 0;
10491 }
10492
10493 static void bnx2x_eeh_recover(struct bnx2x *bp)
10494 {
10495 u32 val;
10496
10497 mutex_init(&bp->port.phy_mutex);
10498
10499 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10500 bp->link_params.shmem_base = bp->common.shmem_base;
10501 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10502
10503 if (!bp->common.shmem_base ||
10504 (bp->common.shmem_base < 0xA0000) ||
10505 (bp->common.shmem_base >= 0xC0000)) {
10506 BNX2X_DEV_INFO("MCP not active\n");
10507 bp->flags |= NO_MCP_FLAG;
10508 return;
10509 }
10510
10511 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10512 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10513 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10514 BNX2X_ERR("BAD MCP validity signature\n");
10515
10516 if (!BP_NOMCP(bp)) {
10517 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10518 & DRV_MSG_SEQ_NUMBER_MASK);
10519 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10520 }
10521 }
10522
10523 /**
10524 * bnx2x_io_error_detected - called when PCI error is detected
10525 * @pdev: Pointer to PCI device
10526 * @state: The current pci connection state
10527 *
10528 * This function is called after a PCI bus error affecting
10529 * this device has been detected.
10530 */
10531 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10532 pci_channel_state_t state)
10533 {
10534 struct net_device *dev = pci_get_drvdata(pdev);
10535 struct bnx2x *bp = netdev_priv(dev);
10536
10537 rtnl_lock();
10538
10539 netif_device_detach(dev);
10540
10541 if (netif_running(dev))
10542 bnx2x_eeh_nic_unload(bp);
10543
10544 pci_disable_device(pdev);
10545
10546 rtnl_unlock();
10547
10548 /* Request a slot reset */
10549 return PCI_ERS_RESULT_NEED_RESET;
10550 }
10551
10552 /**
10553 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10554 * @pdev: Pointer to PCI device
10555 *
10556 * Restart the card from scratch, as if from a cold-boot.
10557 */
10558 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10559 {
10560 struct net_device *dev = pci_get_drvdata(pdev);
10561 struct bnx2x *bp = netdev_priv(dev);
10562
10563 rtnl_lock();
10564
10565 if (pci_enable_device(pdev)) {
10566 dev_err(&pdev->dev,
10567 "Cannot re-enable PCI device after reset\n");
10568 rtnl_unlock();
10569 return PCI_ERS_RESULT_DISCONNECT;
10570 }
10571
10572 pci_set_master(pdev);
10573 pci_restore_state(pdev);
10574
10575 if (netif_running(dev))
10576 bnx2x_set_power_state(bp, PCI_D0);
10577
10578 rtnl_unlock();
10579
10580 return PCI_ERS_RESULT_RECOVERED;
10581 }
10582
10583 /**
10584 * bnx2x_io_resume - called when traffic can start flowing again
10585 * @pdev: Pointer to PCI device
10586 *
10587 * This callback is called when the error recovery driver tells us that
10588 * it's OK to resume normal operation.
10589 */
10590 static void bnx2x_io_resume(struct pci_dev *pdev)
10591 {
10592 struct net_device *dev = pci_get_drvdata(pdev);
10593 struct bnx2x *bp = netdev_priv(dev);
10594
10595 rtnl_lock();
10596
10597 bnx2x_eeh_recover(bp);
10598
10599 if (netif_running(dev))
10600 bnx2x_nic_load(bp, LOAD_NORMAL);
10601
10602 netif_device_attach(dev);
10603
10604 rtnl_unlock();
10605 }
10606
10607 static struct pci_error_handlers bnx2x_err_handler = {
10608 .error_detected = bnx2x_io_error_detected,
10609 .slot_reset = bnx2x_io_slot_reset,
10610 .resume = bnx2x_io_resume,
10611 };
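
/* EEH recovery flow (illustrative summary): the PCI core invokes
 * .error_detected first, which detaches the netdev and asks for a slot
 * reset; .slot_reset re-enables and restores the device after the bus
 * reset; .resume then reloads the NIC and re-attaches the netdev.
 */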
10612
10613 static struct pci_driver bnx2x_pci_driver = {
10614 .name = DRV_MODULE_NAME,
10615 .id_table = bnx2x_pci_tbl,
10616 .probe = bnx2x_init_one,
10617 .remove = __devexit_p(bnx2x_remove_one),
10618 .suspend = bnx2x_suspend,
10619 .resume = bnx2x_resume,
10620 .err_handler = &bnx2x_err_handler,
10621 };
10622
10623 static int __init bnx2x_init(void)
10624 {
10625 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10626 if (bnx2x_wq == NULL) {
10627 printk(KERN_ERR PFX "Cannot create workqueue\n");
10628 return -ENOMEM;
10629 }
10630
10631 return pci_register_driver(&bnx2x_pci_driver);
10632 }
10633
10634 static void __exit bnx2x_cleanup(void)
10635 {
10636 pci_unregister_driver(&bnx2x_pci_driver);
10637
10638 destroy_workqueue(bnx2x_wq);
10639 }
10640
10641 module_init(bnx2x_init);
10642 module_exit(bnx2x_cleanup);
10643