[BNX2X]: Correct Link management
[deliverable/linux.git] / drivers / net / bnx2x.c
CommitLineData
a2fbb9ea
ET
1/* bnx2x.c: Broadcom Everest network driver.
2 *
f1410647 3 * Copyright (c) 2007-2008 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Eliezer Tamir <eliezert@broadcom.com>
10 * Based on code from Michael Chan's bnx2 driver
11 * UDP CSUM errata workaround by Arik Gendelman
12 * Slowpath rework by Vladislav Zolotarov
c14423fe 13 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
14 *
15 */
16
17/* define this to make the driver freeze on error
18 * to allow getting debug info
c14423fe 19 * (you will need to reboot afterwards)
a2fbb9ea
ET
20 */
21/*#define BNX2X_STOP_ON_ERROR*/
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/kernel.h>
26#include <linux/device.h> /* for dev_info() */
27#include <linux/timer.h>
28#include <linux/errno.h>
29#include <linux/ioport.h>
30#include <linux/slab.h>
31#include <linux/vmalloc.h>
32#include <linux/interrupt.h>
33#include <linux/pci.h>
34#include <linux/init.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/skbuff.h>
38#include <linux/dma-mapping.h>
39#include <linux/bitops.h>
40#include <linux/irq.h>
41#include <linux/delay.h>
42#include <asm/byteorder.h>
43#include <linux/time.h>
44#include <linux/ethtool.h>
45#include <linux/mii.h>
46#ifdef NETIF_F_HW_VLAN_TX
47 #include <linux/if_vlan.h>
48 #define BCM_VLAN 1
49#endif
50#include <net/ip.h>
51#include <net/tcp.h>
52#include <net/checksum.h>
53#include <linux/workqueue.h>
54#include <linux/crc32.h>
55#include <linux/prefetch.h>
56#include <linux/zlib.h>
57#include <linux/version.h>
58#include <linux/io.h>
59
60#include "bnx2x_reg.h"
61#include "bnx2x_fw_defs.h"
62#include "bnx2x_hsi.h"
63#include "bnx2x.h"
64#include "bnx2x_init.h"
65
66#define DRV_MODULE_VERSION "0.40.15"
67#define DRV_MODULE_RELDATE "$DateTime: 2007/11/15 07:28:37 $"
f1410647 68#define BNX2X_BC_VER 0x040200
a2fbb9ea
ET
69
70/* Time in jiffies before concluding the transmitter is hung. */
71#define TX_TIMEOUT (5*HZ)
72
53a10565 73static char version[] __devinitdata =
c14423fe 74 "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
a2fbb9ea
ET
75 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION);
f1410647 81MODULE_INFO(cvs_version, "$Revision: #404 $");
a2fbb9ea
ET
82
83static int use_inta;
84static int poll;
85static int onefunc;
86static int nomcp;
87static int debug;
88static int use_multi;
89
90module_param(use_inta, int, 0);
91module_param(poll, int, 0);
92module_param(onefunc, int, 0);
93module_param(debug, int, 0);
94MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
95MODULE_PARM_DESC(poll, "use polling (for debug)");
96MODULE_PARM_DESC(onefunc, "enable only first function");
c14423fe
ET
97MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
98MODULE_PARM_DESC(debug, "default debug msglevel");
a2fbb9ea
ET
99
100#ifdef BNX2X_MULTI
101module_param(use_multi, int, 0);
102MODULE_PARM_DESC(use_multi, "use per-CPU queues");
103#endif
104
105enum bnx2x_board_type {
106 BCM57710 = 0,
107};
108
109/* indexed by board_t, above */
53a10565 110static struct {
a2fbb9ea
ET
111 char *name;
112} board_info[] __devinitdata = {
113 { "Broadcom NetXtreme II BCM57710 XGb" }
114};
115
116static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119 { 0 }
120};
121
122MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
123
124/****************************************************************************
125* General service functions
126****************************************************************************/
127
128/* used only at init
129 * locking is done by mcp
130 */
131static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
132{
133 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
134 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
136 PCICFG_VENDOR_ID_OFFSET);
137}
138
139#ifdef BNX2X_IND_RD
140static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
141{
142 u32 val;
143
144 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
145 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
146 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
147 PCICFG_VENDOR_ID_OFFSET);
148
149 return val;
150}
151#endif
152
153static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
158};
159
160/* copy command into DMAE command memory and set DMAE command go */
161static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162 int idx)
163{
164 u32 cmd_offset;
165 int i;
166
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
170
171/* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
173 }
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
175}
176
177static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
178 u32 dst_addr, u32 len32)
179{
180 struct dmae_command *dmae = &bp->dmae;
181 int port = bp->port;
182 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
183 int timeout = 200;
184
185 memset(dmae, 0, sizeof(struct dmae_command));
186
187 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
188 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
189 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
190#ifdef __BIG_ENDIAN
191 DMAE_CMD_ENDIANITY_B_DW_SWAP |
192#else
193 DMAE_CMD_ENDIANITY_DW_SWAP |
194#endif
195 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
196 dmae->src_addr_lo = U64_LO(dma_addr);
197 dmae->src_addr_hi = U64_HI(dma_addr);
198 dmae->dst_addr_lo = dst_addr >> 2;
199 dmae->dst_addr_hi = 0;
200 dmae->len = len32;
201 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
202 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
203 dmae->comp_val = BNX2X_WB_COMP_VAL;
204
205/*
206 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
207 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
208 "dst_addr [%x:%08x (%08x)]\n"
209 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
210 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
211 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
212 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
213*/
214/*
215 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
216 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
217 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
218*/
219
220 *wb_comp = 0;
221
222 bnx2x_post_dmae(bp, dmae, port * 8);
223
224 udelay(5);
225 /* adjust timeout for emulation/FPGA */
226 if (CHIP_REV_IS_SLOW(bp))
227 timeout *= 100;
228 while (*wb_comp != BNX2X_WB_COMP_VAL) {
229/* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
230 udelay(5);
231 if (!timeout) {
232 BNX2X_ERR("dmae timeout!\n");
233 break;
234 }
235 timeout--;
236 }
237}
238
239#ifdef BNX2X_DMAE_RD
240static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
241{
242 struct dmae_command *dmae = &bp->dmae;
243 int port = bp->port;
244 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
245 int timeout = 200;
246
247 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
248 memset(dmae, 0, sizeof(struct dmae_command));
249
250 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
251 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
252 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
253#ifdef __BIG_ENDIAN
254 DMAE_CMD_ENDIANITY_B_DW_SWAP |
255#else
256 DMAE_CMD_ENDIANITY_DW_SWAP |
257#endif
258 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
259 dmae->src_addr_lo = src_addr >> 2;
260 dmae->src_addr_hi = 0;
261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
262 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
263 dmae->len = len32;
264 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
265 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
266 dmae->comp_val = BNX2X_WB_COMP_VAL;
267
268/*
269 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
270 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
271 "dst_addr [%x:%08x (%08x)]\n"
272 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
273 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
274 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
275 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
276*/
277
278 *wb_comp = 0;
279
280 bnx2x_post_dmae(bp, dmae, port * 8);
281
282 udelay(5);
283 while (*wb_comp != BNX2X_WB_COMP_VAL) {
284 udelay(5);
285 if (!timeout) {
286 BNX2X_ERR("dmae timeout!\n");
287 break;
288 }
289 timeout--;
290 }
291/*
292 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
293 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
294 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
295*/
296}
297#endif
298
299static int bnx2x_mc_assert(struct bnx2x *bp)
300{
301 int i, j;
302 int rc = 0;
303 char last_idx;
304 const char storm[] = {"XTCU"};
305 const u32 intmem_base[] = {
306 BAR_XSTRORM_INTMEM,
307 BAR_TSTRORM_INTMEM,
308 BAR_CSTRORM_INTMEM,
309 BAR_USTRORM_INTMEM
310 };
311
312 /* Go through all instances of all SEMIs */
313 for (i = 0; i < 4; i++) {
314 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
315 intmem_base[i]);
316 BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
317 storm[i], last_idx);
318
319 /* print the asserts */
320 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
321 u32 row0, row1, row2, row3;
322
323 row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
324 intmem_base[i]);
325 row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
326 intmem_base[i]);
327 row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
328 intmem_base[i]);
329 row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
330 intmem_base[i]);
331
332 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
333 BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x ="
334 " 0x%08x 0x%08x 0x%08x 0x%08x\n",
335 storm[i], j, row3, row2, row1, row0);
336 rc++;
337 } else {
338 break;
339 }
340 }
341 }
342 return rc;
343}
c14423fe 344
a2fbb9ea
ET
345static void bnx2x_fw_dump(struct bnx2x *bp)
346{
347 u32 mark, offset;
348 u32 data[9];
349 int word;
350
351 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
352 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
353
354 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
355 for (word = 0; word < 8; word++)
356 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
357 offset + 4*word));
358 data[8] = 0x0;
359 printk(KERN_ERR PFX "%s", (char *)data);
360 }
361 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
362 for (word = 0; word < 8; word++)
363 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
364 offset + 4*word));
365 data[8] = 0x0;
366 printk(KERN_ERR PFX "%s", (char *)data);
367 }
368 printk("\n" KERN_ERR PFX "end of fw dump\n");
369}
370
371static void bnx2x_panic_dump(struct bnx2x *bp)
372{
373 int i;
374 u16 j, start, end;
375
376 BNX2X_ERR("begin crash dump -----------------\n");
377
378 for_each_queue(bp, i) {
379 struct bnx2x_fastpath *fp = &bp->fp[i];
380 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
381
382 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
383 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
384 " *rx_cons_sb(%x) rx_comp_prod(%x)"
385 " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
386 " bd data(%x,%x)\n",
387 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
388 fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
389 fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
390 fp->fp_u_idx, hw_prods->packets_prod,
391 hw_prods->bds_prod);
392
393 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
394 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
395 for (j = start; j < end; j++) {
396 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
397
398 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
399 sw_bd->skb, sw_bd->first_bd);
400 }
401
402 start = TX_BD(fp->tx_bd_cons - 10);
403 end = TX_BD(fp->tx_bd_cons + 254);
404 for (j = start; j < end; j++) {
405 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
406
407 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
408 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
409 }
410
411 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
412 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
413 for (j = start; j < end; j++) {
414 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
415 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
416
417 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
418 j, rx_bd[0], rx_bd[1], sw_bd->skb);
419 }
420
421 start = RCQ_BD(fp->rx_comp_cons - 10);
422 end = RCQ_BD(fp->rx_comp_cons + 503);
423 for (j = start; j < end; j++) {
424 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
425
426 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
427 j, cqe[0], cqe[1], cqe[2], cqe[3]);
428 }
429 }
430
431 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_t_idx(%u)"
432 " def_x_idx(%u) def_att_idx(%u) attn_state(%u)"
433 " spq_prod_idx(%u)\n",
434 bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx,
435 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
436
437
438 bnx2x_mc_assert(bp);
439 BNX2X_ERR("end crash dump -----------------\n");
440
441 bp->stats_state = STATS_STATE_DISABLE;
442 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
443}
444
445static void bnx2x_enable_int(struct bnx2x *bp)
446{
447 int port = bp->port;
448 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
449 u32 val = REG_RD(bp, addr);
450 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
451
452 if (msix) {
453 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
454 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
455 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
456 } else {
457 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
458 HC_CONFIG_0_REG_INT_LINE_EN_0 |
459 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
460 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
461 }
462
463 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) msi %d\n",
464 val, port, addr, msix);
465
466 REG_WR(bp, addr, val);
467}
468
469static void bnx2x_disable_int(struct bnx2x *bp)
470{
471 int port = bp->port;
472 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
473 u32 val = REG_RD(bp, addr);
474
475 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
476 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
477 HC_CONFIG_0_REG_INT_LINE_EN_0 |
478 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
479
480 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
481 val, port, addr);
482
483 REG_WR(bp, addr, val);
484 if (REG_RD(bp, addr) != val)
485 BNX2X_ERR("BUG! proper val not read from IGU!\n");
486}
487
488static void bnx2x_disable_int_sync(struct bnx2x *bp)
489{
490
491 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
492 int i;
493
494 atomic_inc(&bp->intr_sem);
c14423fe 495 /* prevent the HW from sending interrupts */
a2fbb9ea
ET
496 bnx2x_disable_int(bp);
497
498 /* make sure all ISRs are done */
499 if (msix) {
500 for_each_queue(bp, i)
501 synchronize_irq(bp->msix_table[i].vector);
502
503 /* one more for the Slow Path IRQ */
504 synchronize_irq(bp->msix_table[i].vector);
505 } else
506 synchronize_irq(bp->pdev->irq);
507
508 /* make sure sp_task is not running */
509 cancel_work_sync(&bp->sp_task);
510
511}
512
513/* fast path code */
514
515/*
516 * general service functions
517 */
518
519static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
520 u8 storm, u16 index, u8 op, u8 update)
521{
522 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
523 struct igu_ack_register igu_ack;
524
525 igu_ack.status_block_index = index;
526 igu_ack.sb_id_and_flags =
527 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
528 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
529 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
530 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
531
532/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
533 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
534 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
535}
536
537static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
538{
539 struct host_status_block *fpsb = fp->status_blk;
540 u16 rc = 0;
541
542 barrier(); /* status block is written to by the chip */
543 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
544 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
545 rc |= 1;
546 }
547 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
548 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
549 rc |= 2;
550 }
551 return rc;
552}
553
554static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
555{
556 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
557
558 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
559 rx_cons_sb++;
560
561 if ((rx_cons_sb != fp->rx_comp_cons) ||
562 (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
563 return 1;
564
565 return 0;
566}
567
568static u16 bnx2x_ack_int(struct bnx2x *bp)
569{
570 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
571 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
572
573/* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
574 result, BAR_IGU_INTMEM + igu_addr); */
575
576#ifdef IGU_DEBUG
577#warning IGU_DEBUG active
578 if (result == 0) {
579 BNX2X_ERR("read %x from IGU\n", result);
580 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
581 }
582#endif
583 return result;
584}
585
586
587/*
588 * fast path service functions
589 */
590
591/* free skb in the packet ring at pos idx
592 * return idx of last bd freed
593 */
594static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
595 u16 idx)
596{
597 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
598 struct eth_tx_bd *tx_bd;
599 struct sk_buff *skb = tx_buf->skb;
600 u16 bd_idx = tx_buf->first_bd;
601 int nbd;
602
603 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
604 idx, tx_buf, skb);
605
606 /* unmap first bd */
607 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
608 tx_bd = &fp->tx_desc_ring[bd_idx];
609 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
610 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
611
612 nbd = le16_to_cpu(tx_bd->nbd) - 1;
613#ifdef BNX2X_STOP_ON_ERROR
614 if (nbd > (MAX_SKB_FRAGS + 2)) {
615 BNX2X_ERR("bad nbd!\n");
616 bnx2x_panic();
617 }
618#endif
619
620 /* Skip a parse bd and the TSO split header bd
621 since they have no mapping */
622 if (nbd)
623 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
624
625 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
626 ETH_TX_BD_FLAGS_TCP_CSUM |
627 ETH_TX_BD_FLAGS_SW_LSO)) {
628 if (--nbd)
629 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
630 tx_bd = &fp->tx_desc_ring[bd_idx];
631 /* is this a TSO split header bd? */
632 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
633 if (--nbd)
634 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
635 }
636 }
637
638 /* now free frags */
639 while (nbd > 0) {
640
641 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
642 tx_bd = &fp->tx_desc_ring[bd_idx];
643 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
644 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
645 if (--nbd)
646 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
647 }
648
649 /* release skb */
650 BUG_TRAP(skb);
651 dev_kfree_skb(skb);
652 tx_buf->first_bd = 0;
653 tx_buf->skb = NULL;
654
655 return bd_idx;
656}
657
658static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
659{
660 u16 used;
661 u32 prod;
662 u32 cons;
663
664 /* Tell compiler that prod and cons can change */
665 barrier();
666 prod = fp->tx_bd_prod;
667 cons = fp->tx_bd_cons;
668
669 used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
670 (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));
671
672 if (prod >= cons) {
673 /* used = prod - cons - prod/size + cons/size */
674 used -= NUM_TX_BD - NUM_TX_RINGS;
675 }
676
677 BUG_TRAP(used <= fp->bp->tx_ring_size);
678 BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
679
680 return (fp->bp->tx_ring_size - used);
681}
682
683static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
684{
685 struct bnx2x *bp = fp->bp;
686 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
687 int done = 0;
688
689#ifdef BNX2X_STOP_ON_ERROR
690 if (unlikely(bp->panic))
691 return;
692#endif
693
694 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
695 sw_cons = fp->tx_pkt_cons;
696
697 while (sw_cons != hw_cons) {
698 u16 pkt_cons;
699
700 pkt_cons = TX_BD(sw_cons);
701
702 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
703
704 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
705 hw_cons, sw_cons, pkt_cons);
706
707/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
708 rmb();
709 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
710 }
711*/
712 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
713 sw_cons++;
714 done++;
715
716 if (done == work)
717 break;
718 }
719
720 fp->tx_pkt_cons = sw_cons;
721 fp->tx_bd_cons = bd_cons;
722
723 /* Need to make the tx_cons update visible to start_xmit()
724 * before checking for netif_queue_stopped(). Without the
725 * memory barrier, there is a small possibility that start_xmit()
726 * will miss it and cause the queue to be stopped forever.
727 */
728 smp_mb();
729
730 /* TBD need a thresh? */
731 if (unlikely(netif_queue_stopped(bp->dev))) {
732
733 netif_tx_lock(bp->dev);
734
735 if (netif_queue_stopped(bp->dev) &&
736 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
737 netif_wake_queue(bp->dev);
738
739 netif_tx_unlock(bp->dev);
740
741 }
742}
743
744static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
745 union eth_rx_cqe *rr_cqe)
746{
747 struct bnx2x *bp = fp->bp;
748 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
749 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
750
751 DP(NETIF_MSG_RX_STATUS,
752 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
753 fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);
754
755 bp->spq_left++;
756
757 if (fp->index) {
758 switch (command | fp->state) {
759 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
760 BNX2X_FP_STATE_OPENING):
761 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
762 cid);
763 fp->state = BNX2X_FP_STATE_OPEN;
764 break;
765
766 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
767 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
768 cid);
769 fp->state = BNX2X_FP_STATE_HALTED;
770 break;
771
772 default:
773 BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
774 command, fp->state);
775 }
776 mb(); /* force bnx2x_wait_ramrod to see the change */
777 return;
778 }
c14423fe 779
a2fbb9ea
ET
780 switch (command | bp->state) {
781 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
782 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
783 bp->state = BNX2X_STATE_OPEN;
784 break;
785
786 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
787 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
788 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
789 fp->state = BNX2X_FP_STATE_HALTED;
790 break;
791
792 case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
793 DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
794 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
795 break;
796
797 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
798 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
799 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED;
800 break;
801
802 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
803 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
804 break;
805
806 default:
807 BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
808 command, bp->state);
809 }
810
811 mb(); /* force bnx2x_wait_ramrod to see the change */
812}
813
814static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
815 struct bnx2x_fastpath *fp, u16 index)
816{
817 struct sk_buff *skb;
818 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
819 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
820 dma_addr_t mapping;
821
822 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
823 if (unlikely(skb == NULL))
824 return -ENOMEM;
825
826 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
827 PCI_DMA_FROMDEVICE);
828 if (unlikely(dma_mapping_error(mapping))) {
829
830 dev_kfree_skb(skb);
831 return -ENOMEM;
832 }
833
834 rx_buf->skb = skb;
835 pci_unmap_addr_set(rx_buf, mapping, mapping);
836
837 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
838 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
839
840 return 0;
841}
842
843/* note that we are not allocating a new skb,
844 * we are just moving one from cons to prod
845 * we are not creating a new mapping,
846 * so there is no need to check for dma_mapping_error().
847 */
848static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
849 struct sk_buff *skb, u16 cons, u16 prod)
850{
851 struct bnx2x *bp = fp->bp;
852 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
853 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
854 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
855 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
856
857 pci_dma_sync_single_for_device(bp->pdev,
858 pci_unmap_addr(cons_rx_buf, mapping),
859 bp->rx_offset + RX_COPY_THRESH,
860 PCI_DMA_FROMDEVICE);
861
862 prod_rx_buf->skb = cons_rx_buf->skb;
863 pci_unmap_addr_set(prod_rx_buf, mapping,
864 pci_unmap_addr(cons_rx_buf, mapping));
865 *prod_bd = *cons_bd;
866}
867
868static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
869{
870 struct bnx2x *bp = fp->bp;
871 u16 bd_cons, bd_prod, comp_ring_cons;
872 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
873 int rx_pkt = 0;
874
875#ifdef BNX2X_STOP_ON_ERROR
876 if (unlikely(bp->panic))
877 return 0;
878#endif
879
880 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
881 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
882 hw_comp_cons++;
883
884 bd_cons = fp->rx_bd_cons;
885 bd_prod = fp->rx_bd_prod;
886 sw_comp_cons = fp->rx_comp_cons;
887 sw_comp_prod = fp->rx_comp_prod;
888
889 /* Memory barrier necessary as speculative reads of the rx
890 * buffer can be ahead of the index in the status block
891 */
892 rmb();
893
894 DP(NETIF_MSG_RX_STATUS,
895 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
896 fp->index, hw_comp_cons, sw_comp_cons);
897
898 while (sw_comp_cons != hw_comp_cons) {
899 unsigned int len, pad;
900 struct sw_rx_bd *rx_buf;
901 struct sk_buff *skb;
902 union eth_rx_cqe *cqe;
903
904 comp_ring_cons = RCQ_BD(sw_comp_cons);
905 bd_prod = RX_BD(bd_prod);
906 bd_cons = RX_BD(bd_cons);
907
908 cqe = &fp->rx_comp_ring[comp_ring_cons];
909
910 DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
911 " comp_ring (%u) bd_ring (%u,%u)\n",
912 hw_comp_cons, sw_comp_cons,
913 comp_ring_cons, bd_prod, bd_cons);
914 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
915 " queue %x vlan %x len %x\n",
916 cqe->fast_path_cqe.type,
917 cqe->fast_path_cqe.error_type_flags,
918 cqe->fast_path_cqe.status_flags,
919 cqe->fast_path_cqe.rss_hash_result,
920 cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
921
922 /* is this a slowpath msg? */
923 if (unlikely(cqe->fast_path_cqe.type)) {
924 bnx2x_sp_event(fp, cqe);
925 goto next_cqe;
926
927 /* this is an rx packet */
928 } else {
929 rx_buf = &fp->rx_buf_ring[bd_cons];
930 skb = rx_buf->skb;
931
932 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
933 pad = cqe->fast_path_cqe.placement_offset;
934
935 pci_dma_sync_single_for_device(bp->pdev,
936 pci_unmap_addr(rx_buf, mapping),
937 pad + RX_COPY_THRESH,
938 PCI_DMA_FROMDEVICE);
939 prefetch(skb);
940 prefetch(((char *)(skb)) + 128);
941
942 /* is this an error packet? */
943 if (unlikely(cqe->fast_path_cqe.error_type_flags &
944 ETH_RX_ERROR_FALGS)) {
945 /* do we sometimes forward error packets anyway? */
946 DP(NETIF_MSG_RX_ERR,
947 "ERROR flags(%u) Rx packet(%u)\n",
948 cqe->fast_path_cqe.error_type_flags,
949 sw_comp_cons);
950 /* TBD make sure MC counts this as a drop */
951 goto reuse_rx;
952 }
953
954 /* Since we don't have a jumbo ring
955 * copy small packets if mtu > 1500
956 */
957 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
958 (len <= RX_COPY_THRESH)) {
959 struct sk_buff *new_skb;
960
961 new_skb = netdev_alloc_skb(bp->dev,
962 len + pad);
963 if (new_skb == NULL) {
964 DP(NETIF_MSG_RX_ERR,
965 "ERROR packet dropped "
966 "because of alloc failure\n");
967 /* TBD count this as a drop? */
968 goto reuse_rx;
969 }
970
971 /* aligned copy */
972 skb_copy_from_linear_data_offset(skb, pad,
973 new_skb->data + pad, len);
974 skb_reserve(new_skb, pad);
975 skb_put(new_skb, len);
976
977 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
978
979 skb = new_skb;
980
981 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
982 pci_unmap_single(bp->pdev,
983 pci_unmap_addr(rx_buf, mapping),
984 bp->rx_buf_use_size,
985 PCI_DMA_FROMDEVICE);
986 skb_reserve(skb, pad);
987 skb_put(skb, len);
988
989 } else {
990 DP(NETIF_MSG_RX_ERR,
991 "ERROR packet dropped because "
992 "of alloc failure\n");
993reuse_rx:
994 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
995 goto next_rx;
996 }
997
998 skb->protocol = eth_type_trans(skb, bp->dev);
999
1000 skb->ip_summed = CHECKSUM_NONE;
1001 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1002 skb->ip_summed = CHECKSUM_UNNECESSARY;
1003
1004 /* TBD do we pass bad csum packets in promisc */
1005 }
1006
1007#ifdef BCM_VLAN
1008 if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
1009 & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
1010 && (bp->vlgrp != NULL))
1011 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1012 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1013 else
1014#endif
1015 netif_receive_skb(skb);
1016
1017 bp->dev->last_rx = jiffies;
1018
1019next_rx:
1020 rx_buf->skb = NULL;
1021
1022 bd_cons = NEXT_RX_IDX(bd_cons);
1023 bd_prod = NEXT_RX_IDX(bd_prod);
1024next_cqe:
1025 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1026 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1027 rx_pkt++;
1028
1029 if ((rx_pkt == budget))
1030 break;
1031 } /* while */
1032
1033 fp->rx_bd_cons = bd_cons;
1034 fp->rx_bd_prod = bd_prod;
1035 fp->rx_comp_cons = sw_comp_cons;
1036 fp->rx_comp_prod = sw_comp_prod;
1037
1038 REG_WR(bp, BAR_TSTRORM_INTMEM +
1039 TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
1040
1041 mmiowb(); /* keep prod updates ordered */
1042
1043 fp->rx_pkt += rx_pkt;
1044 fp->rx_calls++;
1045
1046 return rx_pkt;
1047}
1048
1049static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1050{
1051 struct bnx2x_fastpath *fp = fp_cookie;
1052 struct bnx2x *bp = fp->bp;
1053 struct net_device *dev = bp->dev;
1054 int index = fp->index;
1055
1056 DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
1057 bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1058
1059#ifdef BNX2X_STOP_ON_ERROR
1060 if (unlikely(bp->panic))
1061 return IRQ_HANDLED;
1062#endif
1063
1064 prefetch(fp->rx_cons_sb);
1065 prefetch(fp->tx_cons_sb);
1066 prefetch(&fp->status_blk->c_status_block.status_block_index);
1067 prefetch(&fp->status_blk->u_status_block.status_block_index);
1068
1069 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1070 return IRQ_HANDLED;
1071}
1072
1073static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1074{
1075 struct net_device *dev = dev_instance;
1076 struct bnx2x *bp = netdev_priv(dev);
1077 u16 status = bnx2x_ack_int(bp);
1078
1079 if (unlikely(status == 0)) {
1080 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1081 return IRQ_NONE;
1082 }
1083
1084 DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
1085
1086#ifdef BNX2X_STOP_ON_ERROR
1087 if (unlikely(bp->panic))
1088 return IRQ_HANDLED;
1089#endif
1090
1091 /* Return here if interrupt is shared and is disabled */
1092 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1093 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1094 return IRQ_HANDLED;
1095 }
1096
1097 if (status & 0x2) {
1098 struct bnx2x_fastpath *fp = &bp->fp[0];
1099
1100 prefetch(fp->rx_cons_sb);
1101 prefetch(fp->tx_cons_sb);
1102 prefetch(&fp->status_blk->c_status_block.status_block_index);
1103 prefetch(&fp->status_blk->u_status_block.status_block_index);
1104
1105 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1106
1107 status &= ~0x2;
1108 if (!status)
1109 return IRQ_HANDLED;
1110 }
1111
1112 if (unlikely(status & 0x1)) {
1113
1114 schedule_work(&bp->sp_task);
1115
1116 status &= ~0x1;
1117 if (!status)
1118 return IRQ_HANDLED;
1119 }
1120
1121 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
1122 status);
1123
1124 return IRQ_HANDLED;
1125}
1126
1127/* end of fast path */
1128
1129/* PHY/MAC */
1130
1131/*
1132 * General service functions
1133 */
1134
1135static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
1136{
1137 int port = bp->port;
1138
1139 NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
1140 ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
1141 SHARED_HW_CFG_LED_MODE_SHIFT));
1142 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
1143
1144 /* Set blinking rate to ~15.9Hz */
1145 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
1146 LED_BLINK_RATE_VAL);
1147 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);
1148
1149 /* On Ax chip versions for speeds less than 10G
1150 LED scheme is different */
1151 if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
1152 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
1153 NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
1154 NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
1155 }
1156}
1157
1158static void bnx2x_leds_unset(struct bnx2x *bp)
1159{
1160 int port = bp->port;
1161
1162 NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
1163 NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
1164}
1165
1166static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
1167{
1168 u32 val = REG_RD(bp, reg);
1169
1170 val |= bits;
1171 REG_WR(bp, reg, val);
1172 return val;
1173}
1174
1175static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1176{
1177 u32 val = REG_RD(bp, reg);
1178
1179 val &= ~bits;
1180 REG_WR(bp, reg, val);
1181 return val;
1182}
1183
f1410647
ET
1184static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1185{
1186 u32 cnt;
1187 u32 lock_status;
1188 u32 resource_bit = (1 << resource);
1189 u8 func = bp->port;
1190
1191 /* Validating that the resource is within range */
1192 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1193 DP(NETIF_MSG_HW,
1194 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1195 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1196 return -EINVAL;
1197 }
1198
1199 /* Validating that the resource is not already taken */
1200 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1201 if (lock_status & resource_bit) {
1202 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1203 lock_status, resource_bit);
1204 return -EEXIST;
1205 }
1206
1207 /* Try for 1 second every 5ms */
1208 for (cnt = 0; cnt < 200; cnt++) {
1209 /* Try to acquire the lock */
1210 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
1211 resource_bit);
1212 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1213 if (lock_status & resource_bit)
1214 return 0;
1215
1216 msleep(5);
1217 }
1218 DP(NETIF_MSG_HW, "Timeout\n");
1219 return -EAGAIN;
1220}
1221
1222static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1223{
1224 u32 lock_status;
1225 u32 resource_bit = (1 << resource);
1226 u8 func = bp->port;
1227
1228 /* Validating that the resource is within range */
1229 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1230 DP(NETIF_MSG_HW,
1231 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1232 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1233 return -EINVAL;
1234 }
1235
1236 /* Validating that the resource is currently taken */
1237 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1238 if (!(lock_status & resource_bit)) {
1239 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1240 lock_status, resource_bit);
1241 return -EFAULT;
1242 }
1243
1244 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
1245 return 0;
1246}
1247
1248static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1249{
1250 /* The GPIO should be swapped if swap register is set and active */
1251 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1252 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
1253 int gpio_shift = gpio_num +
1254 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1255 u32 gpio_mask = (1 << gpio_shift);
1256 u32 gpio_reg;
1257
1258 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1259 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1260 return -EINVAL;
1261 }
1262
1263 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1264 /* read GPIO and mask except the float bits */
1265 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1266
1267 switch (mode) {
1268 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1269 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1270 gpio_num, gpio_shift);
1271 /* clear FLOAT and set CLR */
1272 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1273 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1274 break;
1275
1276 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1277 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1278 gpio_num, gpio_shift);
1279 /* clear FLOAT and set SET */
1280 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1281 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1282 break;
1283
1284 case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1285 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1286 gpio_num, gpio_shift);
1287 /* set FLOAT */
1288 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1289 break;
1290
1291 default:
1292 break;
1293 }
1294
1295 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1296 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
1297
1298 return 0;
1299}
1300
1301static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1302{
1303 u32 spio_mask = (1 << spio_num);
1304 u32 spio_reg;
1305
1306 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1307 (spio_num > MISC_REGISTERS_SPIO_7)) {
1308 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1309 return -EINVAL;
1310 }
1311
1312 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1313 /* read SPIO and mask except the float bits */
1314 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1315
1316 switch (mode) {
1317 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1318 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1319 /* clear FLOAT and set CLR */
1320 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1321 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1322 break;
1323
1324 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1325 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1326 /* clear FLOAT and set SET */
1327 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1328 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1329 break;
1330
1331 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1332 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1333 /* set FLOAT */
1334 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1335 break;
1336
1337 default:
1338 break;
1339 }
1340
1341 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1342 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1343
1344 return 0;
1345}
1346
a2fbb9ea
ET
1347static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
1348{
a2fbb9ea
ET
1349 int port = bp->port;
1350 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
f1410647
ET
1351 u32 tmp;
1352 int i, rc;
a2fbb9ea
ET
1353
1354/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
1355 bp->phy_addr, reg, val); */
1356
1357 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1358
1359 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1360 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1361 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1362 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1363 udelay(40);
1364 }
1365
1366 tmp = ((bp->phy_addr << 21) | (reg << 16) |
1367 (val & EMAC_MDIO_COMM_DATA) |
1368 EMAC_MDIO_COMM_COMMAND_WRITE_22 |
1369 EMAC_MDIO_COMM_START_BUSY);
1370 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1371
1372 for (i = 0; i < 50; i++) {
1373 udelay(10);
1374
1375 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1376 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1377 udelay(5);
1378 break;
1379 }
1380 }
1381
1382 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1383 BNX2X_ERR("write phy register failed\n");
1384
1385 rc = -EBUSY;
1386 } else {
1387 rc = 0;
1388 }
1389
1390 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1391
1392 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1393 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1394 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1395 }
1396
1397 return rc;
1398}
1399
1400static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1401{
1402 int port = bp->port;
1403 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
f1410647
ET
1404 u32 val;
1405 int i, rc;
a2fbb9ea
ET
1406
1407 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1408
1409 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1410 val &= ~EMAC_MDIO_MODE_AUTO_POLL;
1411 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1412 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1413 udelay(40);
1414 }
1415
1416 val = ((bp->phy_addr << 21) | (reg << 16) |
1417 EMAC_MDIO_COMM_COMMAND_READ_22 |
1418 EMAC_MDIO_COMM_START_BUSY);
1419 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
1420
1421 for (i = 0; i < 50; i++) {
1422 udelay(10);
1423
1424 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1425 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1426 val &= EMAC_MDIO_COMM_DATA;
1427 break;
1428 }
1429 }
1430
1431 if (val & EMAC_MDIO_COMM_START_BUSY) {
1432 BNX2X_ERR("read phy register failed\n");
1433
1434 *ret_val = 0x0;
1435 rc = -EBUSY;
1436 } else {
1437 *ret_val = val;
1438 rc = 0;
1439 }
1440
1441 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1442
1443 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1444 val |= EMAC_MDIO_MODE_AUTO_POLL;
1445 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1446 }
1447
1448/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
1449 bp->phy_addr, reg, *ret_val); */
1450
1451 return rc;
1452}
1453
f1410647
ET
1454static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
1455 u32 phy_addr, u32 reg, u32 addr, u32 val)
a2fbb9ea 1456{
f1410647
ET
1457 u32 tmp;
1458 int i, rc = 0;
a2fbb9ea 1459
f1410647
ET
1460 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1461 * (a value of 49==0x31) and make sure that the AUTO poll is off
1462 */
1463 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1464 tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1465 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
1466 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1467 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
1468 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1469 udelay(40);
a2fbb9ea
ET
1470
1471 /* address */
f1410647 1472 tmp = ((phy_addr << 21) | (reg << 16) | addr |
a2fbb9ea
ET
1473 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1474 EMAC_MDIO_COMM_START_BUSY);
f1410647 1475 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
a2fbb9ea
ET
1476
1477 for (i = 0; i < 50; i++) {
1478 udelay(10);
1479
f1410647 1480 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
a2fbb9ea
ET
1481 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1482 udelay(5);
1483 break;
1484 }
1485 }
a2fbb9ea
ET
1486 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1487 BNX2X_ERR("write phy register failed\n");
1488
1489 rc = -EBUSY;
f1410647 1490
a2fbb9ea
ET
1491 } else {
1492 /* data */
f1410647 1493 tmp = ((phy_addr << 21) | (reg << 16) | val |
a2fbb9ea
ET
1494 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
1495 EMAC_MDIO_COMM_START_BUSY);
f1410647 1496 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
a2fbb9ea
ET
1497
1498 for (i = 0; i < 50; i++) {
1499 udelay(10);
1500
f1410647 1501 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
a2fbb9ea
ET
1502 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1503 udelay(5);
1504 break;
1505 }
1506 }
1507
1508 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1509 BNX2X_ERR("write phy register failed\n");
1510
1511 rc = -EBUSY;
1512 }
1513 }
1514
f1410647
ET
1515 /* unset clause 45 mode, set the MDIO clock to a faster value
1516 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1517 */
1518 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1519 tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1520 tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1521 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
a2fbb9ea 1522 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
f1410647 1523 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
a2fbb9ea
ET
1524
1525 return rc;
1526}
1527
f1410647
ET
1528static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
1529 u32 addr, u32 val)
a2fbb9ea 1530{
f1410647 1531 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 1532
f1410647
ET
1533 return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
1534 reg, addr, val);
1535}
a2fbb9ea 1536
f1410647
ET
1537static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
1538 u32 phy_addr, u32 reg, u32 addr,
1539 u32 *ret_val)
1540{
1541 u32 val;
1542 int i, rc = 0;
a2fbb9ea 1543
f1410647
ET
1544 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1545 * (a value of 49==0x31) and make sure that the AUTO poll is off
1546 */
1547 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1548 val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1549 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1550 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1551 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
1552 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1553 udelay(40);
a2fbb9ea
ET
1554
1555 /* address */
f1410647 1556 val = ((phy_addr << 21) | (reg << 16) | addr |
a2fbb9ea
ET
1557 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1558 EMAC_MDIO_COMM_START_BUSY);
f1410647 1559 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
a2fbb9ea
ET
1560
1561 for (i = 0; i < 50; i++) {
1562 udelay(10);
1563
f1410647 1564 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
a2fbb9ea
ET
1565 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1566 udelay(5);
1567 break;
1568 }
1569 }
a2fbb9ea
ET
1570 if (val & EMAC_MDIO_COMM_START_BUSY) {
1571 BNX2X_ERR("read phy register failed\n");
1572
1573 *ret_val = 0;
1574 rc = -EBUSY;
f1410647 1575
a2fbb9ea
ET
1576 } else {
1577 /* data */
f1410647 1578 val = ((phy_addr << 21) | (reg << 16) |
a2fbb9ea
ET
1579 EMAC_MDIO_COMM_COMMAND_READ_45 |
1580 EMAC_MDIO_COMM_START_BUSY);
f1410647 1581 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
a2fbb9ea
ET
1582
1583 for (i = 0; i < 50; i++) {
1584 udelay(10);
1585
f1410647 1586 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
a2fbb9ea
ET
1587 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1588 val &= EMAC_MDIO_COMM_DATA;
1589 break;
1590 }
1591 }
1592
1593 if (val & EMAC_MDIO_COMM_START_BUSY) {
1594 BNX2X_ERR("read phy register failed\n");
1595
1596 val = 0;
1597 rc = -EBUSY;
1598 }
1599
1600 *ret_val = val;
1601 }
1602
f1410647
ET
1603 /* unset clause 45 mode, set the MDIO clock to a faster value
1604 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1605 */
1606 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1607 val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1608 val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1609 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
a2fbb9ea 1610 val |= EMAC_MDIO_MODE_AUTO_POLL;
f1410647 1611 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
a2fbb9ea
ET
1612
1613 return rc;
1614}
1615
f1410647
ET
1616static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
1617 u32 addr, u32 *ret_val)
1618{
1619 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1620
1621 return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
1622 reg, addr, ret_val);
1623}
1624
1625static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
1626 u32 addr, u32 val)
a2fbb9ea
ET
1627{
1628 int i;
1629 u32 rd_val;
1630
1631 might_sleep();
1632 for (i = 0; i < 10; i++) {
f1410647 1633 bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
a2fbb9ea 1634 msleep(5);
f1410647 1635 bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
a2fbb9ea
ET
1636 /* if the read value is not the same as the value we wrote,
1637 we should write it again */
1638 if (rd_val == val)
1639 return 0;
1640 }
1641 BNX2X_ERR("MDIO write in CL45 failed\n");
1642 return -EBUSY;
1643}
1644
1645/*
c14423fe 1646 * link management
a2fbb9ea
ET
1647 */
1648
f1410647
ET
1649static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
1650{
1651 switch (pause_result) { /* ASYM P ASYM P */
1652 case 0xb: /* 1 0 1 1 */
1653 bp->flow_ctrl = FLOW_CTRL_TX;
1654 break;
1655
1656 case 0xe: /* 1 1 1 0 */
1657 bp->flow_ctrl = FLOW_CTRL_RX;
1658 break;
1659
1660 case 0x5: /* 0 1 0 1 */
1661 case 0x7: /* 0 1 1 1 */
1662 case 0xd: /* 1 1 0 1 */
1663 case 0xf: /* 1 1 1 1 */
1664 bp->flow_ctrl = FLOW_CTRL_BOTH;
1665 break;
1666
1667 default:
1668 break;
1669 }
1670}
1671
1672static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
1673{
1674 u32 ext_phy_addr;
1675 u32 ld_pause; /* local */
1676 u32 lp_pause; /* link partner */
1677 u32 an_complete; /* AN complete */
1678 u32 pause_result;
1679 u8 ret = 0;
1680
1681 ext_phy_addr = ((bp->ext_phy_config &
1682 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1683 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1684
1685 /* read twice */
1686 bnx2x_mdio45_read(bp, ext_phy_addr,
1687 EXT_PHY_KR_AUTO_NEG_DEVAD,
1688 EXT_PHY_KR_STATUS, &an_complete);
1689 bnx2x_mdio45_read(bp, ext_phy_addr,
1690 EXT_PHY_KR_AUTO_NEG_DEVAD,
1691 EXT_PHY_KR_STATUS, &an_complete);
1692
1693 if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
1694 ret = 1;
1695 bnx2x_mdio45_read(bp, ext_phy_addr,
1696 EXT_PHY_KR_AUTO_NEG_DEVAD,
1697 EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
1698 bnx2x_mdio45_read(bp, ext_phy_addr,
1699 EXT_PHY_KR_AUTO_NEG_DEVAD,
1700 EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
1701 pause_result = (ld_pause &
1702 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
1703 pause_result |= (lp_pause &
1704 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
1705 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
1706 pause_result);
1707 bnx2x_pause_resolve(bp, pause_result);
1708 }
1709 return ret;
1710}
1711
a2fbb9ea
ET
1712static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
1713{
f1410647
ET
1714 u32 ld_pause; /* local driver */
1715 u32 lp_pause; /* link partner */
a2fbb9ea
ET
1716 u32 pause_result;
1717
1718 bp->flow_ctrl = 0;
1719
c14423fe 1720 /* resolve from gp_status in case of AN complete and not sgmii */
a2fbb9ea
ET
1721 if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
1722 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1723 (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
1724 (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1725
1726 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
1727 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1728 &ld_pause);
1729 bnx2x_mdio22_read(bp,
1730 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1731 &lp_pause);
1732 pause_result = (ld_pause &
1733 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1734 pause_result |= (lp_pause &
1735 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1736 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
f1410647
ET
1737 bnx2x_pause_resolve(bp, pause_result);
1738 } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
1739 !(bnx2x_ext_phy_resove_fc(bp))) {
1740 /* forced speed */
1741 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
1742 switch (bp->req_flow_ctrl) {
1743 case FLOW_CTRL_AUTO:
1744 if (bp->dev->mtu <= 4500)
1745 bp->flow_ctrl = FLOW_CTRL_BOTH;
1746 else
1747 bp->flow_ctrl = FLOW_CTRL_TX;
1748 break;
a2fbb9ea 1749
f1410647
ET
1750 case FLOW_CTRL_TX:
1751 bp->flow_ctrl = FLOW_CTRL_TX;
1752 break;
a2fbb9ea 1753
f1410647
ET
1754 case FLOW_CTRL_RX:
1755 if (bp->dev->mtu <= 4500)
1756 bp->flow_ctrl = FLOW_CTRL_RX;
1757 break;
a2fbb9ea 1758
f1410647
ET
1759 case FLOW_CTRL_BOTH:
1760 if (bp->dev->mtu <= 4500)
1761 bp->flow_ctrl = FLOW_CTRL_BOTH;
1762 else
1763 bp->flow_ctrl = FLOW_CTRL_TX;
1764 break;
a2fbb9ea 1765
f1410647
ET
1766 case FLOW_CTRL_NONE:
1767 default:
1768 break;
1769 }
1770 } else { /* forced mode */
1771 switch (bp->req_flow_ctrl) {
1772 case FLOW_CTRL_AUTO:
1773 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
1774 " req_autoneg 0x%x\n",
1775 bp->req_flow_ctrl, bp->req_autoneg);
1776 break;
a2fbb9ea 1777
f1410647
ET
1778 case FLOW_CTRL_TX:
1779 case FLOW_CTRL_RX:
1780 case FLOW_CTRL_BOTH:
1781 bp->flow_ctrl = bp->req_flow_ctrl;
1782 break;
a2fbb9ea 1783
f1410647
ET
1784 case FLOW_CTRL_NONE:
1785 default:
1786 break;
1787 }
a2fbb9ea
ET
1788 }
1789 }
1790 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
1791}
1792
1793static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1794{
1795 bp->link_status = 0;
1796
1797 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
f1410647 1798 DP(NETIF_MSG_LINK, "phy link up\n");
a2fbb9ea 1799
f1410647 1800 bp->phy_link_up = 1;
a2fbb9ea
ET
1801 bp->link_status |= LINK_STATUS_LINK_UP;
1802
1803 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
1804 bp->duplex = DUPLEX_FULL;
1805 else
1806 bp->duplex = DUPLEX_HALF;
1807
1808 bnx2x_flow_ctrl_resolve(bp, gp_status);
1809
1810 switch (gp_status & GP_STATUS_SPEED_MASK) {
1811 case GP_STATUS_10M:
1812 bp->line_speed = SPEED_10;
1813 if (bp->duplex == DUPLEX_FULL)
1814 bp->link_status |= LINK_10TFD;
1815 else
1816 bp->link_status |= LINK_10THD;
1817 break;
1818
1819 case GP_STATUS_100M:
1820 bp->line_speed = SPEED_100;
1821 if (bp->duplex == DUPLEX_FULL)
1822 bp->link_status |= LINK_100TXFD;
1823 else
1824 bp->link_status |= LINK_100TXHD;
1825 break;
1826
1827 case GP_STATUS_1G:
1828 case GP_STATUS_1G_KX:
1829 bp->line_speed = SPEED_1000;
1830 if (bp->duplex == DUPLEX_FULL)
1831 bp->link_status |= LINK_1000TFD;
1832 else
1833 bp->link_status |= LINK_1000THD;
1834 break;
1835
1836 case GP_STATUS_2_5G:
1837 bp->line_speed = SPEED_2500;
1838 if (bp->duplex == DUPLEX_FULL)
1839 bp->link_status |= LINK_2500TFD;
1840 else
1841 bp->link_status |= LINK_2500THD;
1842 break;
1843
1844 case GP_STATUS_5G:
1845 case GP_STATUS_6G:
1846 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1847 gp_status);
1848 break;
1849
1850 case GP_STATUS_10G_KX4:
1851 case GP_STATUS_10G_HIG:
1852 case GP_STATUS_10G_CX4:
1853 bp->line_speed = SPEED_10000;
1854 bp->link_status |= LINK_10GTFD;
1855 break;
1856
1857 case GP_STATUS_12G_HIG:
1858 bp->line_speed = SPEED_12000;
1859 bp->link_status |= LINK_12GTFD;
1860 break;
1861
1862 case GP_STATUS_12_5G:
1863 bp->line_speed = SPEED_12500;
1864 bp->link_status |= LINK_12_5GTFD;
1865 break;
1866
1867 case GP_STATUS_13G:
1868 bp->line_speed = SPEED_13000;
1869 bp->link_status |= LINK_13GTFD;
1870 break;
1871
1872 case GP_STATUS_15G:
1873 bp->line_speed = SPEED_15000;
1874 bp->link_status |= LINK_15GTFD;
1875 break;
1876
1877 case GP_STATUS_16G:
1878 bp->line_speed = SPEED_16000;
1879 bp->link_status |= LINK_16GTFD;
1880 break;
1881
1882 default:
1883 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1884 gp_status);
1885 break;
1886 }
1887
1888 bp->link_status |= LINK_STATUS_SERDES_LINK;
1889
1890 if (bp->req_autoneg & AUTONEG_SPEED) {
1891 bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1892
1893 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
1894 bp->link_status |=
1895 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1896
1897 if (bp->autoneg & AUTONEG_PARALLEL)
1898 bp->link_status |=
1899 LINK_STATUS_PARALLEL_DETECTION_USED;
1900 }
1901
1902 if (bp->flow_ctrl & FLOW_CTRL_TX)
1903 bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1904
1905 if (bp->flow_ctrl & FLOW_CTRL_RX)
1906 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1907
1908 } else { /* link_down */
f1410647 1909 DP(NETIF_MSG_LINK, "phy link down\n");
a2fbb9ea 1910
f1410647 1911 bp->phy_link_up = 0;
a2fbb9ea
ET
1912
1913 bp->line_speed = 0;
1914 bp->duplex = DUPLEX_FULL;
1915 bp->flow_ctrl = 0;
1916 }
1917
f1410647 1918 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
a2fbb9ea
ET
1919 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
1920 " link_status 0x%x\n",
f1410647
ET
1921 gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
1922 bp->flow_ctrl, bp->link_status);
a2fbb9ea
ET
1923}
1924
1925static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1926{
1927 int port = bp->port;
1928
1929 /* first reset all status
c14423fe 1930 * we assume only one line will be change at a time */
a2fbb9ea 1931 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647
ET
1932 (NIG_STATUS_XGXS0_LINK10G |
1933 NIG_STATUS_XGXS0_LINK_STATUS |
1934 NIG_STATUS_SERDES0_LINK_STATUS));
1935 if (bp->phy_link_up) {
a2fbb9ea
ET
1936 if (is_10g) {
1937 /* Disable the 10G link interrupt
1938 * by writing 1 to the status register
1939 */
f1410647 1940 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
a2fbb9ea
ET
1941 bnx2x_bits_en(bp,
1942 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647 1943 NIG_STATUS_XGXS0_LINK10G);
a2fbb9ea
ET
1944
1945 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1946 /* Disable the link interrupt
1947 * by writing 1 to the relevant lane
1948 * in the status register
1949 */
f1410647 1950 DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
a2fbb9ea
ET
1951 bnx2x_bits_en(bp,
1952 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1953 ((1 << bp->ser_lane) <<
f1410647 1954 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
a2fbb9ea
ET
1955
1956 } else { /* SerDes */
f1410647 1957 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
a2fbb9ea
ET
1958 /* Disable the link interrupt
1959 * by writing 1 to the status register
1960 */
1961 bnx2x_bits_en(bp,
1962 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647 1963 NIG_STATUS_SERDES0_LINK_STATUS);
a2fbb9ea
ET
1964 }
1965
1966 } else { /* link_down */
1967 }
1968}
1969
1970static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1971{
1972 u32 ext_phy_type;
1973 u32 ext_phy_addr;
f1410647 1974 u32 val1 = 0, val2;
a2fbb9ea
ET
1975 u32 rx_sd, pcs_status;
1976
1977 if (bp->phy_flags & PHY_XGXS_FLAG) {
a2fbb9ea
ET
1978 ext_phy_addr = ((bp->ext_phy_config &
1979 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1980 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
a2fbb9ea
ET
1981
1982 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1983 switch (ext_phy_type) {
1984 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1985 DP(NETIF_MSG_LINK, "XGXS Direct\n");
f1410647 1986 val1 = 1;
a2fbb9ea
ET
1987 break;
1988
1989 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1990 DP(NETIF_MSG_LINK, "XGXS 8705\n");
f1410647
ET
1991 bnx2x_mdio45_read(bp, ext_phy_addr,
1992 EXT_PHY_OPT_WIS_DEVAD,
1993 EXT_PHY_OPT_LASI_STATUS, &val1);
1994 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
1995
1996 bnx2x_mdio45_read(bp, ext_phy_addr,
1997 EXT_PHY_OPT_WIS_DEVAD,
1998 EXT_PHY_OPT_LASI_STATUS, &val1);
1999 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
2000
2001 bnx2x_mdio45_read(bp, ext_phy_addr,
2002 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 2003 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
f1410647
ET
2004 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2005 val1 = (rx_sd & 0x1);
a2fbb9ea
ET
2006 break;
2007
2008 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2009 DP(NETIF_MSG_LINK, "XGXS 8706\n");
f1410647
ET
2010 bnx2x_mdio45_read(bp, ext_phy_addr,
2011 EXT_PHY_OPT_PMA_PMD_DEVAD,
2012 EXT_PHY_OPT_LASI_STATUS, &val1);
2013 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2014
2015 bnx2x_mdio45_read(bp, ext_phy_addr,
2016 EXT_PHY_OPT_PMA_PMD_DEVAD,
2017 EXT_PHY_OPT_LASI_STATUS, &val1);
2018 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2019
2020 bnx2x_mdio45_read(bp, ext_phy_addr,
2021 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 2022 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
f1410647
ET
2023 bnx2x_mdio45_read(bp, ext_phy_addr,
2024 EXT_PHY_OPT_PCS_DEVAD,
2025 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
2026 bnx2x_mdio45_read(bp, ext_phy_addr,
2027 EXT_PHY_AUTO_NEG_DEVAD,
2028 EXT_PHY_OPT_AN_LINK_STATUS, &val2);
2029
a2fbb9ea 2030 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
f1410647
ET
2031 " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
2032 rx_sd, pcs_status, val2, (val2 & (1<<1)));
2033 /* link is up if both bit 0 of pmd_rx_sd and
2034 * bit 0 of pcs_status are set, or if the autoneg bit
2035 1 is set
2036 */
2037 val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
2038 break;
2039
2040 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2041 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2042
2043 /* clear the interrupt LASI status register */
2044 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2045 ext_phy_addr,
2046 EXT_PHY_KR_PCS_DEVAD,
2047 EXT_PHY_KR_LASI_STATUS, &val2);
2048 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2049 ext_phy_addr,
2050 EXT_PHY_KR_PCS_DEVAD,
2051 EXT_PHY_KR_LASI_STATUS, &val1);
2052 DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
2053 val2, val1);
2054 /* Check the LASI */
2055 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2056 ext_phy_addr,
2057 EXT_PHY_KR_PMA_PMD_DEVAD,
2058 0x9003, &val2);
2059 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2060 ext_phy_addr,
2061 EXT_PHY_KR_PMA_PMD_DEVAD,
2062 0x9003, &val1);
2063 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
2064 val2, val1);
2065 /* Check the link status */
2066 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2067 ext_phy_addr,
2068 EXT_PHY_KR_PCS_DEVAD,
2069 EXT_PHY_KR_PCS_STATUS, &val2);
2070 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
2071 /* Check the link status on 1.1.2 */
2072 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2073 ext_phy_addr,
2074 EXT_PHY_OPT_PMA_PMD_DEVAD,
2075 EXT_PHY_KR_STATUS, &val2);
2076 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2077 ext_phy_addr,
2078 EXT_PHY_OPT_PMA_PMD_DEVAD,
2079 EXT_PHY_KR_STATUS, &val1);
2080 DP(NETIF_MSG_LINK,
2081 "KR PMA status 0x%x->0x%x\n", val2, val1);
2082 val1 = ((val1 & 4) == 4);
2083 /* If 1G was requested assume the link is up */
2084 if (!(bp->req_autoneg & AUTONEG_SPEED) &&
2085 (bp->req_line_speed == SPEED_1000))
2086 val1 = 1;
2087 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2088 break;
2089
2090 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2091 bnx2x_mdio45_read(bp, ext_phy_addr,
2092 EXT_PHY_OPT_PMA_PMD_DEVAD,
2093 EXT_PHY_OPT_LASI_STATUS, &val2);
2094 bnx2x_mdio45_read(bp, ext_phy_addr,
2095 EXT_PHY_OPT_PMA_PMD_DEVAD,
2096 EXT_PHY_OPT_LASI_STATUS, &val1);
2097 DP(NETIF_MSG_LINK,
2098 "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
2099 bnx2x_mdio45_read(bp, ext_phy_addr,
2100 EXT_PHY_OPT_PMA_PMD_DEVAD,
2101 EXT_PHY_KR_STATUS, &val2);
2102 bnx2x_mdio45_read(bp, ext_phy_addr,
2103 EXT_PHY_OPT_PMA_PMD_DEVAD,
2104 EXT_PHY_KR_STATUS, &val1);
2105 DP(NETIF_MSG_LINK,
2106 "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
2107 val1 = ((val1 & 4) == 4);
2108 /* if link is up
2109 * print the AN outcome of the SFX7101 PHY
a2fbb9ea 2110 */
f1410647
ET
2111 if (val1) {
2112 bnx2x_mdio45_read(bp, ext_phy_addr,
2113 EXT_PHY_KR_AUTO_NEG_DEVAD,
2114 0x21, &val2);
2115 DP(NETIF_MSG_LINK,
2116 "SFX7101 AN status 0x%x->%s\n", val2,
2117 (val2 & (1<<14)) ? "Master" : "Slave");
2118 }
a2fbb9ea
ET
2119 break;
2120
2121 default:
2122 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2123 bp->ext_phy_config);
f1410647 2124 val1 = 0;
a2fbb9ea
ET
2125 break;
2126 }
a2fbb9ea
ET
2127
2128 } else { /* SerDes */
2129 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2130 switch (ext_phy_type) {
2131 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2132 DP(NETIF_MSG_LINK, "SerDes Direct\n");
f1410647 2133 val1 = 1;
a2fbb9ea
ET
2134 break;
2135
2136 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2137 DP(NETIF_MSG_LINK, "SerDes 5482\n");
f1410647 2138 val1 = 1;
a2fbb9ea
ET
2139 break;
2140
2141 default:
2142 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2143 bp->ext_phy_config);
f1410647 2144 val1 = 0;
a2fbb9ea
ET
2145 break;
2146 }
2147 }
2148
f1410647 2149 return val1;
a2fbb9ea
ET
2150}
2151
2152static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
2153{
2154 int port = bp->port;
2155 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2156 NIG_REG_INGRESS_BMAC0_MEM;
2157 u32 wb_write[2];
2158 u32 val;
2159
c14423fe 2160 DP(NETIF_MSG_LINK, "enabling BigMAC\n");
a2fbb9ea
ET
2161 /* reset and unreset the BigMac */
2162 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2163 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2164 msleep(5);
2165 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2166 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2167
2168 /* enable access for bmac registers */
2169 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2170
2171 /* XGXS control */
2172 wb_write[0] = 0x3c;
2173 wb_write[1] = 0;
2174 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
2175 wb_write, 2);
2176
2177 /* tx MAC SA */
2178 wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
2179 (bp->dev->dev_addr[3] << 16) |
2180 (bp->dev->dev_addr[4] << 8) |
2181 bp->dev->dev_addr[5]);
2182 wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
2183 bp->dev->dev_addr[1]);
2184 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
2185 wb_write, 2);
2186
2187 /* tx control */
2188 val = 0xc0;
2189 if (bp->flow_ctrl & FLOW_CTRL_TX)
2190 val |= 0x800000;
2191 wb_write[0] = val;
2192 wb_write[1] = 0;
2193 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
2194
2195 /* set tx mtu */
2196 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
2197 wb_write[1] = 0;
2198 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
2199
2200 /* mac control */
2201 val = 0x3;
2202 if (is_lb) {
2203 val |= 0x4;
2204 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
2205 }
2206 wb_write[0] = val;
2207 wb_write[1] = 0;
2208 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2209 wb_write, 2);
2210
2211 /* rx control set to don't strip crc */
2212 val = 0x14;
2213 if (bp->flow_ctrl & FLOW_CTRL_RX)
2214 val |= 0x20;
2215 wb_write[0] = val;
2216 wb_write[1] = 0;
2217 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
2218
2219 /* set rx mtu */
2220 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2221 wb_write[1] = 0;
2222 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
2223
2224 /* set cnt max size */
2225 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
2226 wb_write[1] = 0;
2227 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
2228 wb_write, 2);
2229
2230 /* configure safc */
2231 wb_write[0] = 0x1000200;
2232 wb_write[1] = 0;
2233 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
2234 wb_write, 2);
2235
2236 /* fix for emulation */
2237 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2238 wb_write[0] = 0xf000;
2239 wb_write[1] = 0;
2240 REG_WR_DMAE(bp,
2241 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
2242 wb_write, 2);
2243 }
2244
2245 /* reset old bmac stats */
2246 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2247
2248 NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
2249
2250 /* select XGXS */
2251 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
2252 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
2253
2254 /* disable the NIG in/out to the emac */
2255 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
2256 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
2257 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
2258
2259 /* enable the NIG in/out to the bmac */
2260 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
2261
2262 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
2263 val = 0;
2264 if (bp->flow_ctrl & FLOW_CTRL_TX)
2265 val = 1;
2266 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
2267 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
2268
2269 bp->phy_flags |= PHY_BMAC_FLAG;
2270
2271 bp->stats_state = STATS_STATE_ENABLE;
2272}
2273
f1410647
ET
2274static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
2275{
2276 int port = bp->port;
2277 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2278 NIG_REG_INGRESS_BMAC0_MEM;
2279 u32 wb_write[2];
2280
2281 /* Only if the bmac is out of reset */
2282 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2283 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
2284 /* Clear Rx Enable bit in BMAC_CONTROL register */
2285#ifdef BNX2X_DMAE_RD
2286 bnx2x_read_dmae(bp, bmac_addr +
2287 BIGMAC_REGISTER_BMAC_CONTROL, 2);
2288 wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
2289 wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
2290#else
2291 wb_write[0] = REG_RD(bp,
2292 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
2293 wb_write[1] = REG_RD(bp,
2294 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
2295#endif
2296 wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
2297 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2298 wb_write, 2);
2299 msleep(1);
2300 }
2301}
2302
a2fbb9ea
ET
2303static void bnx2x_emac_enable(struct bnx2x *bp)
2304{
2305 int port = bp->port;
2306 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2307 u32 val;
2308 int timeout;
2309
c14423fe 2310 DP(NETIF_MSG_LINK, "enabling EMAC\n");
a2fbb9ea
ET
2311 /* reset and unreset the emac core */
2312 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2313 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2314 msleep(5);
2315 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2316 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2317
2318 /* enable emac and not bmac */
2319 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2320
2321 /* for paladium */
2322 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2323 /* Use lane 1 (of lanes 0-3) */
2324 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2325 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2326 }
2327 /* for fpga */
2328 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2329 /* Use lane 1 (of lanes 0-3) */
2330 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2331 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2332 }
2333 /* ASIC */
2334 else {
2335 if (bp->phy_flags & PHY_XGXS_FLAG) {
2336 DP(NETIF_MSG_LINK, "XGXS\n");
2337 /* select the master lanes (out of 0-3) */
2338 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2339 bp->ser_lane);
2340 /* select XGXS */
2341 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2342
2343 } else { /* SerDes */
2344 DP(NETIF_MSG_LINK, "SerDes\n");
2345 /* select SerDes */
2346 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2347 }
2348 }
2349
2350 /* enable emac */
2351 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2352
2353 /* init emac - use read-modify-write */
2354 /* self clear reset */
2355 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2356 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2357
2358 timeout = 200;
2359 while (val & EMAC_MODE_RESET) {
2360 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2361 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2362 if (!timeout) {
2363 BNX2X_ERR("EMAC timeout!\n");
2364 break;
2365 }
2366 timeout--;
2367 }
2368
2369 /* reset tx part */
2370 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2371
2372 timeout = 200;
2373 while (val & EMAC_TX_MODE_RESET) {
2374 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2375 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2376 if (!timeout) {
2377 BNX2X_ERR("EMAC timeout!\n");
2378 break;
2379 }
2380 timeout--;
2381 }
2382
2383 if (CHIP_REV_IS_SLOW(bp)) {
2384 /* config GMII mode */
2385 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2386 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2387
2388 } else { /* ASIC */
2389 /* pause enable/disable */
2390 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2391 EMAC_RX_MODE_FLOW_EN);
2392 if (bp->flow_ctrl & FLOW_CTRL_RX)
2393 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2394 EMAC_RX_MODE_FLOW_EN);
2395
2396 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2397 EMAC_TX_MODE_EXT_PAUSE_EN);
2398 if (bp->flow_ctrl & FLOW_CTRL_TX)
2399 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2400 EMAC_TX_MODE_EXT_PAUSE_EN);
2401 }
2402
c14423fe 2403 /* KEEP_VLAN_TAG, promiscuous */
a2fbb9ea
ET
2404 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2405 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2406 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2407
2408 /* identify magic packets */
2409 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2410 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2411
2412 /* enable emac for jumbo packets */
2413 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2414 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2415 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2416
2417 /* strip CRC */
2418 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2419
2420 val = ((bp->dev->dev_addr[0] << 8) |
2421 bp->dev->dev_addr[1]);
2422 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2423
2424 val = ((bp->dev->dev_addr[2] << 24) |
2425 (bp->dev->dev_addr[3] << 16) |
2426 (bp->dev->dev_addr[4] << 8) |
2427 bp->dev->dev_addr[5]);
2428 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2429
2430 /* disable the NIG in/out to the bmac */
2431 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2432 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2433 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2434
2435 /* enable the NIG in/out to the emac */
2436 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2437 val = 0;
2438 if (bp->flow_ctrl & FLOW_CTRL_TX)
2439 val = 1;
2440 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2441 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2442
2443 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2444 /* take the BigMac out of reset */
2445 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2446 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2447
2448 /* enable access for bmac registers */
2449 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2450 }
2451
2452 bp->phy_flags |= PHY_EMAC_FLAG;
2453
2454 bp->stats_state = STATS_STATE_ENABLE;
2455}
2456
2457static void bnx2x_emac_program(struct bnx2x *bp)
2458{
2459 u16 mode = 0;
2460 int port = bp->port;
2461
2462 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2463 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2464 (EMAC_MODE_25G_MODE |
2465 EMAC_MODE_PORT_MII_10M |
2466 EMAC_MODE_HALF_DUPLEX));
2467 switch (bp->line_speed) {
2468 case SPEED_10:
2469 mode |= EMAC_MODE_PORT_MII_10M;
2470 break;
2471
2472 case SPEED_100:
2473 mode |= EMAC_MODE_PORT_MII;
2474 break;
2475
2476 case SPEED_1000:
2477 mode |= EMAC_MODE_PORT_GMII;
2478 break;
2479
2480 case SPEED_2500:
2481 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2482 break;
2483
2484 default:
2485 /* 10G not valid for EMAC */
2486 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2487 break;
2488 }
2489
2490 if (bp->duplex == DUPLEX_HALF)
2491 mode |= EMAC_MODE_HALF_DUPLEX;
2492 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2493 mode);
2494
2495 bnx2x_leds_set(bp, bp->line_speed);
2496}
2497
2498static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2499{
2500 u32 lp_up2;
2501 u32 tx_driver;
2502
2503 /* read precomp */
2504 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2505 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2506
2507 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2508 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2509
2510 /* bits [10:7] at lp_up2, positioned at [15:12] */
2511 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2512 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2513 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2514
2515 if ((lp_up2 != 0) &&
2516 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2517 /* replace tx_driver bits [15:12] */
2518 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2519 tx_driver |= lp_up2;
2520 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2521 }
2522}
2523
2524static void bnx2x_pbf_update(struct bnx2x *bp)
2525{
2526 int port = bp->port;
2527 u32 init_crd, crd;
2528 u32 count = 1000;
2529 u32 pause = 0;
2530
a2fbb9ea
ET
2531 /* disable port */
2532 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2533
2534 /* wait for init credit */
2535 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2536 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2537 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2538
2539 while ((init_crd != crd) && count) {
2540 msleep(5);
2541
2542 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2543 count--;
2544 }
2545 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2546 if (init_crd != crd)
2547 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2548
2549 if (bp->flow_ctrl & FLOW_CTRL_RX)
2550 pause = 1;
2551 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2552 if (pause) {
2553 /* update threshold */
2554 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2555 /* update init credit */
2556 init_crd = 778; /* (800-18-4) */
2557
2558 } else {
2559 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2560
2561 /* update threshold */
2562 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2563 /* update init credit */
2564 switch (bp->line_speed) {
2565 case SPEED_10:
2566 case SPEED_100:
2567 case SPEED_1000:
2568 init_crd = thresh + 55 - 22;
2569 break;
2570
2571 case SPEED_2500:
2572 init_crd = thresh + 138 - 22;
2573 break;
2574
2575 case SPEED_10000:
2576 init_crd = thresh + 553 - 22;
2577 break;
2578
2579 default:
2580 BNX2X_ERR("Invalid line_speed 0x%x\n",
2581 bp->line_speed);
2582 break;
2583 }
2584 }
2585 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2586 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2587 bp->line_speed, init_crd);
2588
2589 /* probe the credit changes */
2590 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2591 msleep(5);
2592 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2593
2594 /* enable port */
2595 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2596}
2597
2598static void bnx2x_update_mng(struct bnx2x *bp)
2599{
2600 if (!nomcp)
f1410647 2601 SHMEM_WR(bp, port_mb[bp->port].link_status,
a2fbb9ea
ET
2602 bp->link_status);
2603}
2604
2605static void bnx2x_link_report(struct bnx2x *bp)
2606{
2607 if (bp->link_up) {
2608 netif_carrier_on(bp->dev);
2609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2610
2611 printk("%d Mbps ", bp->line_speed);
2612
2613 if (bp->duplex == DUPLEX_FULL)
2614 printk("full duplex");
2615 else
2616 printk("half duplex");
2617
2618 if (bp->flow_ctrl) {
2619 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2620 printk(", receive ");
2621 if (bp->flow_ctrl & FLOW_CTRL_TX)
2622 printk("& transmit ");
2623 } else {
2624 printk(", transmit ");
2625 }
2626 printk("flow control ON");
2627 }
2628 printk("\n");
2629
2630 } else { /* link_down */
2631 netif_carrier_off(bp->dev);
2632 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2633 }
2634}
2635
2636static void bnx2x_link_up(struct bnx2x *bp)
2637{
2638 int port = bp->port;
2639
2640 /* PBF - link up */
2641 bnx2x_pbf_update(bp);
2642
2643 /* disable drain */
2644 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2645
2646 /* update shared memory */
2647 bnx2x_update_mng(bp);
2648
2649 /* indicate link up */
2650 bnx2x_link_report(bp);
2651}
2652
2653static void bnx2x_link_down(struct bnx2x *bp)
2654{
2655 int port = bp->port;
2656
2657 /* notify stats */
2658 if (bp->stats_state != STATS_STATE_DISABLE) {
2659 bp->stats_state = STATS_STATE_STOP;
2660 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2661 }
2662
f1410647 2663 /* indicate no mac active */
a2fbb9ea
ET
2664 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2665
f1410647
ET
2666 /* update shared memory */
2667 bnx2x_update_mng(bp);
a2fbb9ea 2668
a2fbb9ea
ET
2669 /* activate nig drain */
2670 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2671
f1410647
ET
2672 /* reset BigMac */
2673 bnx2x_bmac_rx_disable(bp);
2674 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2675 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
a2fbb9ea
ET
2676
2677 /* indicate link down */
2678 bnx2x_link_report(bp);
2679}
2680
2681static void bnx2x_init_mac_stats(struct bnx2x *bp);
2682
2683/* This function is called upon link interrupt */
2684static void bnx2x_link_update(struct bnx2x *bp)
2685{
a2fbb9ea
ET
2686 int port = bp->port;
2687 int i;
f1410647 2688 u32 gp_status;
a2fbb9ea
ET
2689 int link_10g;
2690
f1410647 2691 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
a2fbb9ea 2692 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
f1410647
ET
2693 " 10G %x, XGXS_LINK %x\n", port,
2694 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
a2fbb9ea
ET
2695 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2696 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2697 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2698 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2699 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2700 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2701 );
2702
2703 might_sleep();
2704 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2705 /* avoid fast toggling */
f1410647 2706 for (i = 0; i < 10; i++) {
a2fbb9ea
ET
2707 msleep(10);
2708 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2709 &gp_status);
2710 }
2711
2712 bnx2x_link_settings_status(bp, gp_status);
2713
2714 /* anything 10 and over uses the bmac */
2715 link_10g = ((bp->line_speed >= SPEED_10000) &&
2716 (bp->line_speed <= SPEED_16000));
2717
2718 bnx2x_link_int_ack(bp, link_10g);
2719
2720 /* link is up only if both local phy and external phy are up */
f1410647
ET
2721 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2722 if (bp->link_up) {
a2fbb9ea
ET
2723 if (link_10g) {
2724 bnx2x_bmac_enable(bp, 0);
2725 bnx2x_leds_set(bp, SPEED_10000);
2726
2727 } else {
2728 bnx2x_emac_enable(bp);
2729 bnx2x_emac_program(bp);
2730
2731 /* AN complete? */
2732 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2733 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2734 bnx2x_set_sgmii_tx_driver(bp);
2735 }
2736 }
2737 bnx2x_link_up(bp);
2738
2739 } else { /* link down */
2740 bnx2x_leds_unset(bp);
2741 bnx2x_link_down(bp);
2742 }
2743
2744 bnx2x_init_mac_stats(bp);
2745}
2746
2747/*
2748 * Init service functions
2749 */
2750
2751static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2752{
2753 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2754 (bp->phy_addr + bp->ser_lane) : 0;
2755
2756 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2757 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2758}
2759
2760static void bnx2x_set_master_ln(struct bnx2x *bp)
2761{
2762 u32 new_master_ln;
2763
2764 /* set the master_ln for AN */
2765 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2766 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2767 &new_master_ln);
2768 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2769 (new_master_ln | bp->ser_lane));
2770}
2771
2772static void bnx2x_reset_unicore(struct bnx2x *bp)
2773{
2774 u32 mii_control;
2775 int i;
2776
2777 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2778 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2779 /* reset the unicore */
2780 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2781 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2782
2783 /* wait for the reset to self clear */
2784 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2785 udelay(5);
2786
2787 /* the reset erased the previous bank value */
2788 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2789 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2790 &mii_control);
2791
2792 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2793 udelay(5);
2794 return;
2795 }
2796 }
2797
f1410647
ET
2798 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2799 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
2800 bp->phy_addr);
a2fbb9ea
ET
2801}
2802
2803static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2804{
2805 /* Each two bits represents a lane number:
2806 No swap is 0123 => 0x1b no need to enable the swap */
2807
2808 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2809 if (bp->rx_lane_swap != 0x1b) {
2810 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2811 (bp->rx_lane_swap |
2812 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2813 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2814 } else {
2815 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2816 }
2817
2818 if (bp->tx_lane_swap != 0x1b) {
2819 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2820 (bp->tx_lane_swap |
2821 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2822 } else {
2823 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2824 }
2825}
2826
2827static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2828{
2829 u32 control2;
2830
2831 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2832 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2833 &control2);
2834
2835 if (bp->autoneg & AUTONEG_PARALLEL) {
2836 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2837 } else {
2838 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2839 }
2840 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2841 control2);
2842
2843 if (bp->phy_flags & PHY_XGXS_FLAG) {
2844 DP(NETIF_MSG_LINK, "XGXS\n");
2845 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2846
2847 bnx2x_mdio22_write(bp,
f1410647 2848 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
a2fbb9ea
ET
2849 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2850
2851 bnx2x_mdio22_read(bp,
f1410647
ET
2852 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2853 &control2);
a2fbb9ea
ET
2854
2855 if (bp->autoneg & AUTONEG_PARALLEL) {
2856 control2 |=
2857 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2858 } else {
2859 control2 &=
2860 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2861 }
2862 bnx2x_mdio22_write(bp,
f1410647
ET
2863 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2864 control2);
2865
2866 /* Disable parallel detection of HiG */
2867 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2868 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2869 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2870 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
a2fbb9ea
ET
2871 }
2872}
2873
2874static void bnx2x_set_autoneg(struct bnx2x *bp)
2875{
2876 u32 reg_val;
2877
2878 /* CL37 Autoneg */
2879 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2880 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2881 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2882 (bp->autoneg & AUTONEG_CL37)) {
2883 /* CL37 Autoneg Enabled */
2884 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2885 } else {
2886 /* CL37 Autoneg Disabled */
2887 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2888 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2889 }
2890 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2891
2892 /* Enable/Disable Autodetection */
2893 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2894 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2895 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2896
2897 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2898 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2899 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2900 } else {
2901 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2902 }
2903 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2904
2905 /* Enable TetonII and BAM autoneg */
2906 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2907 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2908 &reg_val);
2909 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2910 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2911 /* Enable BAM aneg Mode and TetonII aneg Mode */
2912 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2913 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2914 } else {
2915 /* TetonII and BAM Autoneg Disabled */
2916 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2917 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2918 }
2919 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2920 reg_val);
2921
2922 /* Enable Clause 73 Aneg */
2923 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2924 (bp->autoneg & AUTONEG_CL73)) {
2925 /* Enable BAM Station Manager */
2926 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2927 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2928 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2929 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2930 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2931
2932 /* Merge CL73 and CL37 aneg resolution */
2933 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2934 &reg_val);
2935 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2936 (reg_val |
2937 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2938
2939 /* Set the CL73 AN speed */
2940 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2941 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2942 /* In the SerDes we support only the 1G.
2943 In the XGXS we support the 10G KX4
2944 but we currently do not support the KR */
2945 if (bp->phy_flags & PHY_XGXS_FLAG) {
2946 DP(NETIF_MSG_LINK, "XGXS\n");
2947 /* 10G KX4 */
2948 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2949 } else {
2950 DP(NETIF_MSG_LINK, "SerDes\n");
2951 /* 1000M KX */
2952 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2953 }
2954 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2955
2956 /* CL73 Autoneg Enabled */
2957 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2958 } else {
2959 /* CL73 Autoneg Disabled */
2960 reg_val = 0;
2961 }
2962 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2963 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2964}
2965
2966/* program SerDes, forced speed */
2967static void bnx2x_program_serdes(struct bnx2x *bp)
2968{
2969 u32 reg_val;
2970
2971 /* program duplex, disable autoneg */
2972 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2973 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2974 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2975 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2976 if (bp->req_duplex == DUPLEX_FULL)
2977 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2978 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2979
2980 /* program speed
2981 - needed only if the speed is greater than 1G (2.5G or 10G) */
2982 if (bp->req_line_speed > SPEED_1000) {
2983 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2984 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2985 /* clearing the speed value before setting the right speed */
2986 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2987 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2988 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2989 if (bp->req_line_speed == SPEED_10000)
2990 reg_val |=
2991 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2992 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
2993 }
2994}
2995
2996static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2997{
2998 u32 val = 0;
2999
3000 /* configure the 48 bits for BAM AN */
3001 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3002
3003 /* set extended capabilities */
f1410647 3004 if (bp->advertising & ADVERTISED_2500baseX_Full)
a2fbb9ea
ET
3005 val |= MDIO_OVER_1G_UP1_2_5G;
3006 if (bp->advertising & ADVERTISED_10000baseT_Full)
3007 val |= MDIO_OVER_1G_UP1_10G;
3008 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3009
3010 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3011}
3012
3013static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3014{
3015 u32 an_adv;
3016
3017 /* for AN, we are always publishing full duplex */
3018 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3019
f1410647
ET
3020 /* resolve pause mode and advertisement
3021 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3022 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3023 switch (bp->req_flow_ctrl) {
3024 case FLOW_CTRL_AUTO:
3025 if (bp->dev->mtu <= 4500) {
3026 an_adv |=
3027 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3028 bp->advertising |= (ADVERTISED_Pause |
3029 ADVERTISED_Asym_Pause);
3030 } else {
3031 an_adv |=
3032 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3033 bp->advertising |= ADVERTISED_Asym_Pause;
3034 }
3035 break;
3036
3037 case FLOW_CTRL_TX:
3038 an_adv |=
3039 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3040 bp->advertising |= ADVERTISED_Asym_Pause;
3041 break;
3042
3043 case FLOW_CTRL_RX:
3044 if (bp->dev->mtu <= 4500) {
3045 an_adv |=
3046 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3047 bp->advertising |= (ADVERTISED_Pause |
3048 ADVERTISED_Asym_Pause);
3049 } else {
3050 an_adv |=
3051 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3052 bp->advertising &= ~(ADVERTISED_Pause |
3053 ADVERTISED_Asym_Pause);
3054 }
3055 break;
3056
3057 case FLOW_CTRL_BOTH:
3058 if (bp->dev->mtu <= 4500) {
3059 an_adv |=
3060 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3061 bp->advertising |= (ADVERTISED_Pause |
3062 ADVERTISED_Asym_Pause);
3063 } else {
3064 an_adv |=
3065 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3066 bp->advertising |= ADVERTISED_Asym_Pause;
3067 }
3068 break;
3069
3070 case FLOW_CTRL_NONE:
3071 default:
3072 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3073 bp->advertising &= ~(ADVERTISED_Pause |
3074 ADVERTISED_Asym_Pause);
3075 break;
3076 }
3077 } else { /* forced mode */
3078 switch (bp->req_flow_ctrl) {
3079 case FLOW_CTRL_AUTO:
3080 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3081 " req_autoneg 0x%x\n",
3082 bp->req_flow_ctrl, bp->req_autoneg);
3083 break;
3084
3085 case FLOW_CTRL_TX:
3086 an_adv |=
3087 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3088 bp->advertising |= ADVERTISED_Asym_Pause;
3089 break;
3090
3091 case FLOW_CTRL_RX:
3092 case FLOW_CTRL_BOTH:
3093 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3094 bp->advertising |= (ADVERTISED_Pause |
3095 ADVERTISED_Asym_Pause);
3096 break;
3097
3098 case FLOW_CTRL_NONE:
3099 default:
3100 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3101 bp->advertising &= ~(ADVERTISED_Pause |
3102 ADVERTISED_Asym_Pause);
3103 break;
3104 }
a2fbb9ea
ET
3105 }
3106
3107 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3108 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
3109}
3110
3111static void bnx2x_restart_autoneg(struct bnx2x *bp)
3112{
3113 if (bp->autoneg & AUTONEG_CL73) {
3114 /* enable and restart clause 73 aneg */
3115 u32 an_ctrl;
3116
3117 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3118 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3119 &an_ctrl);
3120 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3121 (an_ctrl |
3122 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3123 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3124
3125 } else {
3126 /* Enable and restart BAM/CL37 aneg */
3127 u32 mii_control;
3128
3129 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3130 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3131 &mii_control);
3132 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3133 (mii_control |
3134 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3135 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3136 }
3137}
3138
3139static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3140{
3141 u32 control1;
3142
3143 /* in SGMII mode, the unicore is always slave */
3144 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3145 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3146 &control1);
3147 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3148 /* set sgmii mode (and not fiber) */
3149 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3150 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3151 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3152 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3153 control1);
3154
3155 /* if forced speed */
3156 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3157 /* set speed, disable autoneg */
3158 u32 mii_control;
3159
3160 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3161 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3162 &mii_control);
3163 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3164 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3165 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3166
3167 switch (bp->req_line_speed) {
3168 case SPEED_100:
3169 mii_control |=
3170 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3171 break;
3172 case SPEED_1000:
3173 mii_control |=
3174 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3175 break;
3176 case SPEED_10:
3177 /* there is nothing to set for 10M */
3178 break;
3179 default:
3180 /* invalid speed for SGMII */
3181 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3182 bp->req_line_speed);
3183 break;
3184 }
3185
3186 /* setting the full duplex */
3187 if (bp->req_duplex == DUPLEX_FULL)
3188 mii_control |=
3189 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3190 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3191 mii_control);
3192
3193 } else { /* AN mode */
3194 /* enable and restart AN */
3195 bnx2x_restart_autoneg(bp);
3196 }
3197}
3198
3199static void bnx2x_link_int_enable(struct bnx2x *bp)
3200{
3201 int port = bp->port;
f1410647
ET
3202 u32 ext_phy_type;
3203 u32 mask;
a2fbb9ea
ET
3204
3205 /* setting the status to report on link up
3206 for either XGXS or SerDes */
3207 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647
ET
3208 (NIG_STATUS_XGXS0_LINK10G |
3209 NIG_STATUS_XGXS0_LINK_STATUS |
3210 NIG_STATUS_SERDES0_LINK_STATUS));
a2fbb9ea
ET
3211
3212 if (bp->phy_flags & PHY_XGXS_FLAG) {
f1410647
ET
3213 mask = (NIG_MASK_XGXS0_LINK10G |
3214 NIG_MASK_XGXS0_LINK_STATUS);
3215 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3216 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3217 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3218 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3219 (ext_phy_type !=
3220 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3221 mask |= NIG_MASK_MI_INT;
3222 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3223 }
a2fbb9ea
ET
3224
3225 } else { /* SerDes */
f1410647
ET
3226 mask = NIG_MASK_SERDES0_LINK_STATUS;
3227 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3228 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3229 if ((ext_phy_type !=
3230 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3231 (ext_phy_type !=
3232 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3233 mask |= NIG_MASK_MI_INT;
3234 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3235 }
a2fbb9ea 3236 }
f1410647
ET
3237 bnx2x_bits_en(bp,
3238 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3239 mask);
3240 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3241 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3242 " 10G %x, XGXS_LINK %x\n", port,
3243 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
3244 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3245 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3246 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3247 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3248 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3249 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3250 );
3251}
3252
3253static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3254{
3255 u32 ext_phy_addr = ((bp->ext_phy_config &
3256 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3257 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3258 u32 fw_ver1, fw_ver2;
3259
3260 /* Need to wait 200ms after reset */
3261 msleep(200);
3262 /* Boot port from external ROM
3263 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3264 */
3265 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3266 EXT_PHY_KR_PMA_PMD_DEVAD,
3267 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3268
3269 /* Reset internal microprocessor */
3270 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3271 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3272 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3273 /* set micro reset = 0 */
3274 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3275 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3276 EXT_PHY_KR_ROM_MICRO_RESET);
3277 /* Reset internal microprocessor */
3278 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3279 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3280 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3281 /* wait for 100ms for code download via SPI port */
3282 msleep(100);
3283
3284 /* Clear ser_boot_ctl bit */
3285 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3286 EXT_PHY_KR_PMA_PMD_DEVAD,
3287 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3288 /* Wait 100ms */
3289 msleep(100);
3290
3291 /* Print the PHY FW version */
3292 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3293 EXT_PHY_KR_PMA_PMD_DEVAD,
3294 0xca19, &fw_ver1);
3295 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3296 EXT_PHY_KR_PMA_PMD_DEVAD,
3297 0xca1a, &fw_ver2);
3298 DP(NETIF_MSG_LINK,
3299 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3300}
3301
3302static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3303{
3304 u32 ext_phy_addr = ((bp->ext_phy_config &
3305 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3306 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3307
3308 /* Force KR or KX */
3309 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3310 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3311 0x2040);
3312 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3313 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3314 0x000b);
3315 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3316 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3317 0x0000);
3318 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3319 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3320 0x0000);
a2fbb9ea
ET
3321}
3322
3323static void bnx2x_ext_phy_init(struct bnx2x *bp)
3324{
a2fbb9ea
ET
3325 u32 ext_phy_type;
3326 u32 ext_phy_addr;
f1410647
ET
3327 u32 cnt;
3328 u32 ctrl;
3329 u32 val = 0;
a2fbb9ea
ET
3330
3331 if (bp->phy_flags & PHY_XGXS_FLAG) {
a2fbb9ea
ET
3332 ext_phy_addr = ((bp->ext_phy_config &
3333 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3334 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3335
3336 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
f1410647
ET
3337 /* Make sure that the soft reset is off (expect for the 8072:
3338 * due to the lock, it will be done inside the specific
3339 * handling)
3340 */
3341 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3342 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3343 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3344 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3345 /* Wait for soft reset to get cleared upto 1 sec */
3346 for (cnt = 0; cnt < 1000; cnt++) {
3347 bnx2x_mdio45_read(bp, ext_phy_addr,
3348 EXT_PHY_OPT_PMA_PMD_DEVAD,
3349 EXT_PHY_OPT_CNTL, &ctrl);
3350 if (!(ctrl & (1<<15)))
3351 break;
3352 msleep(1);
3353 }
3354 DP(NETIF_MSG_LINK,
3355 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3356 }
3357
a2fbb9ea
ET
3358 switch (ext_phy_type) {
3359 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3360 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3361 break;
3362
3363 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3364 DP(NETIF_MSG_LINK, "XGXS 8705\n");
a2fbb9ea 3365
f1410647
ET
3366 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3367 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea
ET
3368 EXT_PHY_OPT_PMD_MISC_CNTL,
3369 0x8288);
f1410647
ET
3370 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3371 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea
ET
3372 EXT_PHY_OPT_PHY_IDENTIFIER,
3373 0x7fbf);
f1410647
ET
3374 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3375 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea
ET
3376 EXT_PHY_OPT_CMU_PLL_BYPASS,
3377 0x0100);
f1410647
ET
3378 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3379 EXT_PHY_OPT_WIS_DEVAD,
a2fbb9ea
ET
3380 EXT_PHY_OPT_LASI_CNTL, 0x1);
3381 break;
3382
3383 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3384 DP(NETIF_MSG_LINK, "XGXS 8706\n");
a2fbb9ea 3385
f1410647
ET
3386 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3387 /* Force speed */
3388 if (bp->req_line_speed == SPEED_10000) {
3389 DP(NETIF_MSG_LINK,
3390 "XGXS 8706 force 10Gbps\n");
3391 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3392 EXT_PHY_OPT_PMA_PMD_DEVAD,
3393 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3394 0x400);
3395 } else {
3396 /* Force 1Gbps */
3397 DP(NETIF_MSG_LINK,
3398 "XGXS 8706 force 1Gbps\n");
3399
3400 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3401 EXT_PHY_OPT_PMA_PMD_DEVAD,
3402 EXT_PHY_OPT_CNTL,
3403 0x0040);
3404
3405 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3406 EXT_PHY_OPT_PMA_PMD_DEVAD,
3407 EXT_PHY_OPT_CNTL2,
3408 0x000D);
3409 }
3410
3411 /* Enable LASI */
3412 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3413 EXT_PHY_OPT_PMA_PMD_DEVAD,
3414 EXT_PHY_OPT_LASI_CNTL,
3415 0x1);
3416 } else {
3417 /* AUTONEG */
3418 /* Allow CL37 through CL73 */
3419 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3420 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3421 EXT_PHY_AUTO_NEG_DEVAD,
3422 EXT_PHY_OPT_AN_CL37_CL73,
3423 0x040c);
3424
3425 /* Enable Full-Duplex advertisment on CL37 */
3426 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3427 EXT_PHY_AUTO_NEG_DEVAD,
3428 EXT_PHY_OPT_AN_CL37_FD,
3429 0x0020);
3430 /* Enable CL37 AN */
3431 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3432 EXT_PHY_AUTO_NEG_DEVAD,
3433 EXT_PHY_OPT_AN_CL37_AN,
3434 0x1000);
3435 /* Advertise 10G/1G support */
3436 if (bp->advertising &
3437 ADVERTISED_1000baseT_Full)
3438 val = (1<<5);
3439 if (bp->advertising &
3440 ADVERTISED_10000baseT_Full)
3441 val |= (1<<7);
3442
3443 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3444 EXT_PHY_AUTO_NEG_DEVAD,
3445 EXT_PHY_OPT_AN_ADV, val);
3446 /* Enable LASI */
3447 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3448 EXT_PHY_OPT_PMA_PMD_DEVAD,
3449 EXT_PHY_OPT_LASI_CNTL,
3450 0x1);
3451
3452 /* Enable clause 73 AN */
3453 bnx2x_mdio45_write(bp, ext_phy_addr,
3454 EXT_PHY_AUTO_NEG_DEVAD,
3455 EXT_PHY_OPT_CNTL,
3456 0x1200);
3457 }
3458 break;
3459
3460 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3461 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3462 /* Wait for soft reset to get cleared upto 1 sec */
3463 for (cnt = 0; cnt < 1000; cnt++) {
3464 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3465 ext_phy_addr,
3466 EXT_PHY_OPT_PMA_PMD_DEVAD,
3467 EXT_PHY_OPT_CNTL, &ctrl);
3468 if (!(ctrl & (1<<15)))
3469 break;
3470 msleep(1);
3471 }
3472 DP(NETIF_MSG_LINK,
3473 "8072 control reg 0x%x (after %d ms)\n",
3474 ctrl, cnt);
3475
3476 bnx2x_bcm8072_external_rom_boot(bp);
3477 DP(NETIF_MSG_LINK, "Finshed loading 8072 KR ROM\n");
3478
3479 /* enable LASI */
3480 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3481 ext_phy_addr,
3482 EXT_PHY_KR_PMA_PMD_DEVAD,
3483 0x9000, 0x0400);
3484 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3485 ext_phy_addr,
3486 EXT_PHY_KR_PMA_PMD_DEVAD,
3487 EXT_PHY_KR_LASI_CNTL, 0x0004);
3488
3489 /* If this is forced speed, set to KR or KX
3490 * (all other are not supported)
3491 */
3492 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3493 if (bp->req_line_speed == SPEED_10000) {
3494 bnx2x_bcm8072_force_10G(bp);
3495 DP(NETIF_MSG_LINK,
3496 "Forced speed 10G on 8072\n");
3497 /* unlock */
3498 bnx2x_hw_unlock(bp,
3499 HW_LOCK_RESOURCE_8072_MDIO);
3500 break;
3501 } else
3502 val = (1<<5);
3503 } else {
3504
3505 /* Advertise 10G/1G support */
3506 if (bp->advertising &
3507 ADVERTISED_1000baseT_Full)
3508 val = (1<<5);
3509 if (bp->advertising &
3510 ADVERTISED_10000baseT_Full)
3511 val |= (1<<7);
3512 }
3513 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3514 ext_phy_addr,
3515 EXT_PHY_KR_AUTO_NEG_DEVAD,
3516 0x11, val);
3517 /* Add support for CL37 ( passive mode ) I */
3518 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3519 ext_phy_addr,
3520 EXT_PHY_KR_AUTO_NEG_DEVAD,
3521 0x8370, 0x040c);
3522 /* Add support for CL37 ( passive mode ) II */
3523 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3524 ext_phy_addr,
3525 EXT_PHY_KR_AUTO_NEG_DEVAD,
3526 0xffe4, 0x20);
3527 /* Add support for CL37 ( passive mode ) III */
3528 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3529 ext_phy_addr,
3530 EXT_PHY_KR_AUTO_NEG_DEVAD,
3531 0xffe0, 0x1000);
3532 /* Restart autoneg */
3533 msleep(500);
3534 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3535 ext_phy_addr,
3536 EXT_PHY_KR_AUTO_NEG_DEVAD,
3537 EXT_PHY_KR_CTRL, 0x1200);
3538 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3539 "1G %ssupported 10G %ssupported\n",
3540 (val & (1<<5)) ? "" : "not ",
3541 (val & (1<<7)) ? "" : "not ");
3542
3543 /* unlock */
3544 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3545 break;
3546
3547 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3548 DP(NETIF_MSG_LINK,
3549 "Setting the SFX7101 LASI indication\n");
3550 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3551 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 3552 EXT_PHY_OPT_LASI_CNTL, 0x1);
f1410647
ET
3553 DP(NETIF_MSG_LINK,
3554 "Setting the SFX7101 LED to blink on traffic\n");
3555 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3556 EXT_PHY_OPT_PMA_PMD_DEVAD,
3557 0xC007, (1<<3));
3558
3559 /* read modify write pause advertizing */
3560 bnx2x_mdio45_read(bp, ext_phy_addr,
3561 EXT_PHY_KR_AUTO_NEG_DEVAD,
3562 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3563 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3564 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3565 if (bp->advertising & ADVERTISED_Pause)
3566 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3567
3568 if (bp->advertising & ADVERTISED_Asym_Pause) {
3569 val |=
3570 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3571 }
3572 DP(NETIF_MSG_LINK, "SFX7101 AN advertize 0x%x\n", val);
3573 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3574 EXT_PHY_KR_AUTO_NEG_DEVAD,
3575 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3576 /* Restart autoneg */
3577 bnx2x_mdio45_read(bp, ext_phy_addr,
3578 EXT_PHY_KR_AUTO_NEG_DEVAD,
3579 EXT_PHY_KR_CTRL, &val);
3580 val |= 0x200;
3581 bnx2x_mdio45_write(bp, ext_phy_addr,
3582 EXT_PHY_KR_AUTO_NEG_DEVAD,
3583 EXT_PHY_KR_CTRL, val);
a2fbb9ea
ET
3584 break;
3585
3586 default:
f1410647
ET
3587 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3588 bp->ext_phy_config);
a2fbb9ea
ET
3589 break;
3590 }
a2fbb9ea
ET
3591
3592 } else { /* SerDes */
f1410647 3593/* ext_phy_addr = ((bp->ext_phy_config &
a2fbb9ea
ET
3594 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3595 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3596*/
3597 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3598 switch (ext_phy_type) {
3599 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3600 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3601 break;
3602
3603 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3604 DP(NETIF_MSG_LINK, "SerDes 5482\n");
a2fbb9ea
ET
3605 break;
3606
3607 default:
3608 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3609 bp->ext_phy_config);
3610 break;
3611 }
3612 }
3613}
3614
3615static void bnx2x_ext_phy_reset(struct bnx2x *bp)
3616{
3617 u32 ext_phy_type;
f1410647
ET
3618 u32 ext_phy_addr = ((bp->ext_phy_config &
3619 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3620 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3621 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3622
3623 /* The PHY reset is controled by GPIO 1
3624 * Give it 1ms of reset pulse
3625 */
3626 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3627 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3628 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3629 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3630 msleep(1);
3631 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3632 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3633 }
a2fbb9ea
ET
3634
3635 if (bp->phy_flags & PHY_XGXS_FLAG) {
3636 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3637 switch (ext_phy_type) {
3638 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3639 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3640 break;
3641
3642 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3643 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
3644 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
3645 bnx2x_mdio45_write(bp, ext_phy_addr,
3646 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 3647 EXT_PHY_OPT_CNTL, 0xa040);
f1410647
ET
3648 break;
3649
3650 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3651 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3652 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3653 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3654 ext_phy_addr,
3655 EXT_PHY_KR_PMA_PMD_DEVAD,
3656 0, 1<<15);
3657 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3658 break;
3659
3660 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3661 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
a2fbb9ea
ET
3662 break;
3663
3664 default:
3665 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
3666 bp->ext_phy_config);
3667 break;
3668 }
3669
3670 } else { /* SerDes */
3671 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3672 switch (ext_phy_type) {
3673 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3674 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3675 break;
3676
3677 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3678 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3679 break;
3680
3681 default:
3682 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3683 bp->ext_phy_config);
3684 break;
3685 }
3686 }
3687}
3688
3689static void bnx2x_link_initialize(struct bnx2x *bp)
3690{
3691 int port = bp->port;
3692
3693 /* disable attentions */
3694 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3695 (NIG_MASK_XGXS0_LINK_STATUS |
3696 NIG_MASK_XGXS0_LINK10G |
3697 NIG_MASK_SERDES0_LINK_STATUS |
3698 NIG_MASK_MI_INT));
3699
f1410647 3700 /* Activate the external PHY */
a2fbb9ea
ET
3701 bnx2x_ext_phy_reset(bp);
3702
3703 bnx2x_set_aer_mmd(bp);
3704
3705 if (bp->phy_flags & PHY_XGXS_FLAG)
3706 bnx2x_set_master_ln(bp);
3707
3708 /* reset the SerDes and wait for reset bit return low */
3709 bnx2x_reset_unicore(bp);
3710
3711 bnx2x_set_aer_mmd(bp);
3712
3713 /* setting the masterLn_def again after the reset */
3714 if (bp->phy_flags & PHY_XGXS_FLAG) {
3715 bnx2x_set_master_ln(bp);
3716 bnx2x_set_swap_lanes(bp);
3717 }
3718
3719 /* Set Parallel Detect */
3720 if (bp->req_autoneg & AUTONEG_SPEED)
3721 bnx2x_set_parallel_detection(bp);
3722
3723 if (bp->phy_flags & PHY_XGXS_FLAG) {
3724 if (bp->req_line_speed &&
3725 bp->req_line_speed < SPEED_1000) {
3726 bp->phy_flags |= PHY_SGMII_FLAG;
3727 } else {
3728 bp->phy_flags &= ~PHY_SGMII_FLAG;
3729 }
3730 }
3731
3732 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
3733 u16 bank, rx_eq;
3734
3735 rx_eq = ((bp->serdes_config &
3736 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3737 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3738
3739 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
3740 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
3741 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
3742 MDIO_SET_REG_BANK(bp, bank);
3743 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
3744 ((rx_eq &
3745 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
3746 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
3747 }
3748
3749 /* forced speed requested? */
3750 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3751 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3752
3753 /* disable autoneg */
3754 bnx2x_set_autoneg(bp);
3755
3756 /* program speed and duplex */
3757 bnx2x_program_serdes(bp);
3758
3759 } else { /* AN_mode */
3760 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3761
3762 /* AN enabled */
3763 bnx2x_set_brcm_cl37_advertisment(bp);
3764
c14423fe 3765 /* program duplex & pause advertisement (for aneg) */
a2fbb9ea
ET
3766 bnx2x_set_ieee_aneg_advertisment(bp);
3767
3768 /* enable autoneg */
3769 bnx2x_set_autoneg(bp);
3770
c14423fe 3771 /* enable and restart AN */
a2fbb9ea
ET
3772 bnx2x_restart_autoneg(bp);
3773 }
3774
3775 } else { /* SGMII mode */
3776 DP(NETIF_MSG_LINK, "SGMII\n");
3777
3778 bnx2x_initialize_sgmii_process(bp);
3779 }
3780
a2fbb9ea
ET
3781 /* init ext phy and enable link state int */
3782 bnx2x_ext_phy_init(bp);
f1410647
ET
3783
3784 /* enable the interrupt */
3785 bnx2x_link_int_enable(bp);
a2fbb9ea
ET
3786}
3787
3788static void bnx2x_phy_deassert(struct bnx2x *bp)
3789{
3790 int port = bp->port;
3791 u32 val;
3792
3793 if (bp->phy_flags & PHY_XGXS_FLAG) {
3794 DP(NETIF_MSG_LINK, "XGXS\n");
3795 val = XGXS_RESET_BITS;
3796
3797 } else { /* SerDes */
3798 DP(NETIF_MSG_LINK, "SerDes\n");
3799 val = SERDES_RESET_BITS;
3800 }
3801
3802 val = val << (port*16);
3803
3804 /* reset and unreset the SerDes/XGXS */
3805 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3806 msleep(5);
3807 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3808}
3809
3810static int bnx2x_phy_init(struct bnx2x *bp)
3811{
3812 DP(NETIF_MSG_LINK, "started\n");
3813 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3814 bp->phy_flags |= PHY_EMAC_FLAG;
3815 bp->link_up = 1;
3816 bp->line_speed = SPEED_10000;
3817 bp->duplex = DUPLEX_FULL;
3818 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3819 bnx2x_emac_enable(bp);
3820 bnx2x_link_report(bp);
3821 return 0;
3822
3823 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3824 bp->phy_flags |= PHY_BMAC_FLAG;
3825 bp->link_up = 1;
3826 bp->line_speed = SPEED_10000;
3827 bp->duplex = DUPLEX_FULL;
3828 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3829 bnx2x_bmac_enable(bp, 0);
3830 bnx2x_link_report(bp);
3831 return 0;
3832
3833 } else {
3834 bnx2x_phy_deassert(bp);
3835 bnx2x_link_initialize(bp);
3836 }
3837
3838 return 0;
3839}
3840
3841static void bnx2x_link_reset(struct bnx2x *bp)
3842{
3843 int port = bp->port;
f1410647
ET
3844 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3845
3846 /* update shared memory */
3847 bp->link_status = 0;
3848 bnx2x_update_mng(bp);
a2fbb9ea
ET
3849
3850 /* disable attentions */
3851 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3852 (NIG_MASK_XGXS0_LINK_STATUS |
3853 NIG_MASK_XGXS0_LINK10G |
3854 NIG_MASK_SERDES0_LINK_STATUS |
3855 NIG_MASK_MI_INT));
3856
f1410647
ET
3857 /* activate nig drain */
3858 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3859
3860 /* disable nig egress interface */
3861 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3862 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3863
3864 /* Stop BigMac rx */
3865 bnx2x_bmac_rx_disable(bp);
3866
3867 /* disable emac */
3868 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3869
3870 msleep(10);
3871
3872 /* The PHY reset is controled by GPIO 1
3873 * Hold it as output low
3874 */
3875 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3876 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3877 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3878 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3879 DP(NETIF_MSG_LINK, "reset external PHY\n");
3880 }
a2fbb9ea
ET
3881
3882 /* reset the SerDes/XGXS */
3883 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3884 (0x1ff << (port*16)));
3885
f1410647
ET
3886 /* reset BigMac */
3887 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3888 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
a2fbb9ea 3889
f1410647
ET
3890 /* disable nig ingress interface */
3891 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
a2fbb9ea 3892 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
a2fbb9ea 3893
f1410647
ET
3894 /* set link down */
3895 bp->link_up = 0;
a2fbb9ea
ET
3896}
3897
3898#ifdef BNX2X_XGXS_LB
3899static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3900{
3901 int port = bp->port;
3902
3903 if (is_10g) {
3904 u32 md_devad;
3905
3906 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3907
3908 /* change the uni_phy_addr in the nig */
3909 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3910 &md_devad);
3911 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3912
3913 /* change the aer mmd */
3914 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3915 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3916
3917 /* config combo IEEE0 control reg for loopback */
3918 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3919 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3920 0x6041);
3921
3922 /* set aer mmd back */
3923 bnx2x_set_aer_mmd(bp);
3924
3925 /* and md_devad */
3926 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3927
3928 } else {
3929 u32 mii_control;
3930
3931 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3932
3933 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3934 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3935 &mii_control);
3936 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3937 (mii_control |
3938 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3939 }
3940}
3941#endif
3942
3943/* end of PHY/MAC */
3944
3945/* slow path */
3946
3947/*
3948 * General service functions
3949 */
3950
3951/* the slow path queue is odd since completions arrive on the fastpath ring */
3952static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3953 u32 data_hi, u32 data_lo, int common)
3954{
3955 int port = bp->port;
3956
3957 DP(NETIF_MSG_TIMER,
c14423fe 3958 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
3959 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3960 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3961 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3962
3963#ifdef BNX2X_STOP_ON_ERROR
3964 if (unlikely(bp->panic))
3965 return -EIO;
3966#endif
3967
3968 spin_lock(&bp->spq_lock);
3969
3970 if (!bp->spq_left) {
3971 BNX2X_ERR("BUG! SPQ ring full!\n");
3972 spin_unlock(&bp->spq_lock);
3973 bnx2x_panic();
3974 return -EBUSY;
3975 }
f1410647 3976
a2fbb9ea
ET
3977 /* CID needs port number to be encoded int it */
3978 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3979 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3980 HW_CID(bp, cid)));
3981 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3982 if (common)
3983 bp->spq_prod_bd->hdr.type |=
3984 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3985
3986 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3987 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3988
3989 bp->spq_left--;
3990
3991 if (bp->spq_prod_bd == bp->spq_last_bd) {
3992 bp->spq_prod_bd = bp->spq;
3993 bp->spq_prod_idx = 0;
3994 DP(NETIF_MSG_TIMER, "end of spq\n");
3995
3996 } else {
3997 bp->spq_prod_bd++;
3998 bp->spq_prod_idx++;
3999 }
4000
4001 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
4002 bp->spq_prod_idx);
4003
4004 spin_unlock(&bp->spq_lock);
4005 return 0;
4006}
4007
4008/* acquire split MCP access lock register */
4009static int bnx2x_lock_alr(struct bnx2x *bp)
4010{
4011 int rc = 0;
4012 u32 i, j, val;
4013
4014 might_sleep();
4015 i = 100;
4016 for (j = 0; j < i*10; j++) {
4017 val = (1UL << 31);
4018 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4019 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4020 if (val & (1L << 31))
4021 break;
4022
4023 msleep(5);
4024 }
4025
4026 if (!(val & (1L << 31))) {
4027 BNX2X_ERR("Cannot acquire nvram interface\n");
4028
4029 rc = -EBUSY;
4030 }
4031
4032 return rc;
4033}
4034
4035/* Release split MCP access lock register */
4036static void bnx2x_unlock_alr(struct bnx2x *bp)
4037{
4038 u32 val = 0;
4039
4040 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4041}
4042
4043static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4044{
4045 struct host_def_status_block *def_sb = bp->def_status_blk;
4046 u16 rc = 0;
4047
4048 barrier(); /* status block is written to by the chip */
4049
4050 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4051 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4052 rc |= 1;
4053 }
4054 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4055 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4056 rc |= 2;
4057 }
4058 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4059 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4060 rc |= 4;
4061 }
4062 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4063 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4064 rc |= 8;
4065 }
4066 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4067 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4068 rc |= 16;
4069 }
4070 return rc;
4071}
4072
4073/*
4074 * slow path service functions
4075 */
4076
4077static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4078{
4079 int port = bp->port;
4080 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
4081 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4082 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4083 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4084 NIG_REG_MASK_INTERRUPT_PORT0;
4085
4086 if (~bp->aeu_mask & (asserted & 0xff))
4087 BNX2X_ERR("IGU ERROR\n");
4088 if (bp->attn_state & asserted)
4089 BNX2X_ERR("IGU ERROR\n");
4090
4091 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4092 bp->aeu_mask, asserted);
4093 bp->aeu_mask &= ~(asserted & 0xff);
4094 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
4095
4096 REG_WR(bp, aeu_addr, bp->aeu_mask);
4097
4098 bp->attn_state |= asserted;
4099
4100 if (asserted & ATTN_HARD_WIRED_MASK) {
4101 if (asserted & ATTN_NIG_FOR_FUNC) {
4102 u32 nig_status_port;
4103 u32 nig_int_addr = port ?
4104 NIG_REG_STATUS_INTERRUPT_PORT1 :
4105 NIG_REG_STATUS_INTERRUPT_PORT0;
4106
4107 bp->nig_mask = REG_RD(bp, nig_mask_addr);
4108 REG_WR(bp, nig_mask_addr, 0);
4109
4110 nig_status_port = REG_RD(bp, nig_int_addr);
4111 bnx2x_link_update(bp);
4112
4113 /* handle unicore attn? */
4114 }
4115 if (asserted & ATTN_SW_TIMER_4_FUNC)
4116 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4117
4118 if (asserted & GPIO_2_FUNC)
4119 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4120
4121 if (asserted & GPIO_3_FUNC)
4122 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4123
4124 if (asserted & GPIO_4_FUNC)
4125 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4126
4127 if (port == 0) {
4128 if (asserted & ATTN_GENERAL_ATTN_1) {
4129 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4130 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4131 }
4132 if (asserted & ATTN_GENERAL_ATTN_2) {
4133 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4134 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4135 }
4136 if (asserted & ATTN_GENERAL_ATTN_3) {
4137 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4138 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4139 }
4140 } else {
4141 if (asserted & ATTN_GENERAL_ATTN_4) {
4142 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4144 }
4145 if (asserted & ATTN_GENERAL_ATTN_5) {
4146 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4147 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4148 }
4149 if (asserted & ATTN_GENERAL_ATTN_6) {
4150 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4151 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4152 }
4153 }
4154
4155 } /* if hardwired */
4156
4157 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
4158 asserted, BAR_IGU_INTMEM + igu_addr);
4159 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
4160
4161 /* now set back the mask */
4162 if (asserted & ATTN_NIG_FOR_FUNC)
4163 REG_WR(bp, nig_mask_addr, bp->nig_mask);
4164}
4165
4166static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4167{
4168 int port = bp->port;
4169 int index;
4170 struct attn_route attn;
4171 struct attn_route group_mask;
4172 u32 reg_addr;
4173 u32 val;
4174
4175 /* need to take HW lock because MCP or other port might also
4176 try to handle this event */
4177 bnx2x_lock_alr(bp);
4178
4179 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4180 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4181 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4182 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4183 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4184
4185 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4186 if (deasserted & (1 << index)) {
4187 group_mask = bp->attn_group[index];
4188
4189 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4190 (unsigned long long)group_mask.sig[0]);
4191
4192 if (attn.sig[3] & group_mask.sig[3] &
4193 EVEREST_GEN_ATTN_IN_USE_MASK) {
4194
4195 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
4196
4197 BNX2X_ERR("MC assert!\n");
4198 bnx2x_panic();
4199
4200 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
4201
4202 BNX2X_ERR("MCP assert!\n");
4203 REG_WR(bp,
4204 MISC_REG_AEU_GENERAL_ATTN_11, 0);
4205 bnx2x_mc_assert(bp);
4206
4207 } else {
4208 BNX2X_ERR("UNKOWEN HW ASSERT!\n");
4209 }
4210 }
4211
4212 if (attn.sig[1] & group_mask.sig[1] &
4213 BNX2X_DOORQ_ASSERT) {
4214
4215 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4216 BNX2X_ERR("DB hw attention 0x%x\n", val);
4217 /* DORQ discard attention */
4218 if (val & 0x2)
4219 BNX2X_ERR("FATAL error from DORQ\n");
4220 }
4221
4222 if (attn.sig[2] & group_mask.sig[2] &
4223 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4224
4225 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4226 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4227 /* CFC error attention */
4228 if (val & 0x2)
4229 BNX2X_ERR("FATAL error from CFC\n");
4230 }
4231
4232 if (attn.sig[2] & group_mask.sig[2] &
4233 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4234
4235 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4236 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4237 /* RQ_USDMDP_FIFO_OVERFLOW */
4238 if (val & 0x18000)
4239 BNX2X_ERR("FATAL error from PXP\n");
4240 }
4241
4242 if (attn.sig[3] & group_mask.sig[3] &
4243 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4244
4245 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4246 0x7ff);
4247 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
4248 attn.sig[3]);
4249 }
4250
4251 if ((attn.sig[0] & group_mask.sig[0] &
4252 HW_INTERRUT_ASSERT_SET_0) ||
4253 (attn.sig[1] & group_mask.sig[1] &
4254 HW_INTERRUT_ASSERT_SET_1) ||
4255 (attn.sig[2] & group_mask.sig[2] &
4256 HW_INTERRUT_ASSERT_SET_2))
4257 BNX2X_ERR("FATAL HW block attention\n");
4258
4259 if ((attn.sig[0] & group_mask.sig[0] &
4260 HW_PRTY_ASSERT_SET_0) ||
4261 (attn.sig[1] & group_mask.sig[1] &
4262 HW_PRTY_ASSERT_SET_1) ||
4263 (attn.sig[2] & group_mask.sig[2] &
4264 HW_PRTY_ASSERT_SET_2))
c14423fe 4265 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
4266 }
4267 }
4268
4269 bnx2x_unlock_alr(bp);
4270
4271 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4272
4273 val = ~deasserted;
4274/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4275 val, BAR_IGU_INTMEM + reg_addr); */
4276 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4277
4278 if (bp->aeu_mask & (deasserted & 0xff))
4279 BNX2X_ERR("IGU BUG\n");
4280 if (~bp->attn_state & deasserted)
4281 BNX2X_ERR("IGU BUG\n");
4282
4283 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4284 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4285
4286 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4287 bp->aeu_mask |= (deasserted & 0xff);
4288
4289 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4290 REG_WR(bp, reg_addr, bp->aeu_mask);
4291
4292 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4293 bp->attn_state &= ~deasserted;
4294 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4295}
4296
4297static void bnx2x_attn_int(struct bnx2x *bp)
4298{
4299 /* read local copy of bits */
4300 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4301 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4302 u32 attn_state = bp->attn_state;
4303
4304 /* look for changed bits */
4305 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4306 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4307
4308 DP(NETIF_MSG_HW,
4309 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4310 attn_bits, attn_ack, asserted, deasserted);
4311
4312 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4313 BNX2X_ERR("bad attention state\n");
4314
4315 /* handle bits that were raised */
4316 if (asserted)
4317 bnx2x_attn_int_asserted(bp, asserted);
4318
4319 if (deasserted)
4320 bnx2x_attn_int_deasserted(bp, deasserted);
4321}
4322
4323static void bnx2x_sp_task(struct work_struct *work)
4324{
4325 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
4326 u16 status;
4327
4328 /* Return here if interrupt is disabled */
4329 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4330 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4331 return;
4332 }
4333
4334 status = bnx2x_update_dsb_idx(bp);
4335 if (status == 0)
4336 BNX2X_ERR("spurious slowpath interrupt!\n");
4337
4338 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
4339
4340 if (status & 0x1) {
4341 /* HW attentions */
4342 bnx2x_attn_int(bp);
4343 }
4344
4345 /* CStorm events: query_stats, cfc delete ramrods */
4346 if (status & 0x2)
4347 bp->stat_pending = 0;
4348
4349 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
4350 IGU_INT_NOP, 1);
4351 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
4352 IGU_INT_NOP, 1);
4353 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
4354 IGU_INT_NOP, 1);
4355 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
4356 IGU_INT_NOP, 1);
4357 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
4358 IGU_INT_ENABLE, 1);
4359}
4360
4361static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4362{
4363 struct net_device *dev = dev_instance;
4364 struct bnx2x *bp = netdev_priv(dev);
4365
4366 /* Return here if interrupt is disabled */
4367 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4368 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4369 return IRQ_HANDLED;
4370 }
4371
4372 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
4373
4374#ifdef BNX2X_STOP_ON_ERROR
4375 if (unlikely(bp->panic))
4376 return IRQ_HANDLED;
4377#endif
4378
4379 schedule_work(&bp->sp_task);
4380
4381 return IRQ_HANDLED;
4382}
4383
4384/* end of slow path */
4385
4386/* Statistics */
4387
4388/****************************************************************************
4389* Macros
4390****************************************************************************/
4391
4392#define UPDATE_STAT(s, t) \
4393 do { \
4394 estats->t += new->s - old->s; \
4395 old->s = new->s; \
4396 } while (0)
4397
4398/* sum[hi:lo] += add[hi:lo] */
4399#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
4400 do { \
4401 s_lo += a_lo; \
4402 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
4403 } while (0)
4404
4405/* difference = minuend - subtrahend */
4406#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
4407 do { \
4408 if (m_lo < s_lo) { /* underflow */ \
4409 d_hi = m_hi - s_hi; \
4410 if (d_hi > 0) { /* we can 'loan' 1 */ \
4411 d_hi--; \
4412 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
4413 } else { /* m_hi <= s_hi */ \
4414 d_hi = 0; \
4415 d_lo = 0; \
4416 } \
4417 } else { /* m_lo >= s_lo */ \
4418 if (m_hi < s_hi) { \
4419 d_hi = 0; \
4420 d_lo = 0; \
4421 } else { /* m_hi >= s_hi */ \
4422 d_hi = m_hi - s_hi; \
4423 d_lo = m_lo - s_lo; \
4424 } \
4425 } \
4426 } while (0)
4427
4428/* minuend -= subtrahend */
4429#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
4430 do { \
4431 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
4432 } while (0)
4433
4434#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
4435 do { \
4436 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
4437 diff.lo, new->s_lo, old->s_lo); \
4438 old->s_hi = new->s_hi; \
4439 old->s_lo = new->s_lo; \
4440 ADD_64(estats->t_hi, diff.hi, \
4441 estats->t_lo, diff.lo); \
4442 } while (0)
4443
4444/* sum[hi:lo] += add */
4445#define ADD_EXTEND_64(s_hi, s_lo, a) \
4446 do { \
4447 s_lo += a; \
4448 s_hi += (s_lo < a) ? 1 : 0; \
4449 } while (0)
4450
4451#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
4452 do { \
4453 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
4454 } while (0)
4455
4456#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
4457 do { \
4458 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
4459 old_tclient->s = le32_to_cpu(tclient->s); \
4460 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
4461 } while (0)
4462
4463/*
4464 * General service functions
4465 */
4466
4467static inline long bnx2x_hilo(u32 *hiref)
4468{
4469 u32 lo = *(hiref + 1);
4470#if (BITS_PER_LONG == 64)
4471 u32 hi = *hiref;
4472
4473 return HILO_U64(hi, lo);
4474#else
4475 return lo;
4476#endif
4477}
4478
4479/*
4480 * Init service functions
4481 */
4482
4483static void bnx2x_init_mac_stats(struct bnx2x *bp)
4484{
4485 struct dmae_command *dmae;
4486 int port = bp->port;
4487 int loader_idx = port * 8;
4488 u32 opcode;
4489 u32 mac_addr;
4490
4491 bp->executer_idx = 0;
4492 if (bp->fw_mb) {
4493 /* MCP */
4494 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4495 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4496#ifdef __BIG_ENDIAN
4497 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4498#else
4499 DMAE_CMD_ENDIANITY_DW_SWAP |
4500#endif
4501 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4502
4503 if (bp->link_up)
4504 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
4505
4506 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4507 dmae->opcode = opcode;
4508 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
4509 sizeof(u32));
4510 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
4511 sizeof(u32));
4512 dmae->dst_addr_lo = bp->fw_mb >> 2;
4513 dmae->dst_addr_hi = 0;
4514 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
4515 sizeof(u32)) >> 2;
4516 if (bp->link_up) {
4517 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4518 dmae->comp_addr_hi = 0;
4519 dmae->comp_val = 1;
4520 } else {
4521 dmae->comp_addr_lo = 0;
4522 dmae->comp_addr_hi = 0;
4523 dmae->comp_val = 0;
4524 }
4525 }
4526
4527 if (!bp->link_up) {
4528 /* no need to collect statistics in link down */
4529 return;
4530 }
4531
4532 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4533 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4534 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4535#ifdef __BIG_ENDIAN
4536 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4537#else
4538 DMAE_CMD_ENDIANITY_DW_SWAP |
4539#endif
4540 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4541
4542 if (bp->phy_flags & PHY_BMAC_FLAG) {
4543
4544 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4545 NIG_REG_INGRESS_BMAC0_MEM);
4546
4547 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4548 BIGMAC_REGISTER_TX_STAT_GTBYT */
4549 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4550 dmae->opcode = opcode;
4551 dmae->src_addr_lo = (mac_addr +
4552 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4553 dmae->src_addr_hi = 0;
4554 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4555 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4556 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4557 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4558 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4559 dmae->comp_addr_hi = 0;
4560 dmae->comp_val = 1;
4561
4562 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4563 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4564 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4565 dmae->opcode = opcode;
4566 dmae->src_addr_lo = (mac_addr +
4567 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4568 dmae->src_addr_hi = 0;
4569 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4570 offsetof(struct bmac_stats, rx_gr64));
4571 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4572 offsetof(struct bmac_stats, rx_gr64));
4573 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4574 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4575 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4576 dmae->comp_addr_hi = 0;
4577 dmae->comp_val = 1;
4578
4579 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4580
4581 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4582
4583 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4584 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4585 dmae->opcode = opcode;
4586 dmae->src_addr_lo = (mac_addr +
4587 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4588 dmae->src_addr_hi = 0;
4589 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4590 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4591 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4593 dmae->comp_addr_hi = 0;
4594 dmae->comp_val = 1;
4595
4596 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4597 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4598 dmae->opcode = opcode;
4599 dmae->src_addr_lo = (mac_addr +
4600 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4601 dmae->src_addr_hi = 0;
4602 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4603 offsetof(struct emac_stats,
4604 rx_falsecarriererrors));
4605 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4606 offsetof(struct emac_stats,
4607 rx_falsecarriererrors));
4608 dmae->len = 1;
4609 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4610 dmae->comp_addr_hi = 0;
4611 dmae->comp_val = 1;
4612
4613 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4614 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4615 dmae->opcode = opcode;
4616 dmae->src_addr_lo = (mac_addr +
4617 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4618 dmae->src_addr_hi = 0;
4619 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4620 offsetof(struct emac_stats,
4621 tx_ifhcoutoctets));
4622 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4623 offsetof(struct emac_stats,
4624 tx_ifhcoutoctets));
4625 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4626 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4627 dmae->comp_addr_hi = 0;
4628 dmae->comp_val = 1;
4629 }
4630
4631 /* NIG */
4632 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4633 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4634 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4635 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4636#ifdef __BIG_ENDIAN
4637 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4638#else
4639 DMAE_CMD_ENDIANITY_DW_SWAP |
4640#endif
4641 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4642 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4643 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4644 dmae->src_addr_hi = 0;
4645 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
4646 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
4647 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
4648 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
4649 offsetof(struct nig_stats, done));
4650 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
4651 offsetof(struct nig_stats, done));
4652 dmae->comp_val = 0xffffffff;
4653}
4654
4655static void bnx2x_init_stats(struct bnx2x *bp)
4656{
4657 int port = bp->port;
4658
4659 bp->stats_state = STATS_STATE_DISABLE;
4660 bp->executer_idx = 0;
4661
4662 bp->old_brb_discard = REG_RD(bp,
4663 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4664
4665 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
4666 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
4667 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4668
4669 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
4670 REG_WR(bp, BAR_XSTRORM_INTMEM +
4671 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4672
4673 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
4674 REG_WR(bp, BAR_TSTRORM_INTMEM +
4675 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4676
4677 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
4678 REG_WR(bp, BAR_CSTRORM_INTMEM +
4679 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4680
4681 REG_WR(bp, BAR_XSTRORM_INTMEM +
4682 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4683 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4684 REG_WR(bp, BAR_XSTRORM_INTMEM +
4685 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4686 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4687
4688 REG_WR(bp, BAR_TSTRORM_INTMEM +
4689 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4690 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4691 REG_WR(bp, BAR_TSTRORM_INTMEM +
4692 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4693 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4694}
4695
4696static void bnx2x_stop_stats(struct bnx2x *bp)
4697{
4698 might_sleep();
4699 if (bp->stats_state != STATS_STATE_DISABLE) {
4700 int timeout = 10;
4701
4702 bp->stats_state = STATS_STATE_STOP;
4703 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4704
4705 while (bp->stats_state != STATS_STATE_DISABLE) {
4706 if (!timeout) {
c14423fe 4707 BNX2X_ERR("timeout waiting for stats stop\n");
a2fbb9ea
ET
4708 break;
4709 }
4710 timeout--;
4711 msleep(100);
4712 }
4713 }
4714 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4715}
4716
4717/*
4718 * Statistics service functions
4719 */
4720
4721static void bnx2x_update_bmac_stats(struct bnx2x *bp)
4722{
4723 struct regp diff;
4724 struct regp sum;
4725 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
4726 struct bmac_stats *old = &bp->old_bmac;
4727 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4728
4729 sum.hi = 0;
4730 sum.lo = 0;
4731
4732 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
4733 tx_gtbyt.lo, total_bytes_transmitted_lo);
4734
4735 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
4736 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
4737 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4738
4739 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
4740 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
4741 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4742
4743 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
4744 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
4745 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
4746 estats->total_unicast_packets_transmitted_lo, sum.lo);
4747
4748 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
4749 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
4750 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
4751 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
4752 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
4753 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
4754 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
4755 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
4756 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
4757 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
4758 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
4759
4760 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
4761 UPDATE_STAT(rx_grund.lo, runt_packets_received);
4762 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
4763 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
4764 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
4765 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
4766 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
4767 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
4768
4769 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
4770 rx_grerb.lo, stat_IfHCInBadOctets_lo);
4771 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
4772 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
4773 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
4774 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
4775 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
4776}
4777
4778static void bnx2x_update_emac_stats(struct bnx2x *bp)
4779{
4780 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
4781 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4782
4783 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
4784 total_bytes_transmitted_lo);
4785 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
4786 total_unicast_packets_transmitted_hi,
4787 total_unicast_packets_transmitted_lo);
4788 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
4789 total_multicast_packets_transmitted_hi,
4790 total_multicast_packets_transmitted_lo);
4791 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
4792 total_broadcast_packets_transmitted_hi,
4793 total_broadcast_packets_transmitted_lo);
4794
4795 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
4796 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4797 estats->single_collision_transmit_frames +=
4798 new->tx_dot3statssinglecollisionframes;
4799 estats->multiple_collision_transmit_frames +=
4800 new->tx_dot3statsmultiplecollisionframes;
4801 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4802 estats->excessive_collision_frames +=
4803 new->tx_dot3statsexcessivecollisions;
4804 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4805 estats->frames_transmitted_65_127_bytes +=
4806 new->tx_etherstatspkts65octetsto127octets;
4807 estats->frames_transmitted_128_255_bytes +=
4808 new->tx_etherstatspkts128octetsto255octets;
4809 estats->frames_transmitted_256_511_bytes +=
4810 new->tx_etherstatspkts256octetsto511octets;
4811 estats->frames_transmitted_512_1023_bytes +=
4812 new->tx_etherstatspkts512octetsto1023octets;
4813 estats->frames_transmitted_1024_1522_bytes +=
4814 new->tx_etherstatspkts1024octetsto1522octet;
4815 estats->frames_transmitted_1523_9022_bytes +=
4816 new->tx_etherstatspktsover1522octets;
4817
4818 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4819 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4820 estats->false_carrier_detections += new->rx_falsecarriererrors;
4821 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4822 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4823 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4824 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4825 estats->control_frames_received += new->rx_maccontrolframesreceived;
4826 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4827 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4828
4829 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4830 stat_IfHCInBadOctets_lo);
4831 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4832 stat_IfHCOutBadOctets_lo);
4833 estats->stat_Dot3statsInternalMacTransmitErrors +=
4834 new->tx_dot3statsinternalmactransmiterrors;
4835 estats->stat_Dot3StatsCarrierSenseErrors +=
4836 new->rx_dot3statscarriersenseerrors;
4837 estats->stat_Dot3StatsDeferredTransmissions +=
4838 new->tx_dot3statsdeferredtransmissions;
4839 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4840 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4841}
4842
4843static int bnx2x_update_storm_stats(struct bnx2x *bp)
4844{
4845 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4846 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4847 struct tstorm_per_client_stats *tclient =
4848 &tstats->client_statistics[0];
4849 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4850 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4851 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4852 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4853 u32 diff;
4854
4855 /* are DMAE stats valid? */
4856 if (nstats->done != 0xffffffff) {
4857 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4858 return -1;
4859 }
4860
4861 /* are storm stats valid? */
4862 if (tstats->done.hi != 0xffffffff) {
4863 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4864 return -2;
4865 }
4866 if (xstats->done.hi != 0xffffffff) {
4867 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4868 return -3;
4869 }
4870
4871 estats->total_bytes_received_hi =
4872 estats->valid_bytes_received_hi =
4873 le32_to_cpu(tclient->total_rcv_bytes.hi);
4874 estats->total_bytes_received_lo =
4875 estats->valid_bytes_received_lo =
4876 le32_to_cpu(tclient->total_rcv_bytes.lo);
4877 ADD_64(estats->total_bytes_received_hi,
4878 le32_to_cpu(tclient->rcv_error_bytes.hi),
4879 estats->total_bytes_received_lo,
4880 le32_to_cpu(tclient->rcv_error_bytes.lo));
4881
4882 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4883 total_unicast_packets_received_hi,
4884 total_unicast_packets_received_lo);
4885 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4886 total_multicast_packets_received_hi,
4887 total_multicast_packets_received_lo);
4888 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4889 total_broadcast_packets_received_hi,
4890 total_broadcast_packets_received_lo);
4891
4892 estats->frames_received_64_bytes = MAC_STX_NA;
4893 estats->frames_received_65_127_bytes = MAC_STX_NA;
4894 estats->frames_received_128_255_bytes = MAC_STX_NA;
4895 estats->frames_received_256_511_bytes = MAC_STX_NA;
4896 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4897 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4898 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4899
4900 estats->x_total_sent_bytes_hi =
4901 le32_to_cpu(xstats->total_sent_bytes.hi);
4902 estats->x_total_sent_bytes_lo =
4903 le32_to_cpu(xstats->total_sent_bytes.lo);
4904 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4905
4906 estats->t_rcv_unicast_bytes_hi =
4907 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4908 estats->t_rcv_unicast_bytes_lo =
4909 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4910 estats->t_rcv_broadcast_bytes_hi =
4911 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4912 estats->t_rcv_broadcast_bytes_lo =
4913 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4914 estats->t_rcv_multicast_bytes_hi =
4915 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4916 estats->t_rcv_multicast_bytes_lo =
4917 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4918 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4919
4920 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
4921 estats->packets_too_big_discard =
4922 le32_to_cpu(tclient->packets_too_big_discard);
4923 estats->jabber_packets_received = estats->packets_too_big_discard +
4924 estats->stat_Dot3statsFramesTooLong;
4925 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
4926 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
4927 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
4928 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
4929 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
4930 estats->brb_truncate_discard =
4931 le32_to_cpu(tstats->brb_truncate_discard);
4932
4933 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
4934 bp->old_brb_discard = nstats->brb_discard;
4935
4936 estats->brb_packet = nstats->brb_packet;
4937 estats->brb_truncate = nstats->brb_truncate;
4938 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
4939 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
4940 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
4941 estats->mng_discard = nstats->mng_discard;
4942 estats->mng_octet_inp = nstats->mng_octet_inp;
4943 estats->mng_octet_out = nstats->mng_octet_out;
4944 estats->mng_packet_inp = nstats->mng_packet_inp;
4945 estats->mng_packet_out = nstats->mng_packet_out;
4946 estats->pbf_octets = nstats->pbf_octets;
4947 estats->pbf_packet = nstats->pbf_packet;
4948 estats->safc_inp = nstats->safc_inp;
4949
4950 xstats->done.hi = 0;
4951 tstats->done.hi = 0;
4952 nstats->done = 0;
4953
4954 return 0;
4955}
4956
4957static void bnx2x_update_net_stats(struct bnx2x *bp)
4958{
4959 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4960 struct net_device_stats *nstats = &bp->dev->stats;
4961
4962 nstats->rx_packets =
4963 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4964 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4965 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4966
4967 nstats->tx_packets =
4968 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4969 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4970 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4971
4972 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4973
4974 nstats->tx_bytes =
4975 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4976
4977 nstats->rx_dropped = estats->checksum_discard +
4978 estats->mac_discard;
4979 nstats->tx_dropped = 0;
4980
4981 nstats->multicast =
4982 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4983
4984 nstats->collisions =
4985 estats->single_collision_transmit_frames +
4986 estats->multiple_collision_transmit_frames +
4987 estats->late_collision_frames +
4988 estats->excessive_collision_frames;
4989
4990 nstats->rx_length_errors = estats->runt_packets_received +
4991 estats->jabber_packets_received;
4992 nstats->rx_over_errors = estats->no_buff_discard;
4993 nstats->rx_crc_errors = estats->crc_receive_errors;
4994 nstats->rx_frame_errors = estats->alignment_errors;
4995 nstats->rx_fifo_errors = estats->brb_discard +
4996 estats->brb_truncate_discard;
4997 nstats->rx_missed_errors = estats->xxoverflow_discard;
4998
4999 nstats->rx_errors = nstats->rx_length_errors +
5000 nstats->rx_over_errors +
5001 nstats->rx_crc_errors +
5002 nstats->rx_frame_errors +
5003 nstats->rx_fifo_errors;
5004
5005 nstats->tx_aborted_errors = estats->late_collision_frames +
5006 estats->excessive_collision_frames;
5007 nstats->tx_carrier_errors = estats->false_carrier_detections;
5008 nstats->tx_fifo_errors = 0;
5009 nstats->tx_heartbeat_errors = 0;
5010 nstats->tx_window_errors = 0;
5011
5012 nstats->tx_errors = nstats->tx_aborted_errors +
5013 nstats->tx_carrier_errors;
5014
5015 estats->mac_stx_start = ++estats->mac_stx_end;
5016}
5017
5018static void bnx2x_update_stats(struct bnx2x *bp)
5019{
5020 int i;
5021
5022 if (!bnx2x_update_storm_stats(bp)) {
5023
5024 if (bp->phy_flags & PHY_BMAC_FLAG) {
5025 bnx2x_update_bmac_stats(bp);
5026
5027 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
5028 bnx2x_update_emac_stats(bp);
5029
5030 } else { /* unreached */
5031 BNX2X_ERR("no MAC active\n");
5032 return;
5033 }
5034
5035 bnx2x_update_net_stats(bp);
5036 }
5037
5038 if (bp->msglevel & NETIF_MSG_TIMER) {
5039 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5040 struct net_device_stats *nstats = &bp->dev->stats;
5041
5042 printk(KERN_DEBUG "%s:\n", bp->dev->name);
5043 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
5044 " tx pkt (%lx)\n",
5045 bnx2x_tx_avail(bp->fp),
5046 *bp->fp->tx_cons_sb, nstats->tx_packets);
5047 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
5048 " rx pkt (%lx)\n",
5049 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
5050 *bp->fp->rx_cons_sb, nstats->rx_packets);
5051 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
5052 netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
5053 estats->driver_xoff, estats->brb_discard);
5054 printk(KERN_DEBUG "tstats: checksum_discard %u "
5055 "packets_too_big_discard %u no_buff_discard %u "
5056 "mac_discard %u mac_filter_discard %u "
5057 "xxovrflow_discard %u brb_truncate_discard %u "
5058 "ttl0_discard %u\n",
5059 estats->checksum_discard,
5060 estats->packets_too_big_discard,
5061 estats->no_buff_discard, estats->mac_discard,
5062 estats->mac_filter_discard, estats->xxoverflow_discard,
5063 estats->brb_truncate_discard, estats->ttl0_discard);
5064
5065 for_each_queue(bp, i) {
5066 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
5067 bnx2x_fp(bp, i, tx_pkt),
5068 bnx2x_fp(bp, i, rx_pkt),
5069 bnx2x_fp(bp, i, rx_calls));
5070 }
5071 }
5072
5073 if (bp->state != BNX2X_STATE_OPEN) {
5074 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
5075 return;
5076 }
5077
5078#ifdef BNX2X_STOP_ON_ERROR
5079 if (unlikely(bp->panic))
5080 return;
5081#endif
5082
5083 /* loader */
5084 if (bp->executer_idx) {
5085 struct dmae_command *dmae = &bp->dmae;
5086 int port = bp->port;
5087 int loader_idx = port * 8;
5088
5089 memset(dmae, 0, sizeof(struct dmae_command));
5090
5091 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
5092 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
5093 DMAE_CMD_DST_RESET |
5094#ifdef __BIG_ENDIAN
5095 DMAE_CMD_ENDIANITY_B_DW_SWAP |
5096#else
5097 DMAE_CMD_ENDIANITY_DW_SWAP |
5098#endif
5099 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
5100 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
5101 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
5102 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
5103 sizeof(struct dmae_command) *
5104 (loader_idx + 1)) >> 2;
5105 dmae->dst_addr_hi = 0;
5106 dmae->len = sizeof(struct dmae_command) >> 2;
5107 dmae->len--; /* !!! for A0/1 only */
5108 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
5109 dmae->comp_addr_hi = 0;
5110 dmae->comp_val = 1;
5111
5112 bnx2x_post_dmae(bp, dmae, loader_idx);
5113 }
5114
5115 if (bp->stats_state != STATS_STATE_ENABLE) {
5116 bp->stats_state = STATS_STATE_DISABLE;
5117 return;
5118 }
5119
5120 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
5121 /* stats ramrod has it's own slot on the spe */
5122 bp->spq_left++;
5123 bp->stat_pending = 1;
5124 }
5125}
5126
5127static void bnx2x_timer(unsigned long data)
5128{
5129 struct bnx2x *bp = (struct bnx2x *) data;
5130
5131 if (!netif_running(bp->dev))
5132 return;
5133
5134 if (atomic_read(&bp->intr_sem) != 0)
f1410647 5135 goto timer_restart;
a2fbb9ea
ET
5136
5137 if (poll) {
5138 struct bnx2x_fastpath *fp = &bp->fp[0];
5139 int rc;
5140
5141 bnx2x_tx_int(fp, 1000);
5142 rc = bnx2x_rx_int(fp, 1000);
5143 }
5144
f1410647 5145 if (!nomcp) {
a2fbb9ea
ET
5146 int port = bp->port;
5147 u32 drv_pulse;
5148 u32 mcp_pulse;
5149
5150 ++bp->fw_drv_pulse_wr_seq;
5151 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5152 /* TBD - add SYSTEM_TIME */
5153 drv_pulse = bp->fw_drv_pulse_wr_seq;
f1410647 5154 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
a2fbb9ea 5155
f1410647 5156 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
a2fbb9ea
ET
5157 MCP_PULSE_SEQ_MASK);
5158 /* The delta between driver pulse and mcp response
5159 * should be 1 (before mcp response) or 0 (after mcp response)
5160 */
5161 if ((drv_pulse != mcp_pulse) &&
5162 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5163 /* someone lost a heartbeat... */
5164 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5165 drv_pulse, mcp_pulse);
5166 }
5167 }
5168
5169 if (bp->stats_state == STATS_STATE_DISABLE)
f1410647 5170 goto timer_restart;
a2fbb9ea
ET
5171
5172 bnx2x_update_stats(bp);
5173
f1410647 5174timer_restart:
a2fbb9ea
ET
5175 mod_timer(&bp->timer, jiffies + bp->current_interval);
5176}
5177
5178/* end of Statistics */
5179
5180/* nic init */
5181
5182/*
5183 * nic init service functions
5184 */
5185
5186static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5187 dma_addr_t mapping, int id)
5188{
5189 int port = bp->port;
5190 u64 section;
5191 int index;
5192
5193 /* USTORM */
5194 section = ((u64)mapping) + offsetof(struct host_status_block,
5195 u_status_block);
5196 sb->u_status_block.status_block_id = id;
5197
5198 REG_WR(bp, BAR_USTRORM_INTMEM +
5199 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5200 REG_WR(bp, BAR_USTRORM_INTMEM +
5201 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5202 U64_HI(section));
5203
5204 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5205 REG_WR16(bp, BAR_USTRORM_INTMEM +
5206 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5207
5208 /* CSTORM */
5209 section = ((u64)mapping) + offsetof(struct host_status_block,
5210 c_status_block);
5211 sb->c_status_block.status_block_id = id;
5212
5213 REG_WR(bp, BAR_CSTRORM_INTMEM +
5214 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5215 REG_WR(bp, BAR_CSTRORM_INTMEM +
5216 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5217 U64_HI(section));
5218
5219 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5220 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5221 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5222
5223 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5224}
5225
5226static void bnx2x_init_def_sb(struct bnx2x *bp,
5227 struct host_def_status_block *def_sb,
5228 dma_addr_t mapping, int id)
5229{
5230 int port = bp->port;
5231 int index, val, reg_offset;
5232 u64 section;
5233
5234 /* ATTN */
5235 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5236 atten_status_block);
5237 def_sb->atten_status_block.status_block_id = id;
5238
5239 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5240 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5241
5242 for (index = 0; index < 3; index++) {
5243 bp->attn_group[index].sig[0] = REG_RD(bp,
5244 reg_offset + 0x10*index);
5245 bp->attn_group[index].sig[1] = REG_RD(bp,
5246 reg_offset + 0x4 + 0x10*index);
5247 bp->attn_group[index].sig[2] = REG_RD(bp,
5248 reg_offset + 0x8 + 0x10*index);
5249 bp->attn_group[index].sig[3] = REG_RD(bp,
5250 reg_offset + 0xc + 0x10*index);
5251 }
5252
5253 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5254 MISC_REG_AEU_MASK_ATTN_FUNC_0));
5255
5256 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5257 HC_REG_ATTN_MSG0_ADDR_L);
5258
5259 REG_WR(bp, reg_offset, U64_LO(section));
5260 REG_WR(bp, reg_offset + 4, U64_HI(section));
5261
5262 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5263
5264 val = REG_RD(bp, reg_offset);
5265 val |= id;
5266 REG_WR(bp, reg_offset, val);
5267
5268 /* USTORM */
5269 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5270 u_def_status_block);
5271 def_sb->u_def_status_block.status_block_id = id;
5272
5273 REG_WR(bp, BAR_USTRORM_INTMEM +
5274 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5275 REG_WR(bp, BAR_USTRORM_INTMEM +
5276 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5277 U64_HI(section));
5278 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
5279 BNX2X_BTR);
5280
5281 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5282 REG_WR16(bp, BAR_USTRORM_INTMEM +
5283 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5284
5285 /* CSTORM */
5286 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5287 c_def_status_block);
5288 def_sb->c_def_status_block.status_block_id = id;
5289
5290 REG_WR(bp, BAR_CSTRORM_INTMEM +
5291 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5292 REG_WR(bp, BAR_CSTRORM_INTMEM +
5293 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5294 U64_HI(section));
5295 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
5296 BNX2X_BTR);
5297
5298 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5299 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5300 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5301
5302 /* TSTORM */
5303 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5304 t_def_status_block);
5305 def_sb->t_def_status_block.status_block_id = id;
5306
5307 REG_WR(bp, BAR_TSTRORM_INTMEM +
5308 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5309 REG_WR(bp, BAR_TSTRORM_INTMEM +
5310 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5311 U64_HI(section));
5312 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
5313 BNX2X_BTR);
5314
5315 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5316 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5317 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5318
5319 /* XSTORM */
5320 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5321 x_def_status_block);
5322 def_sb->x_def_status_block.status_block_id = id;
5323
5324 REG_WR(bp, BAR_XSTRORM_INTMEM +
5325 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5326 REG_WR(bp, BAR_XSTRORM_INTMEM +
5327 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5328 U64_HI(section));
5329 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
5330 BNX2X_BTR);
5331
5332 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5333 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5334 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5335
5336 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5337}
5338
5339static void bnx2x_update_coalesce(struct bnx2x *bp)
5340{
5341 int port = bp->port;
5342 int i;
5343
5344 for_each_queue(bp, i) {
5345
5346 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5347 REG_WR8(bp, BAR_USTRORM_INTMEM +
5348 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5349 HC_INDEX_U_ETH_RX_CQ_CONS),
5350 bp->rx_ticks_int/12);
5351 REG_WR16(bp, BAR_USTRORM_INTMEM +
5352 USTORM_SB_HC_DISABLE_OFFSET(port, i,
5353 HC_INDEX_U_ETH_RX_CQ_CONS),
5354 bp->rx_ticks_int ? 0 : 1);
5355
5356 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5357 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5358 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5359 HC_INDEX_C_ETH_TX_CQ_CONS),
5360 bp->tx_ticks_int/12);
5361 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5362 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5363 HC_INDEX_C_ETH_TX_CQ_CONS),
5364 bp->tx_ticks_int ? 0 : 1);
5365 }
5366}
5367
5368static void bnx2x_init_rx_rings(struct bnx2x *bp)
5369{
5370 u16 ring_prod;
5371 int i, j;
5372 int port = bp->port;
5373
5374 bp->rx_buf_use_size = bp->dev->mtu;
5375
5376 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5377 bp->rx_buf_size = bp->rx_buf_use_size + 64;
5378
5379 for_each_queue(bp, j) {
5380 struct bnx2x_fastpath *fp = &bp->fp[j];
5381
5382 fp->rx_bd_cons = 0;
5383 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5384
5385 for (i = 1; i <= NUM_RX_RINGS; i++) {
5386 struct eth_rx_bd *rx_bd;
5387
5388 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5389 rx_bd->addr_hi =
5390 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5391 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5392 rx_bd->addr_lo =
5393 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5394 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5395
5396 }
5397
5398 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5399 struct eth_rx_cqe_next_page *nextpg;
5400
5401 nextpg = (struct eth_rx_cqe_next_page *)
5402 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5403 nextpg->addr_hi =
5404 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5405 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5406 nextpg->addr_lo =
5407 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5408 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5409 }
5410
5411 /* rx completion queue */
5412 fp->rx_comp_cons = ring_prod = 0;
5413
5414 for (i = 0; i < bp->rx_ring_size; i++) {
5415 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5416 BNX2X_ERR("was only able to allocate "
5417 "%d rx skbs\n", i);
5418 break;
5419 }
5420 ring_prod = NEXT_RX_IDX(ring_prod);
5421 BUG_TRAP(ring_prod > i);
5422 }
5423
5424 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
5425 fp->rx_pkt = fp->rx_calls = 0;
5426
c14423fe 5427 /* Warning! this will generate an interrupt (to the TSTORM) */
a2fbb9ea
ET
5428 /* must only be done when chip is initialized */
5429 REG_WR(bp, BAR_TSTRORM_INTMEM +
5430 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
5431 if (j != 0)
5432 continue;
5433
5434 REG_WR(bp, BAR_USTRORM_INTMEM +
5435 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
5436 U64_LO(fp->rx_comp_mapping));
5437 REG_WR(bp, BAR_USTRORM_INTMEM +
5438 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
5439 U64_HI(fp->rx_comp_mapping));
5440 }
5441}
5442
5443static void bnx2x_init_tx_ring(struct bnx2x *bp)
5444{
5445 int i, j;
5446
5447 for_each_queue(bp, j) {
5448 struct bnx2x_fastpath *fp = &bp->fp[j];
5449
5450 for (i = 1; i <= NUM_TX_RINGS; i++) {
5451 struct eth_tx_bd *tx_bd =
5452 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
5453
5454 tx_bd->addr_hi =
5455 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5456 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5457 tx_bd->addr_lo =
5458 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5459 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5460 }
5461
5462 fp->tx_pkt_prod = 0;
5463 fp->tx_pkt_cons = 0;
5464 fp->tx_bd_prod = 0;
5465 fp->tx_bd_cons = 0;
5466 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5467 fp->tx_pkt = 0;
5468 }
5469}
5470
5471static void bnx2x_init_sp_ring(struct bnx2x *bp)
5472{
5473 int port = bp->port;
5474
5475 spin_lock_init(&bp->spq_lock);
5476
5477 bp->spq_left = MAX_SPQ_PENDING;
5478 bp->spq_prod_idx = 0;
5479 bp->dsb_sp_prod_idx = 0;
5480 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5481 bp->spq_prod_bd = bp->spq;
5482 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5483
5484 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
5485 U64_LO(bp->spq_mapping));
5486 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
5487 U64_HI(bp->spq_mapping));
5488
5489 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
5490 bp->spq_prod_idx);
5491}
5492
5493static void bnx2x_init_context(struct bnx2x *bp)
5494{
5495 int i;
5496
5497 for_each_queue(bp, i) {
5498 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5499 struct bnx2x_fastpath *fp = &bp->fp[i];
5500
5501 context->xstorm_st_context.tx_bd_page_base_hi =
5502 U64_HI(fp->tx_desc_mapping);
5503 context->xstorm_st_context.tx_bd_page_base_lo =
5504 U64_LO(fp->tx_desc_mapping);
5505 context->xstorm_st_context.db_data_addr_hi =
5506 U64_HI(fp->tx_prods_mapping);
5507 context->xstorm_st_context.db_data_addr_lo =
5508 U64_LO(fp->tx_prods_mapping);
5509
5510 context->ustorm_st_context.rx_bd_page_base_hi =
5511 U64_HI(fp->rx_desc_mapping);
5512 context->ustorm_st_context.rx_bd_page_base_lo =
5513 U64_LO(fp->rx_desc_mapping);
5514 context->ustorm_st_context.status_block_id = i;
5515 context->ustorm_st_context.sb_index_number =
5516 HC_INDEX_U_ETH_RX_CQ_CONS;
5517 context->ustorm_st_context.rcq_base_address_hi =
5518 U64_HI(fp->rx_comp_mapping);
5519 context->ustorm_st_context.rcq_base_address_lo =
5520 U64_LO(fp->rx_comp_mapping);
5521 context->ustorm_st_context.flags =
5522 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
5523 context->ustorm_st_context.mc_alignment_size = 64;
5524 context->ustorm_st_context.num_rss = bp->num_queues;
5525
5526 context->cstorm_st_context.sb_index_number =
5527 HC_INDEX_C_ETH_TX_CQ_CONS;
5528 context->cstorm_st_context.status_block_id = i;
5529
5530 context->xstorm_ag_context.cdu_reserved =
5531 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5532 CDU_REGION_NUMBER_XCM_AG,
5533 ETH_CONNECTION_TYPE);
5534 context->ustorm_ag_context.cdu_usage =
5535 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5536 CDU_REGION_NUMBER_UCM_AG,
5537 ETH_CONNECTION_TYPE);
5538 }
5539}
5540
5541static void bnx2x_init_ind_table(struct bnx2x *bp)
5542{
5543 int port = bp->port;
5544 int i;
5545
5546 if (!is_multi(bp))
5547 return;
5548
5549 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5550 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
5551 i % bp->num_queues);
5552
5553 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5554}
5555
5556static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5557{
5558 int mode = bp->rx_mode;
5559 int port = bp->port;
5560 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5561 int i;
5562
5563 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
5564
5565 switch (mode) {
5566 case BNX2X_RX_MODE_NONE: /* no Rx */
5567 tstorm_mac_filter.ucast_drop_all = 1;
5568 tstorm_mac_filter.mcast_drop_all = 1;
5569 tstorm_mac_filter.bcast_drop_all = 1;
5570 break;
5571 case BNX2X_RX_MODE_NORMAL:
5572 tstorm_mac_filter.bcast_accept_all = 1;
5573 break;
5574 case BNX2X_RX_MODE_ALLMULTI:
5575 tstorm_mac_filter.mcast_accept_all = 1;
5576 tstorm_mac_filter.bcast_accept_all = 1;
5577 break;
5578 case BNX2X_RX_MODE_PROMISC:
5579 tstorm_mac_filter.ucast_accept_all = 1;
5580 tstorm_mac_filter.mcast_accept_all = 1;
5581 tstorm_mac_filter.bcast_accept_all = 1;
5582 break;
5583 default:
5584 BNX2X_ERR("bad rx mode (%d)\n", mode);
5585 }
5586
5587 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5588 REG_WR(bp, BAR_TSTRORM_INTMEM +
5589 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
5590 ((u32 *)&tstorm_mac_filter)[i]);
5591
5592/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5593 ((u32 *)&tstorm_mac_filter)[i]); */
5594 }
5595}
5596
5597static void bnx2x_set_client_config(struct bnx2x *bp, int client_id)
5598{
5599#ifdef BCM_VLAN
5600 int mode = bp->rx_mode;
5601#endif
5602 int port = bp->port;
5603 struct tstorm_eth_client_config tstorm_client = {0};
5604
5605 tstorm_client.mtu = bp->dev->mtu;
5606 tstorm_client.statistics_counter_id = 0;
5607 tstorm_client.config_flags =
5608 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5609#ifdef BCM_VLAN
5610 if (mode && bp->vlgrp) {
5611 tstorm_client.config_flags |=
5612 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5613 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5614 }
5615#endif
5616 tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR |
5617 TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR |
5618 TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR |
5619 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR);
5620
5621 REG_WR(bp, BAR_TSTRORM_INTMEM +
5622 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id),
5623 ((u32 *)&tstorm_client)[0]);
5624 REG_WR(bp, BAR_TSTRORM_INTMEM +
5625 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4,
5626 ((u32 *)&tstorm_client)[1]);
5627
5628/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5629 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5630}
5631
5632static void bnx2x_init_internal(struct bnx2x *bp)
5633{
5634 int port = bp->port;
5635 struct tstorm_eth_function_common_config tstorm_config = {0};
5636 struct stats_indication_flags stats_flags = {0};
5637 int i;
5638
5639 if (is_multi(bp)) {
5640 tstorm_config.config_flags = MULTI_FLAGS;
5641 tstorm_config.rss_result_mask = MULTI_MASK;
5642 }
5643
5644 REG_WR(bp, BAR_TSTRORM_INTMEM +
5645 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
5646 (*(u32 *)&tstorm_config));
5647
5648/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
5649 (*(u32 *)&tstorm_config)); */
5650
c14423fe 5651 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
5652 bnx2x_set_storm_rx_mode(bp);
5653
5654 for_each_queue(bp, i)
5655 bnx2x_set_client_config(bp, i);
5656
5657
5658 stats_flags.collect_eth = cpu_to_le32(1);
5659
5660 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
5661 ((u32 *)&stats_flags)[0]);
5662 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
5663 ((u32 *)&stats_flags)[1]);
5664
5665 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
5666 ((u32 *)&stats_flags)[0]);
5667 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
5668 ((u32 *)&stats_flags)[1]);
5669
5670 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
5671 ((u32 *)&stats_flags)[0]);
5672 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
5673 ((u32 *)&stats_flags)[1]);
5674
5675/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
5676 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
5677}
5678
5679static void bnx2x_nic_init(struct bnx2x *bp)
5680{
5681 int i;
5682
5683 for_each_queue(bp, i) {
5684 struct bnx2x_fastpath *fp = &bp->fp[i];
5685
5686 fp->state = BNX2X_FP_STATE_CLOSED;
5687 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
5688 bp, fp->status_blk, i);
5689 fp->index = i;
5690 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
5691 }
5692
5693 bnx2x_init_def_sb(bp, bp->def_status_blk,
5694 bp->def_status_blk_mapping, 0x10);
5695 bnx2x_update_coalesce(bp);
5696 bnx2x_init_rx_rings(bp);
5697 bnx2x_init_tx_ring(bp);
5698 bnx2x_init_sp_ring(bp);
5699 bnx2x_init_context(bp);
5700 bnx2x_init_internal(bp);
5701 bnx2x_init_stats(bp);
5702 bnx2x_init_ind_table(bp);
5703 bnx2x_enable_int(bp);
5704
5705}
5706
5707/* end of nic init */
5708
5709/*
5710 * gzip service functions
5711 */
5712
5713static int bnx2x_gunzip_init(struct bnx2x *bp)
5714{
5715 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5716 &bp->gunzip_mapping);
5717 if (bp->gunzip_buf == NULL)
5718 goto gunzip_nomem1;
5719
5720 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5721 if (bp->strm == NULL)
5722 goto gunzip_nomem2;
5723
5724 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5725 GFP_KERNEL);
5726 if (bp->strm->workspace == NULL)
5727 goto gunzip_nomem3;
5728
5729 return 0;
5730
5731gunzip_nomem3:
5732 kfree(bp->strm);
5733 bp->strm = NULL;
5734
5735gunzip_nomem2:
5736 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5737 bp->gunzip_mapping);
5738 bp->gunzip_buf = NULL;
5739
5740gunzip_nomem1:
5741 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5742 " uncompression\n", bp->dev->name);
5743 return -ENOMEM;
5744}
5745
5746static void bnx2x_gunzip_end(struct bnx2x *bp)
5747{
5748 kfree(bp->strm->workspace);
5749
5750 kfree(bp->strm);
5751 bp->strm = NULL;
5752
5753 if (bp->gunzip_buf) {
5754 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5755 bp->gunzip_mapping);
5756 bp->gunzip_buf = NULL;
5757 }
5758}
5759
5760static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5761{
5762 int n, rc;
5763
5764 /* check gzip header */
5765 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5766 return -EINVAL;
5767
5768 n = 10;
5769
5770#define FNAME 0x8
5771
5772 if (zbuf[3] & FNAME)
5773 while ((zbuf[n++] != 0) && (n < len));
5774
5775 bp->strm->next_in = zbuf + n;
5776 bp->strm->avail_in = len - n;
5777 bp->strm->next_out = bp->gunzip_buf;
5778 bp->strm->avail_out = FW_BUF_SIZE;
5779
5780 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5781 if (rc != Z_OK)
5782 return rc;
5783
5784 rc = zlib_inflate(bp->strm, Z_FINISH);
5785 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5786 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5787 bp->dev->name, bp->strm->msg);
5788
5789 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5790 if (bp->gunzip_outlen & 0x3)
5791 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5792 " gunzip_outlen (%d) not aligned\n",
5793 bp->dev->name, bp->gunzip_outlen);
5794 bp->gunzip_outlen >>= 2;
5795
5796 zlib_inflateEnd(bp->strm);
5797
5798 if (rc == Z_STREAM_END)
5799 return 0;
5800
5801 return rc;
5802}
5803
5804/* nic load/unload */
5805
5806/*
5807 * general service functions
5808 */
5809
5810/* send a NIG loopback debug packet */
5811static void bnx2x_lb_pckt(struct bnx2x *bp)
5812{
5813#ifdef USE_DMAE
5814 u32 wb_write[3];
5815#endif
5816
5817 /* Ethernet source and destination addresses */
5818#ifdef USE_DMAE
5819 wb_write[0] = 0x55555555;
5820 wb_write[1] = 0x55555555;
5821 wb_write[2] = 0x20; /* SOP */
5822 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5823#else
5824 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5825 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5826 /* SOP */
5827 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5828#endif
5829
5830 /* NON-IP protocol */
5831#ifdef USE_DMAE
5832 wb_write[0] = 0x09000000;
5833 wb_write[1] = 0x55555555;
5834 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5835 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5836#else
5837 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5838 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5839 /* EOP, eop_bvalid = 0 */
5840 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5841#endif
5842}
5843
5844/* some of the internal memories
5845 * are not directly readable from the driver
5846 * to test them we send debug packets
5847 */
5848static int bnx2x_int_mem_test(struct bnx2x *bp)
5849{
5850 int factor;
5851 int count, i;
5852 u32 val = 0;
5853
5854 switch (CHIP_REV(bp)) {
5855 case CHIP_REV_EMUL:
5856 factor = 200;
5857 break;
5858 case CHIP_REV_FPGA:
5859 factor = 120;
5860 break;
5861 default:
5862 factor = 1;
5863 break;
5864 }
5865
5866 DP(NETIF_MSG_HW, "start part1\n");
5867
5868 /* Disable inputs of parser neighbor blocks */
5869 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5870 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5871 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5872 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5873
5874 /* Write 0 to parser credits for CFC search request */
5875 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5876
5877 /* send Ethernet packet */
5878 bnx2x_lb_pckt(bp);
5879
5880 /* TODO do i reset NIG statistic? */
5881 /* Wait until NIG register shows 1 packet of size 0x10 */
5882 count = 1000 * factor;
5883 while (count) {
5884#ifdef BNX2X_DMAE_RD
5885 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5886 val = *bnx2x_sp(bp, wb_data[0]);
5887#else
5888 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5889 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5890#endif
5891 if (val == 0x10)
5892 break;
5893
5894 msleep(10);
5895 count--;
5896 }
5897 if (val != 0x10) {
5898 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5899 return -1;
5900 }
5901
5902 /* Wait until PRS register shows 1 packet */
5903 count = 1000 * factor;
5904 while (count) {
5905 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5906
5907 if (val == 1)
5908 break;
5909
5910 msleep(10);
5911 count--;
5912 }
5913 if (val != 0x1) {
5914 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5915 return -2;
5916 }
5917
5918 /* Reset and init BRB, PRS */
5919 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
5920 msleep(50);
5921 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
5922 msleep(50);
5923 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5924 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5925
5926 DP(NETIF_MSG_HW, "part2\n");
5927
5928 /* Disable inputs of parser neighbor blocks */
5929 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5930 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5931 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5932 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5933
5934 /* Write 0 to parser credits for CFC search request */
5935 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5936
5937 /* send 10 Ethernet packets */
5938 for (i = 0; i < 10; i++)
5939 bnx2x_lb_pckt(bp);
5940
5941 /* Wait until NIG register shows 10 + 1
5942 packets of size 11*0x10 = 0xb0 */
5943 count = 1000 * factor;
5944 while (count) {
5945#ifdef BNX2X_DMAE_RD
5946 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5947 val = *bnx2x_sp(bp, wb_data[0]);
5948#else
5949 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5950 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5951#endif
5952 if (val == 0xb0)
5953 break;
5954
5955 msleep(10);
5956 count--;
5957 }
5958 if (val != 0xb0) {
5959 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5960 return -3;
5961 }
5962
5963 /* Wait until PRS register shows 2 packets */
5964 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5965 if (val != 2)
5966 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5967
5968 /* Write 1 to parser credits for CFC search request */
5969 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5970
5971 /* Wait until PRS register shows 3 packets */
5972 msleep(10 * factor);
5973 /* Wait until NIG register shows 1 packet of size 0x10 */
5974 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5975 if (val != 3)
5976 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5977
5978 /* clear NIG EOP FIFO */
5979 for (i = 0; i < 11; i++)
5980 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5981 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5982 if (val != 1) {
5983 BNX2X_ERR("clear of NIG failed\n");
5984 return -4;
5985 }
5986
5987 /* Reset and init BRB, PRS, NIG */
5988 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5989 msleep(50);
5990 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5991 msleep(50);
5992 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5993 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5994#ifndef BCM_ISCSI
5995 /* set NIC mode */
5996 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5997#endif
5998
5999 /* Enable inputs of parser neighbor blocks */
6000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6002 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6003 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
6004
6005 DP(NETIF_MSG_HW, "done\n");
6006
6007 return 0; /* OK */
6008}
6009
6010static void enable_blocks_attention(struct bnx2x *bp)
6011{
6012 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6013 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6014 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6015 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6016 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6017 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6018 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6019 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6020 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6021/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6022/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6023 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6024 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6025 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6026/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6027/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6028 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6029 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6030 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6031 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6032/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6033/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6034 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
6035 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6036 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6037 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6038/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6039/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6040 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6041 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6042/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6043 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6044}
6045
6046static int bnx2x_function_init(struct bnx2x *bp, int mode)
6047{
6048 int func = bp->port;
6049 int port = func ? PORT1 : PORT0;
6050 u32 val, i;
6051#ifdef USE_DMAE
6052 u32 wb_write[2];
6053#endif
6054
6055 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
6056 if ((func != 0) && (func != 1)) {
6057 BNX2X_ERR("BAD function number (%d)\n", func);
6058 return -ENODEV;
6059 }
6060
6061 bnx2x_gunzip_init(bp);
6062
6063 if (mode & 0x1) { /* init common */
6064 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
6065 func, mode);
f1410647
ET
6066 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6067 0xffffffff);
6068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6069 0xfffc);
a2fbb9ea
ET
6070 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
6071
6072 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6073 msleep(30);
6074 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6075
6076 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
6077 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
6078
6079 bnx2x_init_pxp(bp);
6080
6081 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6082 /* enable HW interrupt from PXP on USDM
6083 overflow bit 16 on INT_MASK_0 */
6084 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6085 }
6086
6087#ifdef __BIG_ENDIAN
6088 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6089 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6090 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6091 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6092 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6093 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
6094
6095/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6096 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6097 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6098 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6099 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6100#endif
6101
6102#ifndef BCM_ISCSI
6103 /* set NIC mode */
6104 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6105#endif
6106
6107 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
6108#ifdef BCM_ISCSI
6109 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6110 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6111 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6112#endif
6113
6114 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
6115
6116 /* let the HW do it's magic ... */
6117 msleep(100);
6118 /* finish PXP init
6119 (can be moved up if we want to use the DMAE) */
6120 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6121 if (val != 1) {
6122 BNX2X_ERR("PXP2 CFG failed\n");
6123 return -EBUSY;
6124 }
6125
6126 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6127 if (val != 1) {
6128 BNX2X_ERR("PXP2 RD_INIT failed\n");
6129 return -EBUSY;
6130 }
6131
6132 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6133 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6134
6135 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6136
6137 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
6138 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
6139 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
6140 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
6141
6142#ifdef BNX2X_DMAE_RD
6143 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6144 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6145 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6146 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6147#else
6148 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
6149 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
6150 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
6151 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
6152 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
6153 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
6154 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
6155 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
6156 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
6157 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
6158 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
6159 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
6160#endif
6161 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
c14423fe 6162 /* soft reset pulse */
a2fbb9ea
ET
6163 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6164 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6165
6166#ifdef BCM_ISCSI
6167 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
6168#endif
6169 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
6170 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
6171 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6172 /* enable hw interrupt from doorbell Q */
6173 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6174 }
6175
6176 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6177
6178 if (CHIP_REV_IS_SLOW(bp)) {
6179 /* fix for emulation and FPGA for no pause */
6180 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
6181 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
6182 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
6183 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
6184 }
6185
6186 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6187
6188 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
6189 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
6190 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
6191 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
6192
6193 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6194 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6195 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6196 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6197
6198 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
6199 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
6200 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
6201 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
6202
6203 /* sync semi rtc */
6204 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6205 0x80000000);
6206 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6207 0x80000000);
6208
6209 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
6210 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
6211 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
6212
6213 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6214 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6215 REG_WR(bp, i, 0xc0cac01a);
c14423fe 6216 /* TODO: replace with something meaningful */
a2fbb9ea
ET
6217 }
6218 /* SRCH COMMON comes here */
6219 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6220
6221 if (sizeof(union cdu_context) != 1024) {
6222 /* we currently assume that a context is 1024 bytes */
6223 printk(KERN_ALERT PFX "please adjust the size of"
6224 " cdu_context(%ld)\n",
6225 (long)sizeof(union cdu_context));
6226 }
6227 val = (4 << 24) + (0 << 12) + 1024;
6228 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6229 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
6230
6231 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
6232 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6233
6234 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
6235 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
6236 MISC_AEU_COMMON_END);
6237 /* RXPCS COMMON comes here */
6238 /* EMAC0 COMMON comes here */
6239 /* EMAC1 COMMON comes here */
6240 /* DBU COMMON comes here */
6241 /* DBG COMMON comes here */
6242 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
6243
6244 if (CHIP_REV_IS_SLOW(bp))
6245 msleep(200);
6246
6247 /* finish CFC init */
6248 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
6249 if (val != 1) {
6250 BNX2X_ERR("CFC LL_INIT failed\n");
6251 return -EBUSY;
6252 }
6253
6254 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
6255 if (val != 1) {
6256 BNX2X_ERR("CFC AC_INIT failed\n");
6257 return -EBUSY;
6258 }
6259
6260 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
6261 if (val != 1) {
6262 BNX2X_ERR("CFC CAM_INIT failed\n");
6263 return -EBUSY;
6264 }
6265
6266 REG_WR(bp, CFC_REG_DEBUG0, 0);
6267
6268 /* read NIG statistic
6269 to see if this is our first up since powerup */
6270#ifdef BNX2X_DMAE_RD
6271 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6272 val = *bnx2x_sp(bp, wb_data[0]);
6273#else
6274 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6275 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6276#endif
6277 /* do internal memory self test */
6278 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6279 BNX2X_ERR("internal mem selftest failed\n");
6280 return -EBUSY;
6281 }
6282
6283 /* clear PXP2 attentions */
6284 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
6285
6286 enable_blocks_attention(bp);
6287 /* enable_blocks_parity(bp); */
6288
f1410647
ET
6289 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6290 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6291 /* Fan failure is indicated by SPIO 5 */
6292 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6293 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6294
6295 /* set to active low mode */
6296 val = REG_RD(bp, MISC_REG_SPIO_INT);
6297 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6298 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6299 REG_WR(bp, MISC_REG_SPIO_INT, val);
6300
6301 /* enable interrupt to signal the IGU */
6302 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6303 val |= (1 << MISC_REGISTERS_SPIO_5);
6304 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6305 break;
6306
6307 default:
6308 break;
6309 }
6310
a2fbb9ea
ET
6311 } /* end of common init */
6312
6313 /* per port init */
6314
6315 /* the phys address is shifted right 12 bits and has an added
6316 1=valid bit added to the 53rd bit
6317 then since this is a wide register(TM)
6318 we split it into two 32 bit writes
6319 */
6320#define RQ_ONCHIP_AT_PORT_SIZE 384
6321#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6322#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6323#define PXP_ONE_ILT(x) ((x << 10) | x)
6324
6325 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
6326
6327 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
6328
6329 /* Port PXP comes here */
6330 /* Port PXP2 comes here */
6331
6332 /* Offset is
6333 * Port0 0
6334 * Port1 384 */
6335 i = func * RQ_ONCHIP_AT_PORT_SIZE;
6336#ifdef USE_DMAE
6337 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
6338 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
6339 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6340#else
6341 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
6342 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
6343 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
6344 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
6345#endif
6346 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
6347
6348#ifdef BCM_ISCSI
6349 /* Port0 1
6350 * Port1 385 */
6351 i++;
6352 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6353 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6354 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6355 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6356
6357 /* Port0 2
6358 * Port1 386 */
6359 i++;
6360 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6361 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6362 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6363 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6364
6365 /* Port0 3
6366 * Port1 387 */
6367 i++;
6368 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6369 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6370 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6371 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6372#endif
6373
6374 /* Port TCM comes here */
6375 /* Port UCM comes here */
6376 /* Port CCM comes here */
6377 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
6378 func ? XCM_PORT1_END : XCM_PORT0_END);
6379
6380#ifdef USE_DMAE
6381 wb_write[0] = 0;
6382 wb_write[1] = 0;
6383#endif
6384 for (i = 0; i < 32; i++) {
6385 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
6386#ifdef USE_DMAE
6387 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
6388#else
6389 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
6390 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
6391#endif
6392 }
6393 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
6394
6395 /* Port QM comes here */
6396
6397#ifdef BCM_ISCSI
6398 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6399 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6400
6401 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
6402 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
6403#endif
6404 /* Port DQ comes here */
6405 /* Port BRB1 comes here */
6406 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
6407 func ? PRS_PORT1_END : PRS_PORT0_END);
6408 /* Port TSDM comes here */
6409 /* Port CSDM comes here */
6410 /* Port USDM comes here */
6411 /* Port XSDM comes here */
6412 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
6413 func ? TSEM_PORT1_END : TSEM_PORT0_END);
6414 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
6415 func ? USEM_PORT1_END : USEM_PORT0_END);
6416 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
6417 func ? CSEM_PORT1_END : CSEM_PORT0_END);
6418 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
6419 func ? XSEM_PORT1_END : XSEM_PORT0_END);
6420 /* Port UPB comes here */
6421 /* Port XSDM comes here */
6422 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
6423 func ? PBF_PORT1_END : PBF_PORT0_END);
6424
6425 /* configure PBF to work without PAUSE mtu 9000 */
6426 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
6427
6428 /* update threshold */
6429 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
6430 /* update init credit */
6431 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
6432
6433 /* probe changes */
6434 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
6435 msleep(5);
6436 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
6437
6438#ifdef BCM_ISCSI
6439 /* tell the searcher where the T2 table is */
6440 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6441
6442 wb_write[0] = U64_LO(bp->t2_mapping);
6443 wb_write[1] = U64_HI(bp->t2_mapping);
6444 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6445 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6446 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6447 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6448
6449 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6450 /* Port SRCH comes here */
6451#endif
6452 /* Port CDU comes here */
6453 /* Port CFC comes here */
6454 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
6455 func ? HC_PORT1_END : HC_PORT0_END);
6456 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
6457 MISC_AEU_PORT0_START,
6458 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
6459 /* Port PXPCS comes here */
6460 /* Port EMAC0 comes here */
6461 /* Port EMAC1 comes here */
6462 /* Port DBU comes here */
6463 /* Port DBG comes here */
6464 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
6465 func ? NIG_PORT1_END : NIG_PORT0_END);
6466 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
6467 /* Port MCP comes here */
6468 /* Port DMAE comes here */
6469
f1410647
ET
6470 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6471 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6472 /* add SPIO 5 to group 0 */
6473 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6474 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6475 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6476 break;
6477
6478 default:
6479 break;
6480 }
6481
a2fbb9ea
ET
6482 bnx2x_link_reset(bp);
6483
c14423fe 6484 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6485 REG_WR(bp, 0x2114, 0xffffffff);
6486 REG_WR(bp, 0x2120, 0xffffffff);
6487 REG_WR(bp, 0x2814, 0xffffffff);
6488
6489 /* !!! move to init_values.h */
6490 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6491 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6492 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6493 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6494
6495 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
6496 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
6497 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
6498 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
6499
6500 bnx2x_gunzip_end(bp);
6501
6502 if (!nomcp) {
6503 port = bp->port;
6504
6505 bp->fw_drv_pulse_wr_seq =
f1410647 6506 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
a2fbb9ea 6507 DRV_PULSE_SEQ_MASK);
f1410647 6508 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
a2fbb9ea
ET
6509 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
6510 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
6511 } else {
6512 bp->fw_mb = 0;
6513 }
6514
6515 return 0;
6516}
6517
c14423fe 6518/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6519static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6520{
a2fbb9ea 6521 int port = bp->port;
f1410647
ET
6522 u32 seq = ++bp->fw_seq;
6523 u32 rc = 0;
a2fbb9ea 6524
f1410647
ET
6525 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
6526 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea
ET
6527
6528 /* let the FW do it's magic ... */
6529 msleep(100); /* TBD */
6530
6531 if (CHIP_REV_IS_SLOW(bp))
6532 msleep(900);
6533
f1410647 6534 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
a2fbb9ea
ET
6535 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
6536
6537 /* is this a reply to our command? */
6538 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6539 rc &= FW_MSG_CODE_MASK;
f1410647 6540
a2fbb9ea
ET
6541 } else {
6542 /* FW BUG! */
6543 BNX2X_ERR("FW failed to respond!\n");
6544 bnx2x_fw_dump(bp);
6545 rc = 0;
6546 }
f1410647 6547
a2fbb9ea
ET
6548 return rc;
6549}
6550
6551static void bnx2x_free_mem(struct bnx2x *bp)
6552{
6553
6554#define BNX2X_PCI_FREE(x, y, size) \
6555 do { \
6556 if (x) { \
6557 pci_free_consistent(bp->pdev, size, x, y); \
6558 x = NULL; \
6559 y = 0; \
6560 } \
6561 } while (0)
6562
6563#define BNX2X_FREE(x) \
6564 do { \
6565 if (x) { \
6566 vfree(x); \
6567 x = NULL; \
6568 } \
6569 } while (0)
6570
6571 int i;
6572
6573 /* fastpath */
6574 for_each_queue(bp, i) {
6575
6576 /* Status blocks */
6577 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6578 bnx2x_fp(bp, i, status_blk_mapping),
6579 sizeof(struct host_status_block) +
6580 sizeof(struct eth_tx_db_data));
6581
6582 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6583 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6584 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6585 bnx2x_fp(bp, i, tx_desc_mapping),
6586 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6587
6588 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6589 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6590 bnx2x_fp(bp, i, rx_desc_mapping),
6591 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6592
6593 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6594 bnx2x_fp(bp, i, rx_comp_mapping),
6595 sizeof(struct eth_fast_path_rx_cqe) *
6596 NUM_RCQ_BD);
6597 }
6598
6599 BNX2X_FREE(bp->fp);
6600
6601 /* end of fastpath */
6602
6603 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6604 (sizeof(struct host_def_status_block)));
6605
6606 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6607 (sizeof(struct bnx2x_slowpath)));
6608
6609#ifdef BCM_ISCSI
6610 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6611 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6612 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6613 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6614#endif
6615 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
6616
6617#undef BNX2X_PCI_FREE
6618#undef BNX2X_KFREE
6619}
6620
6621static int bnx2x_alloc_mem(struct bnx2x *bp)
6622{
6623
6624#define BNX2X_PCI_ALLOC(x, y, size) \
6625 do { \
6626 x = pci_alloc_consistent(bp->pdev, size, y); \
6627 if (x == NULL) \
6628 goto alloc_mem_err; \
6629 memset(x, 0, size); \
6630 } while (0)
6631
6632#define BNX2X_ALLOC(x, size) \
6633 do { \
6634 x = vmalloc(size); \
6635 if (x == NULL) \
6636 goto alloc_mem_err; \
6637 memset(x, 0, size); \
6638 } while (0)
6639
6640 int i;
6641
6642 /* fastpath */
6643 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
6644
6645 for_each_queue(bp, i) {
6646 bnx2x_fp(bp, i, bp) = bp;
6647
6648 /* Status blocks */
6649 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6650 &bnx2x_fp(bp, i, status_blk_mapping),
6651 sizeof(struct host_status_block) +
6652 sizeof(struct eth_tx_db_data));
6653
6654 bnx2x_fp(bp, i, hw_tx_prods) =
6655 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6656
6657 bnx2x_fp(bp, i, tx_prods_mapping) =
6658 bnx2x_fp(bp, i, status_blk_mapping) +
6659 sizeof(struct host_status_block);
6660
6661 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6662 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6663 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6664 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6665 &bnx2x_fp(bp, i, tx_desc_mapping),
6666 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6667
6668 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6669 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6670 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6671 &bnx2x_fp(bp, i, rx_desc_mapping),
6672 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6673
6674 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6675 &bnx2x_fp(bp, i, rx_comp_mapping),
6676 sizeof(struct eth_fast_path_rx_cqe) *
6677 NUM_RCQ_BD);
6678
6679 }
6680 /* end of fastpath */
6681
6682 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6683 sizeof(struct host_def_status_block));
6684
6685 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6686 sizeof(struct bnx2x_slowpath));
6687
6688#ifdef BCM_ISCSI
6689 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6690
6691 /* Initialize T1 */
6692 for (i = 0; i < 64*1024; i += 64) {
6693 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6694 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6695 }
6696
6697 /* allocate searcher T2 table
6698 we allocate 1/4 of alloc num for T2
6699 (which is not entered into the ILT) */
6700 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6701
6702 /* Initialize T2 */
6703 for (i = 0; i < 16*1024; i += 64)
6704 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6705
c14423fe 6706 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6707 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6708
6709 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6710 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6711
6712 /* QM queues (128*MAX_CONN) */
6713 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6714#endif
6715
6716 /* Slow path ring */
6717 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6718
6719 return 0;
6720
6721alloc_mem_err:
6722 bnx2x_free_mem(bp);
6723 return -ENOMEM;
6724
6725#undef BNX2X_PCI_ALLOC
6726#undef BNX2X_ALLOC
6727}
6728
6729static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6730{
6731 int i;
6732
6733 for_each_queue(bp, i) {
6734 struct bnx2x_fastpath *fp = &bp->fp[i];
6735
6736 u16 bd_cons = fp->tx_bd_cons;
6737 u16 sw_prod = fp->tx_pkt_prod;
6738 u16 sw_cons = fp->tx_pkt_cons;
6739
6740 BUG_TRAP(fp->tx_buf_ring != NULL);
6741
6742 while (sw_cons != sw_prod) {
6743 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6744 sw_cons++;
6745 }
6746 }
6747}
6748
6749static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6750{
6751 int i, j;
6752
6753 for_each_queue(bp, j) {
6754 struct bnx2x_fastpath *fp = &bp->fp[j];
6755
6756 BUG_TRAP(fp->rx_buf_ring != NULL);
6757
6758 for (i = 0; i < NUM_RX_BD; i++) {
6759 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6760 struct sk_buff *skb = rx_buf->skb;
6761
6762 if (skb == NULL)
6763 continue;
6764
6765 pci_unmap_single(bp->pdev,
6766 pci_unmap_addr(rx_buf, mapping),
6767 bp->rx_buf_use_size,
6768 PCI_DMA_FROMDEVICE);
6769
6770 rx_buf->skb = NULL;
6771 dev_kfree_skb(skb);
6772 }
6773 }
6774}
6775
6776static void bnx2x_free_skbs(struct bnx2x *bp)
6777{
6778 bnx2x_free_tx_skbs(bp);
6779 bnx2x_free_rx_skbs(bp);
6780}
6781
6782static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6783{
6784 int i;
6785
6786 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6787 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6788 bp->msix_table[0].vector);
6789
6790 for_each_queue(bp, i) {
c14423fe 6791 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
a2fbb9ea
ET
6792 "state(%x)\n", i, bp->msix_table[i + 1].vector,
6793 bnx2x_fp(bp, i, state));
6794
6795 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
6796
6797 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
6798 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
6799
6800 } else
6801 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
6802
6803 }
6804
6805}
6806
6807static void bnx2x_free_irq(struct bnx2x *bp)
6808{
6809
6810 if (bp->flags & USING_MSIX_FLAG) {
6811
6812 bnx2x_free_msix_irqs(bp);
6813 pci_disable_msix(bp->pdev);
6814
6815 bp->flags &= ~USING_MSIX_FLAG;
6816
6817 } else
6818 free_irq(bp->pdev->irq, bp->dev);
6819}
6820
6821static int bnx2x_enable_msix(struct bnx2x *bp)
6822{
6823
6824 int i;
6825
6826 bp->msix_table[0].entry = 0;
6827 for_each_queue(bp, i)
6828 bp->msix_table[i + 1].entry = i + 1;
6829
6830 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
6831 bp->num_queues + 1)){
6832 BNX2X_ERR("failed to enable msix\n");
6833 return -1;
6834
6835 }
6836
6837 bp->flags |= USING_MSIX_FLAG;
6838
6839 return 0;
6840
6841}
6842
6843
6844static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6845{
6846
a2fbb9ea
ET
6847 int i, rc;
6848
6849 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6850
6851 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6852 bp->dev->name, bp->dev);
6853
6854 if (rc) {
6855 BNX2X_ERR("request sp irq failed\n");
6856 return -EBUSY;
6857 }
6858
6859 for_each_queue(bp, i) {
6860 rc = request_irq(bp->msix_table[i + 1].vector,
6861 bnx2x_msix_fp_int, 0,
6862 bp->dev->name, &bp->fp[i]);
6863
6864 if (rc) {
6865 BNX2X_ERR("request fp #%d irq failed\n", i);
6866 bnx2x_free_msix_irqs(bp);
6867 return -EBUSY;
6868 }
6869
6870 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6871
6872 }
6873
6874 return 0;
6875
6876}
6877
6878static int bnx2x_req_irq(struct bnx2x *bp)
6879{
6880
6881 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6882 IRQF_SHARED, bp->dev->name, bp->dev);
6883 if (!rc)
6884 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6885
6886 return rc;
6887
6888}
6889
6890/*
6891 * Init service functions
6892 */
6893
6894static void bnx2x_set_mac_addr(struct bnx2x *bp)
6895{
6896 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6897
6898 /* CAM allocation
6899 * unicasts 0-31:port0 32-63:port1
6900 * multicast 64-127:port0 128-191:port1
6901 */
6902 config->hdr.length_6b = 2;
6903 config->hdr.offset = bp->port ? 31 : 0;
6904 config->hdr.reserved0 = 0;
6905 config->hdr.reserved1 = 0;
6906
6907 /* primary MAC */
6908 config->config_table[0].cam_entry.msb_mac_addr =
6909 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6910 config->config_table[0].cam_entry.middle_mac_addr =
6911 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6912 config->config_table[0].cam_entry.lsb_mac_addr =
6913 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6914 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6915 config->config_table[0].target_table_entry.flags = 0;
6916 config->config_table[0].target_table_entry.client_id = 0;
6917 config->config_table[0].target_table_entry.vlan_id = 0;
6918
6919 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6920 config->config_table[0].cam_entry.msb_mac_addr,
6921 config->config_table[0].cam_entry.middle_mac_addr,
6922 config->config_table[0].cam_entry.lsb_mac_addr);
6923
6924 /* broadcast */
6925 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6926 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6927 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6928 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
6929 config->config_table[1].target_table_entry.flags =
6930 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6931 config->config_table[1].target_table_entry.client_id = 0;
6932 config->config_table[1].target_table_entry.vlan_id = 0;
6933
6934 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6935 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6936 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6937}
6938
6939static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6940 int *state_p, int poll)
6941{
6942 /* can take a while if any port is running */
6943 int timeout = 500;
6944
c14423fe
ET
6945 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6946 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6947
6948 might_sleep();
6949
6950 while (timeout) {
6951
6952 if (poll) {
6953 bnx2x_rx_int(bp->fp, 10);
6954 /* If index is different from 0
6955 * The reply for some commands will
6956 * be on the none default queue
6957 */
6958 if (idx)
6959 bnx2x_rx_int(&bp->fp[idx], 10);
6960 }
6961
6962 mb(); /* state is changed by bnx2x_sp_event()*/
6963
6964 if (*state_p != state)
6965 return 0;
6966
6967 timeout--;
6968 msleep(1);
6969
6970 }
6971
a2fbb9ea
ET
6972 /* timeout! */
6973 BNX2X_ERR("timeout waiting for ramrod %d on %d\n", state, idx);
6974 return -EBUSY;
6975
6976}
6977
6978static int bnx2x_setup_leading(struct bnx2x *bp)
6979{
6980
c14423fe 6981 /* reset IGU state */
a2fbb9ea
ET
6982 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6983
6984 /* SETUP ramrod */
6985 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6986
6987 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6988
6989}
6990
6991static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6992{
6993
6994 /* reset IGU state */
6995 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6996
6997 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6998 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6999
7000 /* Wait for completion */
7001 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7002 &(bp->fp[index].state), 1);
7003
7004}
7005
7006
7007static int bnx2x_poll(struct napi_struct *napi, int budget);
7008static void bnx2x_set_rx_mode(struct net_device *dev);
7009
7010static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
7011{
7012 int rc;
7013 int i = 0;
7014
7015 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7016
7017 /* Send LOAD_REQUEST command to MCP.
7018 Returns the type of LOAD command: if it is the
7019 first port to be initialized common blocks should be
7020 initialized, otherwise - not.
7021 */
7022 if (!nomcp) {
7023 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7024 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7025 return -EBUSY; /* other port in diagnostic mode */
7026 }
7027 } else {
7028 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
7029 }
7030
a2fbb9ea
ET
7031 /* if we can't use msix we only need one fp,
7032 * so try to enable msix with the requested number of fp's
7033 * and fallback to inta with one fp
7034 */
7035 if (req_irq) {
a2fbb9ea
ET
7036 if (use_inta) {
7037 bp->num_queues = 1;
7038 } else {
c14423fe 7039 if ((use_multi > 1) && (use_multi <= 16))
a2fbb9ea
ET
7040 /* user requested number */
7041 bp->num_queues = use_multi;
7042 else if (use_multi == 1)
7043 bp->num_queues = num_online_cpus();
7044 else
7045 bp->num_queues = 1;
7046
7047 if (bnx2x_enable_msix(bp)) {
c14423fe 7048 /* failed to enable msix */
a2fbb9ea
ET
7049 bp->num_queues = 1;
7050 if (use_multi)
c14423fe 7051 BNX2X_ERR("Multi requested but failed"
a2fbb9ea
ET
7052 " to enable MSI-X\n");
7053 }
7054 }
7055 }
7056
c14423fe
ET
7057 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7058
a2fbb9ea
ET
7059 if (bnx2x_alloc_mem(bp))
7060 return -ENOMEM;
7061
7062 if (req_irq) {
7063 if (bp->flags & USING_MSIX_FLAG) {
7064 if (bnx2x_req_msix_irqs(bp)) {
7065 pci_disable_msix(bp->pdev);
7066 goto out_error;
7067 }
7068
7069 } else {
7070 if (bnx2x_req_irq(bp)) {
7071 BNX2X_ERR("IRQ request failed, aborting\n");
7072 goto out_error;
7073 }
7074 }
7075 }
7076
7077 for_each_queue(bp, i)
7078 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7079 bnx2x_poll, 128);
7080
7081
7082 /* Initialize HW */
7083 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
7084 BNX2X_ERR("HW init failed, aborting\n");
7085 goto out_error;
7086 }
7087
7088
7089 atomic_set(&bp->intr_sem, 0);
7090
a2fbb9ea
ET
7091
7092 /* Setup NIC internals and enable interrupts */
7093 bnx2x_nic_init(bp);
7094
7095 /* Send LOAD_DONE command to MCP */
7096 if (!nomcp) {
7097 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7098 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
7099 if (!rc) {
7100 BNX2X_ERR("MCP response failure, unloading\n");
7101 goto int_disable;
7102 }
7103 }
7104
7105 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7106
7107 /* Enable Rx interrupt handling before sending the ramrod
7108 as it's completed on Rx FP queue */
7109 for_each_queue(bp, i)
7110 napi_enable(&bnx2x_fp(bp, i, napi));
7111
7112 if (bnx2x_setup_leading(bp))
7113 goto stop_netif;
7114
7115 for_each_nondefault_queue(bp, i)
7116 if (bnx2x_setup_multi(bp, i))
7117 goto stop_netif;
7118
7119 bnx2x_set_mac_addr(bp);
7120
7121 bnx2x_phy_init(bp);
7122
7123 /* Start fast path */
7124 if (req_irq) { /* IRQ is only requested from bnx2x_open */
7125 netif_start_queue(bp->dev);
7126 if (bp->flags & USING_MSIX_FLAG)
7127 printk(KERN_INFO PFX "%s: using MSI-X\n",
7128 bp->dev->name);
7129
7130 /* Otherwise Tx queue should be only reenabled */
7131 } else if (netif_running(bp->dev)) {
7132 netif_wake_queue(bp->dev);
7133 bnx2x_set_rx_mode(bp->dev);
7134 }
7135
7136 /* start the timer */
7137 mod_timer(&bp->timer, jiffies + bp->current_interval);
7138
7139 return 0;
7140
7141stop_netif:
7142 for_each_queue(bp, i)
7143 napi_disable(&bnx2x_fp(bp, i, napi));
7144
7145int_disable:
7146 bnx2x_disable_int_sync(bp);
7147
7148 bnx2x_free_skbs(bp);
7149 bnx2x_free_irq(bp);
7150
7151out_error:
7152 bnx2x_free_mem(bp);
7153
7154 /* TBD we really need to reset the chip
7155 if we want to recover from this */
7156 return rc;
7157}
7158
7159static void bnx2x_netif_stop(struct bnx2x *bp)
7160{
7161 int i;
7162
7163 bp->rx_mode = BNX2X_RX_MODE_NONE;
7164 bnx2x_set_storm_rx_mode(bp);
7165
7166 bnx2x_disable_int_sync(bp);
7167 bnx2x_link_reset(bp);
7168
7169 for_each_queue(bp, i)
7170 napi_disable(&bnx2x_fp(bp, i, napi));
7171
7172 if (netif_running(bp->dev)) {
7173 netif_tx_disable(bp->dev);
7174 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7175 }
7176}
7177
7178static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7179{
7180 int port = bp->port;
7181#ifdef USE_DMAE
7182 u32 wb_write[2];
7183#endif
7184 int base, i;
7185
7186 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
7187
7188 /* Do not rcv packets to BRB */
7189 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7190 /* Do not direct rcv packets that are not for MCP to the BRB */
7191 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7192 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7193
7194 /* Configure IGU and AEU */
7195 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
7196 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7197
7198 /* TODO: Close Doorbell port? */
7199
7200 /* Clear ILT */
7201#ifdef USE_DMAE
7202 wb_write[0] = 0;
7203 wb_write[1] = 0;
7204#endif
7205 base = port * RQ_ONCHIP_AT_PORT_SIZE;
7206 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
7207#ifdef USE_DMAE
7208 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
7209#else
7210 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
7211 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
7212#endif
7213 }
7214
7215 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7216 /* reset_common */
7217 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7218 0xd3ffff7f);
7219 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7220 0x1403);
7221 }
7222}
7223
7224static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7225{
7226
7227 int rc;
7228
c14423fe 7229 /* halt the connection */
a2fbb9ea
ET
7230 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
7231 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
7232
7233
7234 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7235 &(bp->fp[index].state), 1);
c14423fe 7236 if (rc) /* timeout */
a2fbb9ea
ET
7237 return rc;
7238
7239 /* delete cfc entry */
7240 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7241
7242 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_DELETED, index,
7243 &(bp->fp[index].state), 1);
7244
7245}
7246
7247
7248static void bnx2x_stop_leading(struct bnx2x *bp)
7249{
7250
c14423fe 7251 /* if the other port is handling traffic,
a2fbb9ea
ET
7252 this can take a lot of time */
7253 int timeout = 500;
7254
7255 might_sleep();
7256
7257 /* Send HALT ramrod */
7258 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7259 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
7260
7261 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7262 &(bp->fp[0].state), 1))
7263 return;
7264
7265 bp->dsb_sp_prod_idx = *bp->dsb_sp_prod;
7266
7267 /* Send CFC_DELETE ramrod */
7268 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7269
7270 /*
7271 Wait for completion.
7272 we are going to reset the chip anyway
7273 so there is not much to do if this times out
7274 */
7275 while (bp->dsb_sp_prod_idx == *bp->dsb_sp_prod && timeout) {
7276 timeout--;
7277 msleep(1);
7278 }
7279
7280}
7281
7282static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
7283{
7284 u32 reset_code = 0;
7285 int rc;
7286 int i;
7287
7288 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7289
7290 /* Calling flush_scheduled_work() may deadlock because
7291 * linkwatch_event() may be on the workqueue and it will try to get
7292 * the rtnl_lock which we are holding.
7293 */
7294
7295 while (bp->in_reset_task)
7296 msleep(1);
7297
7298 /* Delete the timer: do it before disabling interrupts, as it
c14423fe 7299 may be still STAT_QUERY ramrod pending after stopping the timer */
a2fbb9ea
ET
7300 del_timer_sync(&bp->timer);
7301
7302 /* Wait until stat ramrod returns and all SP tasks complete */
7303 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
7304 msleep(1);
7305
7306 /* Stop fast path, disable MAC, disable interrupts, disable napi */
7307 bnx2x_netif_stop(bp);
7308
7309 if (bp->flags & NO_WOL_FLAG)
7310 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7311 else if (bp->wol) {
7312 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
7313 u8 *mac_addr = bp->dev->dev_addr;
7314 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
7315 EMAC_MODE_ACPI_RCVD);
7316
7317 EMAC_WR(EMAC_REG_EMAC_MODE, val);
7318
7319 val = (mac_addr[0] << 8) | mac_addr[1];
7320 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
7321
7322 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7323 (mac_addr[4] << 8) | mac_addr[5];
7324 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
7325
7326 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7327 } else
7328 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7329
7330 for_each_nondefault_queue(bp, i)
7331 if (bnx2x_stop_multi(bp, i))
7332 goto error;
7333
7334
7335 bnx2x_stop_leading(bp);
7336
7337error:
7338 if (!nomcp)
7339 rc = bnx2x_fw_command(bp, reset_code);
7340 else
7341 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7342
7343 /* Release IRQs */
7344 if (fre_irq)
7345 bnx2x_free_irq(bp);
7346
7347 /* Reset the chip */
7348 bnx2x_reset_chip(bp, rc);
7349
7350 /* Report UNLOAD_DONE to MCP */
7351 if (!nomcp)
7352 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7353
7354 /* Free SKBs and driver internals */
7355 bnx2x_free_skbs(bp);
7356 bnx2x_free_mem(bp);
7357
7358 bp->state = BNX2X_STATE_CLOSED;
7359 /* Set link down */
7360 bp->link_up = 0;
7361 netif_carrier_off(bp->dev);
7362
7363 return 0;
7364}
7365
7366/* end of nic load/unload */
7367
7368/* ethtool_ops */
7369
7370/*
7371 * Init service functions
7372 */
7373
7374static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
7375{
7376 int port = bp->port;
7377 u32 ext_phy_type;
7378
7379 bp->phy_flags = 0;
7380
7381 switch (switch_cfg) {
7382 case SWITCH_CFG_1G:
7383 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7384
7385 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
7386 switch (ext_phy_type) {
7387 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7388 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7389 ext_phy_type);
7390
7391 bp->supported |= (SUPPORTED_10baseT_Half |
7392 SUPPORTED_10baseT_Full |
7393 SUPPORTED_100baseT_Half |
7394 SUPPORTED_100baseT_Full |
7395 SUPPORTED_1000baseT_Full |
f1410647 7396 SUPPORTED_2500baseX_Full |
a2fbb9ea
ET
7397 SUPPORTED_TP | SUPPORTED_FIBRE |
7398 SUPPORTED_Autoneg |
7399 SUPPORTED_Pause |
7400 SUPPORTED_Asym_Pause);
7401 break;
7402
7403 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7404 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7405 ext_phy_type);
7406
7407 bp->phy_flags |= PHY_SGMII_FLAG;
7408
f1410647
ET
7409 bp->supported |= (SUPPORTED_10baseT_Half |
7410 SUPPORTED_10baseT_Full |
7411 SUPPORTED_100baseT_Half |
7412 SUPPORTED_100baseT_Full |
a2fbb9ea
ET
7413 SUPPORTED_1000baseT_Full |
7414 SUPPORTED_TP | SUPPORTED_FIBRE |
7415 SUPPORTED_Autoneg |
7416 SUPPORTED_Pause |
7417 SUPPORTED_Asym_Pause);
7418 break;
7419
7420 default:
7421 BNX2X_ERR("NVRAM config error. "
7422 "BAD SerDes ext_phy_config 0x%x\n",
7423 bp->ext_phy_config);
7424 return;
7425 }
7426
7427 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7428 port*0x10);
7429 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7430 break;
7431
7432 case SWITCH_CFG_10G:
7433 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7434
7435 bp->phy_flags |= PHY_XGXS_FLAG;
7436
7437 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7438 switch (ext_phy_type) {
7439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7440 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7441 ext_phy_type);
7442
7443 bp->supported |= (SUPPORTED_10baseT_Half |
7444 SUPPORTED_10baseT_Full |
7445 SUPPORTED_100baseT_Half |
7446 SUPPORTED_100baseT_Full |
7447 SUPPORTED_1000baseT_Full |
f1410647 7448 SUPPORTED_2500baseX_Full |
a2fbb9ea
ET
7449 SUPPORTED_10000baseT_Full |
7450 SUPPORTED_TP | SUPPORTED_FIBRE |
7451 SUPPORTED_Autoneg |
7452 SUPPORTED_Pause |
7453 SUPPORTED_Asym_Pause);
7454 break;
7455
7456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647
ET
7457 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7458 ext_phy_type);
7459
7460 bp->supported |= (SUPPORTED_10000baseT_Full |
7461 SUPPORTED_FIBRE |
7462 SUPPORTED_Pause |
7463 SUPPORTED_Asym_Pause);
7464 break;
7465
a2fbb9ea 7466 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7467 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7468 ext_phy_type);
7469
7470 bp->supported |= (SUPPORTED_10000baseT_Full |
7471 SUPPORTED_1000baseT_Full |
7472 SUPPORTED_Autoneg |
7473 SUPPORTED_FIBRE |
7474 SUPPORTED_Pause |
7475 SUPPORTED_Asym_Pause);
7476 break;
7477
7478 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7479 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7480 ext_phy_type);
7481
7482 bp->supported |= (SUPPORTED_10000baseT_Full |
f1410647 7483 SUPPORTED_1000baseT_Full |
a2fbb9ea 7484 SUPPORTED_FIBRE |
f1410647
ET
7485 SUPPORTED_Autoneg |
7486 SUPPORTED_Pause |
7487 SUPPORTED_Asym_Pause);
7488 break;
7489
7490 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7491 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7492 ext_phy_type);
7493
7494 bp->supported |= (SUPPORTED_10000baseT_Full |
7495 SUPPORTED_TP |
7496 SUPPORTED_Autoneg |
a2fbb9ea
ET
7497 SUPPORTED_Pause |
7498 SUPPORTED_Asym_Pause);
7499 break;
7500
7501 default:
7502 BNX2X_ERR("NVRAM config error. "
7503 "BAD XGXS ext_phy_config 0x%x\n",
7504 bp->ext_phy_config);
7505 return;
7506 }
7507
7508 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7509 port*0x18);
7510 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7511
7512 bp->ser_lane = ((bp->lane_config &
7513 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
7514 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
7515 bp->rx_lane_swap = ((bp->lane_config &
7516 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
7517 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
7518 bp->tx_lane_swap = ((bp->lane_config &
7519 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
7520 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
7521 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
7522 bp->rx_lane_swap, bp->tx_lane_swap);
7523 break;
7524
7525 default:
7526 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7527 bp->link_config);
7528 return;
7529 }
7530
7531 /* mask what we support according to speed_cap_mask */
7532 if (!(bp->speed_cap_mask &
7533 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7534 bp->supported &= ~SUPPORTED_10baseT_Half;
7535
7536 if (!(bp->speed_cap_mask &
7537 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7538 bp->supported &= ~SUPPORTED_10baseT_Full;
7539
7540 if (!(bp->speed_cap_mask &
7541 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7542 bp->supported &= ~SUPPORTED_100baseT_Half;
7543
7544 if (!(bp->speed_cap_mask &
7545 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7546 bp->supported &= ~SUPPORTED_100baseT_Full;
7547
7548 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7549 bp->supported &= ~(SUPPORTED_1000baseT_Half |
7550 SUPPORTED_1000baseT_Full);
7551
7552 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
f1410647 7553 bp->supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea
ET
7554
7555 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7556 bp->supported &= ~SUPPORTED_10000baseT_Full;
7557
7558 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
7559}
7560
7561static void bnx2x_link_settings_requested(struct bnx2x *bp)
7562{
7563 bp->req_autoneg = 0;
7564 bp->req_duplex = DUPLEX_FULL;
7565
7566 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7567 case PORT_FEATURE_LINK_SPEED_AUTO:
7568 if (bp->supported & SUPPORTED_Autoneg) {
7569 bp->req_autoneg |= AUTONEG_SPEED;
7570 bp->req_line_speed = 0;
7571 bp->advertising = bp->supported;
7572 } else {
f1410647
ET
7573 if (XGXS_EXT_PHY_TYPE(bp) ==
7574 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
a2fbb9ea
ET
7575 /* force 10G, no AN */
7576 bp->req_line_speed = SPEED_10000;
7577 bp->advertising =
7578 (ADVERTISED_10000baseT_Full |
7579 ADVERTISED_FIBRE);
7580 break;
7581 }
7582 BNX2X_ERR("NVRAM config error. "
7583 "Invalid link_config 0x%x"
7584 " Autoneg not supported\n",
7585 bp->link_config);
7586 return;
7587 }
7588 break;
7589
7590 case PORT_FEATURE_LINK_SPEED_10M_FULL:
f1410647 7591 if (bp->supported & SUPPORTED_10baseT_Full) {
a2fbb9ea
ET
7592 bp->req_line_speed = SPEED_10;
7593 bp->advertising = (ADVERTISED_10baseT_Full |
7594 ADVERTISED_TP);
7595 } else {
7596 BNX2X_ERR("NVRAM config error. "
7597 "Invalid link_config 0x%x"
7598 " speed_cap_mask 0x%x\n",
7599 bp->link_config, bp->speed_cap_mask);
7600 return;
7601 }
7602 break;
7603
7604 case PORT_FEATURE_LINK_SPEED_10M_HALF:
f1410647 7605 if (bp->supported & SUPPORTED_10baseT_Half) {
a2fbb9ea
ET
7606 bp->req_line_speed = SPEED_10;
7607 bp->req_duplex = DUPLEX_HALF;
7608 bp->advertising = (ADVERTISED_10baseT_Half |
7609 ADVERTISED_TP);
7610 } else {
7611 BNX2X_ERR("NVRAM config error. "
7612 "Invalid link_config 0x%x"
7613 " speed_cap_mask 0x%x\n",
7614 bp->link_config, bp->speed_cap_mask);
7615 return;
7616 }
7617 break;
7618
7619 case PORT_FEATURE_LINK_SPEED_100M_FULL:
f1410647 7620 if (bp->supported & SUPPORTED_100baseT_Full) {
a2fbb9ea
ET
7621 bp->req_line_speed = SPEED_100;
7622 bp->advertising = (ADVERTISED_100baseT_Full |
7623 ADVERTISED_TP);
7624 } else {
7625 BNX2X_ERR("NVRAM config error. "
7626 "Invalid link_config 0x%x"
7627 " speed_cap_mask 0x%x\n",
7628 bp->link_config, bp->speed_cap_mask);
7629 return;
7630 }
7631 break;
7632
7633 case PORT_FEATURE_LINK_SPEED_100M_HALF:
f1410647 7634 if (bp->supported & SUPPORTED_100baseT_Half) {
a2fbb9ea
ET
7635 bp->req_line_speed = SPEED_100;
7636 bp->req_duplex = DUPLEX_HALF;
7637 bp->advertising = (ADVERTISED_100baseT_Half |
7638 ADVERTISED_TP);
7639 } else {
7640 BNX2X_ERR("NVRAM config error. "
7641 "Invalid link_config 0x%x"
7642 " speed_cap_mask 0x%x\n",
7643 bp->link_config, bp->speed_cap_mask);
7644 return;
7645 }
7646 break;
7647
7648 case PORT_FEATURE_LINK_SPEED_1G:
f1410647 7649 if (bp->supported & SUPPORTED_1000baseT_Full) {
a2fbb9ea
ET
7650 bp->req_line_speed = SPEED_1000;
7651 bp->advertising = (ADVERTISED_1000baseT_Full |
7652 ADVERTISED_TP);
7653 } else {
7654 BNX2X_ERR("NVRAM config error. "
7655 "Invalid link_config 0x%x"
7656 " speed_cap_mask 0x%x\n",
7657 bp->link_config, bp->speed_cap_mask);
7658 return;
7659 }
7660 break;
7661
7662 case PORT_FEATURE_LINK_SPEED_2_5G:
f1410647 7663 if (bp->supported & SUPPORTED_2500baseX_Full) {
a2fbb9ea 7664 bp->req_line_speed = SPEED_2500;
f1410647 7665 bp->advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7666 ADVERTISED_TP);
7667 } else {
7668 BNX2X_ERR("NVRAM config error. "
7669 "Invalid link_config 0x%x"
7670 " speed_cap_mask 0x%x\n",
7671 bp->link_config, bp->speed_cap_mask);
7672 return;
7673 }
7674 break;
7675
7676 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7677 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7678 case PORT_FEATURE_LINK_SPEED_10G_KR:
f1410647 7679 if (bp->supported & SUPPORTED_10000baseT_Full) {
a2fbb9ea
ET
7680 bp->req_line_speed = SPEED_10000;
7681 bp->advertising = (ADVERTISED_10000baseT_Full |
7682 ADVERTISED_FIBRE);
7683 } else {
7684 BNX2X_ERR("NVRAM config error. "
7685 "Invalid link_config 0x%x"
7686 " speed_cap_mask 0x%x\n",
7687 bp->link_config, bp->speed_cap_mask);
7688 return;
7689 }
7690 break;
7691
7692 default:
7693 BNX2X_ERR("NVRAM config error. "
7694 "BAD link speed link_config 0x%x\n",
7695 bp->link_config);
7696 bp->req_autoneg |= AUTONEG_SPEED;
7697 bp->req_line_speed = 0;
7698 bp->advertising = bp->supported;
7699 break;
7700 }
7701 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
7702 bp->req_line_speed, bp->req_duplex);
7703
7704 bp->req_flow_ctrl = (bp->link_config &
7705 PORT_FEATURE_FLOW_CONTROL_MASK);
f1410647
ET
7706 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
7707 (bp->supported & SUPPORTED_Autoneg))
a2fbb9ea 7708 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
a2fbb9ea 7709
f1410647
ET
7710 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
7711 " advertising 0x%x\n",
7712 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
a2fbb9ea
ET
7713}
7714
7715static void bnx2x_get_hwinfo(struct bnx2x *bp)
7716{
7717 u32 val, val2, val3, val4, id;
7718 int port = bp->port;
7719 u32 switch_cfg;
7720
7721 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7722 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
7723
7724 /* Get the chip revision id and number. */
7725 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7726 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7727 id = ((val & 0xffff) << 16);
7728 val = REG_RD(bp, MISC_REG_CHIP_REV);
7729 id |= ((val & 0xf) << 12);
7730 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7731 id |= ((val & 0xff) << 4);
7732 REG_RD(bp, MISC_REG_BOND_ID);
7733 id |= (val & 0xf);
7734 bp->chip_id = id;
7735 BNX2X_DEV_INFO("chip ID is %x\n", id);
7736
7737 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
7738 BNX2X_DEV_INFO("MCP not active\n");
7739 nomcp = 1;
7740 goto set_mac;
7741 }
7742
7743 val = SHMEM_RD(bp, validity_map[port]);
7744 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f1410647
ET
7745 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7746 BNX2X_ERR("BAD MCP validity signature\n");
a2fbb9ea 7747
f1410647 7748 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
a2fbb9ea
ET
7749 DRV_MSG_SEQ_NUMBER_MASK);
7750
7751 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
f1410647 7752 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
a2fbb9ea 7753 bp->serdes_config =
f1410647 7754 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
a2fbb9ea
ET
7755 bp->lane_config =
7756 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7757 bp->ext_phy_config =
7758 SHMEM_RD(bp,
7759 dev_info.port_hw_config[port].external_phy_config);
7760 bp->speed_cap_mask =
7761 SHMEM_RD(bp,
7762 dev_info.port_hw_config[port].speed_capability_mask);
7763
7764 bp->link_config =
7765 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7766
f1410647 7767 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
a2fbb9ea
ET
7768 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
7769 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
7770 " fw_seq (%08x)\n",
f1410647
ET
7771 bp->hw_config, bp->board, bp->serdes_config,
7772 bp->lane_config, bp->ext_phy_config,
7773 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
a2fbb9ea
ET
7774
7775 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
7776 bnx2x_link_settings_supported(bp, switch_cfg);
7777
7778 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
7779 /* for now disable cl73 */
7780 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
7781 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
7782
7783 bnx2x_link_settings_requested(bp);
7784
7785 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7786 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7787 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7788 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7789 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7790 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7791 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7792 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7793
7794 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7795
7796
7797 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7798 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7799 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7800 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7801
7802 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7803 val, val2, val3, val4);
7804
7805 /* bc ver */
7806 if (!nomcp) {
7807 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
7808 BNX2X_DEV_INFO("bc_ver %X\n", val);
7809 if (val < BNX2X_BC_VER) {
7810 /* for now only warn
7811 * later we might need to enforce this */
7812 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7813 " please upgrade BC\n", BNX2X_BC_VER, val);
7814 }
7815 } else {
7816 bp->bc_ver = 0;
7817 }
7818
7819 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7820 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7821 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7822 bp->flash_size, bp->flash_size);
7823
7824 return;
7825
7826set_mac: /* only supposed to happen on emulation/FPGA */
f1410647
ET
7827 BNX2X_ERR("warning rendom MAC workaround active\n");
7828 random_ether_addr(bp->dev->dev_addr);
a2fbb9ea
ET
7829 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7830
7831}
7832
7833/*
7834 * ethtool service functions
7835 */
7836
7837/* All ethtool functions called with rtnl_lock */
7838
7839static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7840{
7841 struct bnx2x *bp = netdev_priv(dev);
7842
7843 cmd->supported = bp->supported;
7844 cmd->advertising = bp->advertising;
7845
7846 if (netif_carrier_ok(dev)) {
7847 cmd->speed = bp->line_speed;
7848 cmd->duplex = bp->duplex;
7849 } else {
7850 cmd->speed = bp->req_line_speed;
7851 cmd->duplex = bp->req_duplex;
7852 }
7853
7854 if (bp->phy_flags & PHY_XGXS_FLAG) {
f1410647
ET
7855 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7856
7857 switch (ext_phy_type) {
7858 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7859 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7860 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7861 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7862 cmd->port = PORT_FIBRE;
7863 break;
7864
7865 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7866 cmd->port = PORT_TP;
7867 break;
7868
7869 default:
7870 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7871 bp->ext_phy_config);
7872 }
7873 } else
a2fbb9ea 7874 cmd->port = PORT_TP;
a2fbb9ea
ET
7875
7876 cmd->phy_address = bp->phy_addr;
7877 cmd->transceiver = XCVR_INTERNAL;
7878
f1410647 7879 if (bp->req_autoneg & AUTONEG_SPEED)
a2fbb9ea 7880 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7881 else
a2fbb9ea 7882 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7883
7884 cmd->maxtxpkt = 0;
7885 cmd->maxrxpkt = 0;
7886
7887 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7888 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7889 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7890 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7891 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7892 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7893 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7894
7895 return 0;
7896}
7897
7898static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7899{
7900 struct bnx2x *bp = netdev_priv(dev);
7901 u32 advertising;
7902
7903 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7904 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7905 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7906 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7907 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7908 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7909 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7910
7911 switch (cmd->port) {
7912 case PORT_TP:
f1410647
ET
7913 if (!(bp->supported & SUPPORTED_TP)) {
7914 DP(NETIF_MSG_LINK, "TP not supported\n");
a2fbb9ea 7915 return -EINVAL;
f1410647 7916 }
a2fbb9ea
ET
7917
7918 if (bp->phy_flags & PHY_XGXS_FLAG) {
7919 bnx2x_link_reset(bp);
7920 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
7921 bnx2x_phy_deassert(bp);
7922 }
7923 break;
7924
7925 case PORT_FIBRE:
f1410647
ET
7926 if (!(bp->supported & SUPPORTED_FIBRE)) {
7927 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
a2fbb9ea 7928 return -EINVAL;
f1410647 7929 }
a2fbb9ea
ET
7930
7931 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7932 bnx2x_link_reset(bp);
7933 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
7934 bnx2x_phy_deassert(bp);
7935 }
7936 break;
7937
7938 default:
f1410647 7939 DP(NETIF_MSG_LINK, "Unknown port type\n");
a2fbb9ea
ET
7940 return -EINVAL;
7941 }
7942
7943 if (cmd->autoneg == AUTONEG_ENABLE) {
f1410647
ET
7944 if (!(bp->supported & SUPPORTED_Autoneg)) {
7945 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
a2fbb9ea 7946 return -EINVAL;
f1410647 7947 }
a2fbb9ea
ET
7948
7949 /* advertise the requested speed and duplex if supported */
7950 cmd->advertising &= bp->supported;
7951
7952 bp->req_autoneg |= AUTONEG_SPEED;
7953 bp->req_line_speed = 0;
7954 bp->req_duplex = DUPLEX_FULL;
7955 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
7956
7957 } else { /* forced speed */
7958 /* advertise the requested speed and duplex if supported */
7959 switch (cmd->speed) {
7960 case SPEED_10:
7961 if (cmd->duplex == DUPLEX_FULL) {
f1410647
ET
7962 if (!(bp->supported &
7963 SUPPORTED_10baseT_Full)) {
7964 DP(NETIF_MSG_LINK,
7965 "10M full not supported\n");
a2fbb9ea 7966 return -EINVAL;
f1410647 7967 }
a2fbb9ea
ET
7968
7969 advertising = (ADVERTISED_10baseT_Full |
7970 ADVERTISED_TP);
7971 } else {
f1410647
ET
7972 if (!(bp->supported &
7973 SUPPORTED_10baseT_Half)) {
7974 DP(NETIF_MSG_LINK,
7975 "10M half not supported\n");
a2fbb9ea 7976 return -EINVAL;
f1410647 7977 }
a2fbb9ea
ET
7978
7979 advertising = (ADVERTISED_10baseT_Half |
7980 ADVERTISED_TP);
7981 }
7982 break;
7983
7984 case SPEED_100:
7985 if (cmd->duplex == DUPLEX_FULL) {
7986 if (!(bp->supported &
f1410647
ET
7987 SUPPORTED_100baseT_Full)) {
7988 DP(NETIF_MSG_LINK,
7989 "100M full not supported\n");
a2fbb9ea 7990 return -EINVAL;
f1410647 7991 }
a2fbb9ea
ET
7992
7993 advertising = (ADVERTISED_100baseT_Full |
7994 ADVERTISED_TP);
7995 } else {
7996 if (!(bp->supported &
f1410647
ET
7997 SUPPORTED_100baseT_Half)) {
7998 DP(NETIF_MSG_LINK,
7999 "100M half not supported\n");
a2fbb9ea 8000 return -EINVAL;
f1410647 8001 }
a2fbb9ea
ET
8002
8003 advertising = (ADVERTISED_100baseT_Half |
8004 ADVERTISED_TP);
8005 }
8006 break;
8007
8008 case SPEED_1000:
f1410647
ET
8009 if (cmd->duplex != DUPLEX_FULL) {
8010 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8011 return -EINVAL;
f1410647 8012 }
a2fbb9ea 8013
f1410647
ET
8014 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8015 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8016 return -EINVAL;
f1410647 8017 }
a2fbb9ea
ET
8018
8019 advertising = (ADVERTISED_1000baseT_Full |
8020 ADVERTISED_TP);
8021 break;
8022
8023 case SPEED_2500:
f1410647
ET
8024 if (cmd->duplex != DUPLEX_FULL) {
8025 DP(NETIF_MSG_LINK,
8026 "2.5G half not supported\n");
a2fbb9ea 8027 return -EINVAL;
f1410647 8028 }
a2fbb9ea 8029
f1410647
ET
8030 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8031 DP(NETIF_MSG_LINK,
8032 "2.5G full not supported\n");
a2fbb9ea 8033 return -EINVAL;
f1410647 8034 }
a2fbb9ea 8035
f1410647 8036 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8037 ADVERTISED_TP);
8038 break;
8039
8040 case SPEED_10000:
f1410647
ET
8041 if (cmd->duplex != DUPLEX_FULL) {
8042 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8043 return -EINVAL;
f1410647 8044 }
a2fbb9ea 8045
f1410647
ET
8046 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8047 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8048 return -EINVAL;
f1410647 8049 }
a2fbb9ea
ET
8050
8051 advertising = (ADVERTISED_10000baseT_Full |
8052 ADVERTISED_FIBRE);
8053 break;
8054
8055 default:
f1410647 8056 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8057 return -EINVAL;
8058 }
8059
8060 bp->req_autoneg &= ~AUTONEG_SPEED;
8061 bp->req_line_speed = cmd->speed;
8062 bp->req_duplex = cmd->duplex;
8063 bp->advertising = advertising;
8064 }
8065
8066 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
8067 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8068 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
8069 bp->advertising);
8070
8071 bnx2x_stop_stats(bp);
8072 bnx2x_link_initialize(bp);
8073
8074 return 0;
8075}
8076
8077static void bnx2x_get_drvinfo(struct net_device *dev,
8078 struct ethtool_drvinfo *info)
8079{
8080 struct bnx2x *bp = netdev_priv(dev);
8081
8082 strcpy(info->driver, DRV_MODULE_NAME);
8083 strcpy(info->version, DRV_MODULE_VERSION);
8084 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
8085 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
8086 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
8087 bp->bc_ver);
8088 strcpy(info->bus_info, pci_name(bp->pdev));
8089 info->n_stats = BNX2X_NUM_STATS;
8090 info->testinfo_len = BNX2X_NUM_TESTS;
8091 info->eedump_len = bp->flash_size;
8092 info->regdump_len = 0;
8093}
8094
8095static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8096{
8097 struct bnx2x *bp = netdev_priv(dev);
8098
8099 if (bp->flags & NO_WOL_FLAG) {
8100 wol->supported = 0;
8101 wol->wolopts = 0;
8102 } else {
8103 wol->supported = WAKE_MAGIC;
8104 if (bp->wol)
8105 wol->wolopts = WAKE_MAGIC;
8106 else
8107 wol->wolopts = 0;
8108 }
8109 memset(&wol->sopass, 0, sizeof(wol->sopass));
8110}
8111
8112static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8113{
8114 struct bnx2x *bp = netdev_priv(dev);
8115
8116 if (wol->wolopts & ~WAKE_MAGIC)
8117 return -EINVAL;
8118
8119 if (wol->wolopts & WAKE_MAGIC) {
8120 if (bp->flags & NO_WOL_FLAG)
8121 return -EINVAL;
8122
8123 bp->wol = 1;
8124 } else {
8125 bp->wol = 0;
8126 }
8127 return 0;
8128}
8129
8130static u32 bnx2x_get_msglevel(struct net_device *dev)
8131{
8132 struct bnx2x *bp = netdev_priv(dev);
8133
8134 return bp->msglevel;
8135}
8136
8137static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8138{
8139 struct bnx2x *bp = netdev_priv(dev);
8140
8141 if (capable(CAP_NET_ADMIN))
8142 bp->msglevel = level;
8143}
8144
8145static int bnx2x_nway_reset(struct net_device *dev)
8146{
8147 struct bnx2x *bp = netdev_priv(dev);
8148
8149 if (bp->state != BNX2X_STATE_OPEN) {
8150 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8151 return -EAGAIN;
8152 }
8153
8154 bnx2x_stop_stats(bp);
8155 bnx2x_link_initialize(bp);
8156
8157 return 0;
8158}
8159
8160static int bnx2x_get_eeprom_len(struct net_device *dev)
8161{
8162 struct bnx2x *bp = netdev_priv(dev);
8163
8164 return bp->flash_size;
8165}
8166
8167static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8168{
8169 int port = bp->port;
8170 int count, i;
8171 u32 val = 0;
8172
8173 /* adjust timeout for emulation/FPGA */
8174 count = NVRAM_TIMEOUT_COUNT;
8175 if (CHIP_REV_IS_SLOW(bp))
8176 count *= 100;
8177
8178 /* request access to nvram interface */
8179 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8180 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8181
8182 for (i = 0; i < count*10; i++) {
8183 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8184 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8185 break;
8186
8187 udelay(5);
8188 }
8189
8190 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8191 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
8192 return -EBUSY;
8193 }
8194
8195 return 0;
8196}
8197
8198static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8199{
8200 int port = bp->port;
8201 int count, i;
8202 u32 val = 0;
8203
8204 /* adjust timeout for emulation/FPGA */
8205 count = NVRAM_TIMEOUT_COUNT;
8206 if (CHIP_REV_IS_SLOW(bp))
8207 count *= 100;
8208
8209 /* relinquish nvram interface */
8210 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8211 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8212
8213 for (i = 0; i < count*10; i++) {
8214 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8215 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8216 break;
8217
8218 udelay(5);
8219 }
8220
8221 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8222 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
8223 return -EBUSY;
8224 }
8225
8226 return 0;
8227}
8228
8229static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8230{
8231 u32 val;
8232
8233 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8234
8235 /* enable both bits, even on read */
8236 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8237 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8238 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8239}
8240
8241static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8242{
8243 u32 val;
8244
8245 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8246
8247 /* disable both bits, even after read */
8248 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8249 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8250 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8251}
8252
8253static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8254 u32 cmd_flags)
8255{
f1410647 8256 int count, i, rc;
a2fbb9ea
ET
8257 u32 val;
8258
8259 /* build the command word */
8260 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8261
8262 /* need to clear DONE bit separately */
8263 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8264
8265 /* address of the NVRAM to read from */
8266 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8267 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8268
8269 /* issue a read command */
8270 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8271
8272 /* adjust timeout for emulation/FPGA */
8273 count = NVRAM_TIMEOUT_COUNT;
8274 if (CHIP_REV_IS_SLOW(bp))
8275 count *= 100;
8276
8277 /* wait for completion */
8278 *ret_val = 0;
8279 rc = -EBUSY;
8280 for (i = 0; i < count; i++) {
8281 udelay(5);
8282 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8283
8284 if (val & MCPR_NVM_COMMAND_DONE) {
8285 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8286 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8287 /* we read nvram data in cpu order
8288 * but ethtool sees it as an array of bytes
8289 * converting to big-endian will do the work */
8290 val = cpu_to_be32(val);
8291 *ret_val = val;
8292 rc = 0;
8293 break;
8294 }
8295 }
8296
8297 return rc;
8298}
8299
8300static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8301 int buf_size)
8302{
8303 int rc;
8304 u32 cmd_flags;
8305 u32 val;
8306
8307 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8308 DP(NETIF_MSG_NVM,
c14423fe 8309 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8310 offset, buf_size);
8311 return -EINVAL;
8312 }
8313
8314 if (offset + buf_size > bp->flash_size) {
c14423fe 8315 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea
ET
8316 " buf_size (0x%x) > flash_size (0x%x)\n",
8317 offset, buf_size, bp->flash_size);
8318 return -EINVAL;
8319 }
8320
8321 /* request access to nvram interface */
8322 rc = bnx2x_acquire_nvram_lock(bp);
8323 if (rc)
8324 return rc;
8325
8326 /* enable access to nvram interface */
8327 bnx2x_enable_nvram_access(bp);
8328
8329 /* read the first word(s) */
8330 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8331 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8332 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8333 memcpy(ret_buf, &val, 4);
8334
8335 /* advance to the next dword */
8336 offset += sizeof(u32);
8337 ret_buf += sizeof(u32);
8338 buf_size -= sizeof(u32);
8339 cmd_flags = 0;
8340 }
8341
8342 if (rc == 0) {
8343 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8344 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8345 memcpy(ret_buf, &val, 4);
8346 }
8347
8348 /* disable access to nvram interface */
8349 bnx2x_disable_nvram_access(bp);
8350 bnx2x_release_nvram_lock(bp);
8351
8352 return rc;
8353}
8354
8355static int bnx2x_get_eeprom(struct net_device *dev,
8356 struct ethtool_eeprom *eeprom, u8 *eebuf)
8357{
8358 struct bnx2x *bp = netdev_priv(dev);
8359 int rc;
8360
8361 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8362 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8363 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8364 eeprom->len, eeprom->len);
8365
8366 /* parameters already validated in ethtool_get_eeprom */
8367
8368 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8369
8370 return rc;
8371}
8372
8373static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8374 u32 cmd_flags)
8375{
f1410647 8376 int count, i, rc;
a2fbb9ea
ET
8377
8378 /* build the command word */
8379 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8380
8381 /* need to clear DONE bit separately */
8382 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8383
8384 /* write the data */
8385 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8386
8387 /* address of the NVRAM to write to */
8388 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8389 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8390
8391 /* issue the write command */
8392 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8393
8394 /* adjust timeout for emulation/FPGA */
8395 count = NVRAM_TIMEOUT_COUNT;
8396 if (CHIP_REV_IS_SLOW(bp))
8397 count *= 100;
8398
8399 /* wait for completion */
8400 rc = -EBUSY;
8401 for (i = 0; i < count; i++) {
8402 udelay(5);
8403 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8404 if (val & MCPR_NVM_COMMAND_DONE) {
8405 rc = 0;
8406 break;
8407 }
8408 }
8409
8410 return rc;
8411}
8412
f1410647 8413#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8414
8415static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8416 int buf_size)
8417{
8418 int rc;
8419 u32 cmd_flags;
8420 u32 align_offset;
8421 u32 val;
8422
8423 if (offset + buf_size > bp->flash_size) {
c14423fe 8424 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea
ET
8425 " buf_size (0x%x) > flash_size (0x%x)\n",
8426 offset, buf_size, bp->flash_size);
8427 return -EINVAL;
8428 }
8429
8430 /* request access to nvram interface */
8431 rc = bnx2x_acquire_nvram_lock(bp);
8432 if (rc)
8433 return rc;
8434
8435 /* enable access to nvram interface */
8436 bnx2x_enable_nvram_access(bp);
8437
8438 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8439 align_offset = (offset & ~0x03);
8440 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8441
8442 if (rc == 0) {
8443 val &= ~(0xff << BYTE_OFFSET(offset));
8444 val |= (*data_buf << BYTE_OFFSET(offset));
8445
8446 /* nvram data is returned as an array of bytes
8447 * convert it back to cpu order */
8448 val = be32_to_cpu(val);
8449
8450 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8451
8452 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8453 cmd_flags);
8454 }
8455
8456 /* disable access to nvram interface */
8457 bnx2x_disable_nvram_access(bp);
8458 bnx2x_release_nvram_lock(bp);
8459
8460 return rc;
8461}
8462
8463static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8464 int buf_size)
8465{
8466 int rc;
8467 u32 cmd_flags;
8468 u32 val;
8469 u32 written_so_far;
8470
8471 if (buf_size == 1) { /* ethtool */
8472 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8473 }
8474
8475 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8476 DP(NETIF_MSG_NVM,
c14423fe 8477 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8478 offset, buf_size);
8479 return -EINVAL;
8480 }
8481
8482 if (offset + buf_size > bp->flash_size) {
c14423fe 8483 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea
ET
8484 " buf_size (0x%x) > flash_size (0x%x)\n",
8485 offset, buf_size, bp->flash_size);
8486 return -EINVAL;
8487 }
8488
8489 /* request access to nvram interface */
8490 rc = bnx2x_acquire_nvram_lock(bp);
8491 if (rc)
8492 return rc;
8493
8494 /* enable access to nvram interface */
8495 bnx2x_enable_nvram_access(bp);
8496
8497 written_so_far = 0;
8498 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8499 while ((written_so_far < buf_size) && (rc == 0)) {
8500 if (written_so_far == (buf_size - sizeof(u32)))
8501 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8502 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8503 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8504 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8505 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8506
8507 memcpy(&val, data_buf, 4);
8508 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8509
8510 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8511
8512 /* advance to the next dword */
8513 offset += sizeof(u32);
8514 data_buf += sizeof(u32);
8515 written_so_far += sizeof(u32);
8516 cmd_flags = 0;
8517 }
8518
8519 /* disable access to nvram interface */
8520 bnx2x_disable_nvram_access(bp);
8521 bnx2x_release_nvram_lock(bp);
8522
8523 return rc;
8524}
8525
8526static int bnx2x_set_eeprom(struct net_device *dev,
8527 struct ethtool_eeprom *eeprom, u8 *eebuf)
8528{
8529 struct bnx2x *bp = netdev_priv(dev);
8530 int rc;
8531
8532 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8533 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8534 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8535 eeprom->len, eeprom->len);
8536
8537 /* parameters already validated in ethtool_set_eeprom */
8538
8539 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8540
8541 return rc;
8542}
8543
8544static int bnx2x_get_coalesce(struct net_device *dev,
8545 struct ethtool_coalesce *coal)
8546{
8547 struct bnx2x *bp = netdev_priv(dev);
8548
8549 memset(coal, 0, sizeof(struct ethtool_coalesce));
8550
8551 coal->rx_coalesce_usecs = bp->rx_ticks;
8552 coal->tx_coalesce_usecs = bp->tx_ticks;
8553 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8554
8555 return 0;
8556}
8557
8558static int bnx2x_set_coalesce(struct net_device *dev,
8559 struct ethtool_coalesce *coal)
8560{
8561 struct bnx2x *bp = netdev_priv(dev);
8562
8563 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8564 if (bp->rx_ticks > 3000)
8565 bp->rx_ticks = 3000;
8566
8567 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8568 if (bp->tx_ticks > 0x3000)
8569 bp->tx_ticks = 0x3000;
8570
8571 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8572 if (bp->stats_ticks > 0xffff00)
8573 bp->stats_ticks = 0xffff00;
8574 bp->stats_ticks &= 0xffff00;
8575
8576 if (netif_running(bp->dev))
8577 bnx2x_update_coalesce(bp);
8578
8579 return 0;
8580}
8581
8582static void bnx2x_get_ringparam(struct net_device *dev,
8583 struct ethtool_ringparam *ering)
8584{
8585 struct bnx2x *bp = netdev_priv(dev);
8586
8587 ering->rx_max_pending = MAX_RX_AVAIL;
8588 ering->rx_mini_max_pending = 0;
8589 ering->rx_jumbo_max_pending = 0;
8590
8591 ering->rx_pending = bp->rx_ring_size;
8592 ering->rx_mini_pending = 0;
8593 ering->rx_jumbo_pending = 0;
8594
8595 ering->tx_max_pending = MAX_TX_AVAIL;
8596 ering->tx_pending = bp->tx_ring_size;
8597}
8598
8599static int bnx2x_set_ringparam(struct net_device *dev,
8600 struct ethtool_ringparam *ering)
8601{
8602 struct bnx2x *bp = netdev_priv(dev);
8603
8604 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8605 (ering->tx_pending > MAX_TX_AVAIL) ||
8606 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8607 return -EINVAL;
8608
8609 bp->rx_ring_size = ering->rx_pending;
8610 bp->tx_ring_size = ering->tx_pending;
8611
8612 if (netif_running(bp->dev)) {
8613 bnx2x_nic_unload(bp, 0);
8614 bnx2x_nic_load(bp, 0);
8615 }
8616
8617 return 0;
8618}
8619
8620static void bnx2x_get_pauseparam(struct net_device *dev,
8621 struct ethtool_pauseparam *epause)
8622{
8623 struct bnx2x *bp = netdev_priv(dev);
8624
8625 epause->autoneg =
8626 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
8627 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
8628 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
8629
8630 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8631 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8632 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8633}
8634
8635static int bnx2x_set_pauseparam(struct net_device *dev,
8636 struct ethtool_pauseparam *epause)
8637{
8638 struct bnx2x *bp = netdev_priv(dev);
8639
8640 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8641 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8642 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8643
a2fbb9ea 8644 if (epause->autoneg) {
f1410647
ET
8645 if (!(bp->supported & SUPPORTED_Autoneg)) {
8646 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
8647 return -EINVAL;
a2fbb9ea
ET
8648 }
8649
f1410647
ET
8650 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8651 } else
a2fbb9ea
ET
8652 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
8653
f1410647 8654 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8655
f1410647
ET
8656 if (epause->rx_pause)
8657 bp->req_flow_ctrl |= FLOW_CTRL_RX;
8658 if (epause->tx_pause)
8659 bp->req_flow_ctrl |= FLOW_CTRL_TX;
a2fbb9ea 8660
f1410647
ET
8661 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
8662 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
8663 bp->req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8664
f1410647
ET
8665 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
8666 bp->req_autoneg, bp->req_flow_ctrl);
a2fbb9ea
ET
8667
8668 bnx2x_stop_stats(bp);
8669 bnx2x_link_initialize(bp);
8670
8671 return 0;
8672}
8673
8674static u32 bnx2x_get_rx_csum(struct net_device *dev)
8675{
8676 struct bnx2x *bp = netdev_priv(dev);
8677
8678 return bp->rx_csum;
8679}
8680
8681static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8682{
8683 struct bnx2x *bp = netdev_priv(dev);
8684
8685 bp->rx_csum = data;
8686 return 0;
8687}
8688
8689static int bnx2x_set_tso(struct net_device *dev, u32 data)
8690{
8691 if (data)
8692 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8693 else
8694 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8695 return 0;
8696}
8697
8698static struct {
8699 char string[ETH_GSTRING_LEN];
8700} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8701 { "MC Errors (online)" }
8702};
8703
8704static int bnx2x_self_test_count(struct net_device *dev)
8705{
8706 return BNX2X_NUM_TESTS;
8707}
8708
8709static void bnx2x_self_test(struct net_device *dev,
8710 struct ethtool_test *etest, u64 *buf)
8711{
8712 struct bnx2x *bp = netdev_priv(dev);
8713 int stats_state;
8714
8715 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8716
8717 if (bp->state != BNX2X_STATE_OPEN) {
8718 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8719 return;
8720 }
8721
8722 stats_state = bp->stats_state;
8723 bnx2x_stop_stats(bp);
8724
8725 if (bnx2x_mc_assert(bp) != 0) {
8726 buf[0] = 1;
8727 etest->flags |= ETH_TEST_FL_FAILED;
8728 }
8729
8730#ifdef BNX2X_EXTRA_DEBUG
8731 bnx2x_panic_dump(bp);
8732#endif
8733 bp->stats_state = stats_state;
8734}
8735
8736static struct {
8737 char string[ETH_GSTRING_LEN];
8738} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
8739 { "rx_bytes"}, /* 0 */
8740 { "rx_error_bytes"}, /* 1 */
8741 { "tx_bytes"}, /* 2 */
8742 { "tx_error_bytes"}, /* 3 */
8743 { "rx_ucast_packets"}, /* 4 */
8744 { "rx_mcast_packets"}, /* 5 */
8745 { "rx_bcast_packets"}, /* 6 */
8746 { "tx_ucast_packets"}, /* 7 */
8747 { "tx_mcast_packets"}, /* 8 */
8748 { "tx_bcast_packets"}, /* 9 */
8749 { "tx_mac_errors"}, /* 10 */
8750 { "tx_carrier_errors"}, /* 11 */
8751 { "rx_crc_errors"}, /* 12 */
8752 { "rx_align_errors"}, /* 13 */
8753 { "tx_single_collisions"}, /* 14 */
8754 { "tx_multi_collisions"}, /* 15 */
8755 { "tx_deferred"}, /* 16 */
8756 { "tx_excess_collisions"}, /* 17 */
8757 { "tx_late_collisions"}, /* 18 */
8758 { "tx_total_collisions"}, /* 19 */
8759 { "rx_fragments"}, /* 20 */
8760 { "rx_jabbers"}, /* 21 */
8761 { "rx_undersize_packets"}, /* 22 */
8762 { "rx_oversize_packets"}, /* 23 */
8763 { "rx_xon_frames"}, /* 24 */
8764 { "rx_xoff_frames"}, /* 25 */
8765 { "tx_xon_frames"}, /* 26 */
8766 { "tx_xoff_frames"}, /* 27 */
8767 { "rx_mac_ctrl_frames"}, /* 28 */
8768 { "rx_filtered_packets"}, /* 29 */
8769 { "rx_discards"}, /* 30 */
8770};
8771
8772#define STATS_OFFSET32(offset_name) \
8773 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
8774
8775static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
8776 STATS_OFFSET32(total_bytes_received_hi), /* 0 */
8777 STATS_OFFSET32(stat_IfHCInBadOctets_hi), /* 1 */
8778 STATS_OFFSET32(total_bytes_transmitted_hi), /* 2 */
8779 STATS_OFFSET32(stat_IfHCOutBadOctets_hi), /* 3 */
8780 STATS_OFFSET32(total_unicast_packets_received_hi), /* 4 */
8781 STATS_OFFSET32(total_multicast_packets_received_hi), /* 5 */
8782 STATS_OFFSET32(total_broadcast_packets_received_hi), /* 6 */
8783 STATS_OFFSET32(total_unicast_packets_transmitted_hi), /* 7 */
8784 STATS_OFFSET32(total_multicast_packets_transmitted_hi), /* 8 */
8785 STATS_OFFSET32(total_broadcast_packets_transmitted_hi), /* 9 */
8786 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
8787 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), /* 11 */
8788 STATS_OFFSET32(crc_receive_errors), /* 12 */
8789 STATS_OFFSET32(alignment_errors), /* 13 */
8790 STATS_OFFSET32(single_collision_transmit_frames), /* 14 */
8791 STATS_OFFSET32(multiple_collision_transmit_frames), /* 15 */
8792 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), /* 16 */
8793 STATS_OFFSET32(excessive_collision_frames), /* 17 */
8794 STATS_OFFSET32(late_collision_frames), /* 18 */
8795 STATS_OFFSET32(number_of_bugs_found_in_stats_spec), /* 19 */
8796 STATS_OFFSET32(runt_packets_received), /* 20 */
8797 STATS_OFFSET32(jabber_packets_received), /* 21 */
8798 STATS_OFFSET32(error_runt_packets_received), /* 22 */
8799 STATS_OFFSET32(error_jabber_packets_received), /* 23 */
8800 STATS_OFFSET32(pause_xon_frames_received), /* 24 */
8801 STATS_OFFSET32(pause_xoff_frames_received), /* 25 */
8802 STATS_OFFSET32(pause_xon_frames_transmitted), /* 26 */
8803 STATS_OFFSET32(pause_xoff_frames_transmitted), /* 27 */
8804 STATS_OFFSET32(control_frames_received), /* 28 */
8805 STATS_OFFSET32(mac_filter_discard), /* 29 */
8806 STATS_OFFSET32(no_buff_discard), /* 30 */
8807};
8808
8809static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
8810 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
8811 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
8812 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
8813 4,
8814};
8815
8816static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8817{
8818 switch (stringset) {
8819 case ETH_SS_STATS:
8820 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
8821 break;
8822
8823 case ETH_SS_TEST:
8824 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8825 break;
8826 }
8827}
8828
8829static int bnx2x_get_stats_count(struct net_device *dev)
8830{
8831 return BNX2X_NUM_STATS;
8832}
8833
8834static void bnx2x_get_ethtool_stats(struct net_device *dev,
8835 struct ethtool_stats *stats, u64 *buf)
8836{
8837 struct bnx2x *bp = netdev_priv(dev);
8838 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8839 int i;
8840
8841 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8842 if (bnx2x_stats_len_arr[i] == 0) {
8843 /* skip this counter */
8844 buf[i] = 0;
8845 continue;
8846 }
8847 if (!hw_stats) {
8848 buf[i] = 0;
8849 continue;
8850 }
8851 if (bnx2x_stats_len_arr[i] == 4) {
8852 /* 4-byte counter */
8853 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8854 continue;
8855 }
8856 /* 8-byte counter */
8857 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8858 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
8859 }
8860}
8861
8862static int bnx2x_phys_id(struct net_device *dev, u32 data)
8863{
8864 struct bnx2x *bp = netdev_priv(dev);
8865 int i;
8866
8867 if (data == 0)
8868 data = 2;
8869
8870 for (i = 0; i < (data * 2); i++) {
8871 if ((i % 2) == 0) {
8872 bnx2x_leds_set(bp, SPEED_1000);
8873 } else {
8874 bnx2x_leds_unset(bp);
8875 }
8876 msleep_interruptible(500);
8877 if (signal_pending(current))
8878 break;
8879 }
8880
8881 if (bp->link_up)
8882 bnx2x_leds_set(bp, bp->line_speed);
8883
8884 return 0;
8885}
8886
8887static struct ethtool_ops bnx2x_ethtool_ops = {
8888 .get_settings = bnx2x_get_settings,
8889 .set_settings = bnx2x_set_settings,
8890 .get_drvinfo = bnx2x_get_drvinfo,
8891 .get_wol = bnx2x_get_wol,
8892 .set_wol = bnx2x_set_wol,
8893 .get_msglevel = bnx2x_get_msglevel,
8894 .set_msglevel = bnx2x_set_msglevel,
8895 .nway_reset = bnx2x_nway_reset,
8896 .get_link = ethtool_op_get_link,
8897 .get_eeprom_len = bnx2x_get_eeprom_len,
8898 .get_eeprom = bnx2x_get_eeprom,
8899 .set_eeprom = bnx2x_set_eeprom,
8900 .get_coalesce = bnx2x_get_coalesce,
8901 .set_coalesce = bnx2x_set_coalesce,
8902 .get_ringparam = bnx2x_get_ringparam,
8903 .set_ringparam = bnx2x_set_ringparam,
8904 .get_pauseparam = bnx2x_get_pauseparam,
8905 .set_pauseparam = bnx2x_set_pauseparam,
8906 .get_rx_csum = bnx2x_get_rx_csum,
8907 .set_rx_csum = bnx2x_set_rx_csum,
8908 .get_tx_csum = ethtool_op_get_tx_csum,
8909 .set_tx_csum = ethtool_op_set_tx_csum,
8910 .get_sg = ethtool_op_get_sg,
8911 .set_sg = ethtool_op_set_sg,
8912 .get_tso = ethtool_op_get_tso,
8913 .set_tso = bnx2x_set_tso,
8914 .self_test_count = bnx2x_self_test_count,
8915 .self_test = bnx2x_self_test,
8916 .get_strings = bnx2x_get_strings,
8917 .phys_id = bnx2x_phys_id,
8918 .get_stats_count = bnx2x_get_stats_count,
8919 .get_ethtool_stats = bnx2x_get_ethtool_stats
8920};
8921
8922/* end of ethtool_ops */
8923
8924/****************************************************************************
8925* General service functions
8926****************************************************************************/
8927
8928static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
8929{
8930 u16 pmcsr;
8931
8932 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
8933
8934 switch (state) {
8935 case PCI_D0:
8936 pci_write_config_word(bp->pdev,
8937 bp->pm_cap + PCI_PM_CTRL,
8938 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
8939 PCI_PM_CTRL_PME_STATUS));
8940
8941 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
8942 /* delay required during transition out of D3hot */
8943 msleep(20);
8944 break;
8945
8946 case PCI_D3hot:
8947 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
8948 pmcsr |= 3;
8949
8950 if (bp->wol)
8951 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
8952
8953 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
8954 pmcsr);
8955
8956 /* No more memory access after this point until
8957 * device is brought back to D0.
8958 */
8959 break;
8960
8961 default:
8962 return -EINVAL;
8963 }
8964 return 0;
8965}
8966
8967/*
8968 * net_device service functions
8969 */
8970
8971/* Called with rtnl_lock from vlan functions and also netif_tx_lock
8972 * from set_multicast.
8973 */
8974static void bnx2x_set_rx_mode(struct net_device *dev)
8975{
8976 struct bnx2x *bp = netdev_priv(dev);
8977 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8978
8979 DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
8980
8981 if (dev->flags & IFF_PROMISC)
8982 rx_mode = BNX2X_RX_MODE_PROMISC;
8983
8984 else if ((dev->flags & IFF_ALLMULTI) ||
8985 (dev->mc_count > BNX2X_MAX_MULTICAST))
8986 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8987
8988 else { /* some multicasts */
8989 int i, old, offset;
8990 struct dev_mc_list *mclist;
8991 struct mac_configuration_cmd *config =
8992 bnx2x_sp(bp, mcast_config);
8993
8994 for (i = 0, mclist = dev->mc_list;
8995 mclist && (i < dev->mc_count);
8996 i++, mclist = mclist->next) {
8997
8998 config->config_table[i].cam_entry.msb_mac_addr =
8999 swab16(*(u16 *)&mclist->dmi_addr[0]);
9000 config->config_table[i].cam_entry.middle_mac_addr =
9001 swab16(*(u16 *)&mclist->dmi_addr[2]);
9002 config->config_table[i].cam_entry.lsb_mac_addr =
9003 swab16(*(u16 *)&mclist->dmi_addr[4]);
9004 config->config_table[i].cam_entry.flags =
9005 cpu_to_le16(bp->port);
9006 config->config_table[i].target_table_entry.flags = 0;
9007 config->config_table[i].target_table_entry.
9008 client_id = 0;
9009 config->config_table[i].target_table_entry.
9010 vlan_id = 0;
9011
9012 DP(NETIF_MSG_IFUP,
9013 "setting MCAST[%d] (%04x:%04x:%04x)\n",
9014 i, config->config_table[i].cam_entry.msb_mac_addr,
9015 config->config_table[i].cam_entry.middle_mac_addr,
9016 config->config_table[i].cam_entry.lsb_mac_addr);
9017 }
9018 old = config->hdr.length_6b;
9019 if (old > i) {
9020 for (; i < old; i++) {
9021 if (CAM_IS_INVALID(config->config_table[i])) {
9022 i--; /* already invalidated */
9023 break;
9024 }
9025 /* invalidate */
9026 CAM_INVALIDATE(config->config_table[i]);
9027 }
9028 }
9029
9030 if (CHIP_REV_IS_SLOW(bp))
9031 offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
9032 else
9033 offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
9034
9035 config->hdr.length_6b = i;
9036 config->hdr.offset = offset;
9037 config->hdr.reserved0 = 0;
9038 config->hdr.reserved1 = 0;
9039
9040 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9041 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9042 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
9043 }
9044
9045 bp->rx_mode = rx_mode;
9046 bnx2x_set_storm_rx_mode(bp);
9047}
9048
9049static int bnx2x_poll(struct napi_struct *napi, int budget)
9050{
9051 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9052 napi);
9053 struct bnx2x *bp = fp->bp;
9054 int work_done = 0;
9055
9056#ifdef BNX2X_STOP_ON_ERROR
9057 if (unlikely(bp->panic))
9058 goto out_panic;
9059#endif
9060
9061 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9062 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9063 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9064
9065 bnx2x_update_fpsb_idx(fp);
9066
9067 if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
9068 bnx2x_tx_int(fp, budget);
9069
9070
9071 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9072 work_done = bnx2x_rx_int(fp, budget);
9073
9074
9075 rmb(); /* bnx2x_has_work() reads the status block */
9076
9077 /* must not complete if we consumed full budget */
9078 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9079
9080#ifdef BNX2X_STOP_ON_ERROR
9081out_panic:
9082#endif
9083 netif_rx_complete(bp->dev, napi);
9084
9085 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
9086 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9087 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
9088 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9089 }
9090
9091 return work_done;
9092}
9093
9094/* Called with netif_tx_lock.
9095 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9096 * netif_wake_queue().
9097 */
9098static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9099{
9100 struct bnx2x *bp = netdev_priv(dev);
9101 struct bnx2x_fastpath *fp;
9102 struct sw_tx_bd *tx_buf;
9103 struct eth_tx_bd *tx_bd;
9104 struct eth_tx_parse_bd *pbd = NULL;
9105 u16 pkt_prod, bd_prod;
9106 int nbd, fp_index = 0;
9107 dma_addr_t mapping;
9108
9109#ifdef BNX2X_STOP_ON_ERROR
9110 if (unlikely(bp->panic))
9111 return NETDEV_TX_BUSY;
9112#endif
9113
9114 fp_index = smp_processor_id() % (bp->num_queues);
9115
9116 fp = &bp->fp[fp_index];
9117 if (unlikely(bnx2x_tx_avail(bp->fp) <
9118 (skb_shinfo(skb)->nr_frags + 3))) {
9119 bp->slowpath->eth_stats.driver_xoff++,
9120 netif_stop_queue(dev);
9121 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9122 return NETDEV_TX_BUSY;
9123 }
9124
9125 /*
9126 This is a bit ugly. First we use one BD which we mark as start,
9127 then for TSO or xsum we have a parsing info BD,
9128 and only then we have the rest of the TSO bds.
9129 (don't forget to mark the last one as last,
9130 and to unmap only AFTER you write to the BD ...)
9131 I would like to thank DovH for this mess.
9132 */
9133
9134 pkt_prod = fp->tx_pkt_prod++;
9135 bd_prod = fp->tx_bd_prod;
9136 bd_prod = TX_BD(bd_prod);
9137
9138 /* get a tx_buff and first bd */
9139 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9140 tx_bd = &fp->tx_desc_ring[bd_prod];
9141
9142 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9143 tx_bd->general_data = (UNICAST_ADDRESS <<
9144 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9145 tx_bd->general_data |= 1; /* header nbd */
9146
c14423fe 9147 /* remember the first bd of the packet */
a2fbb9ea
ET
9148 tx_buf->first_bd = bd_prod;
9149
9150 DP(NETIF_MSG_TX_QUEUED,
9151 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9152 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9153
9154 if (skb->ip_summed == CHECKSUM_PARTIAL) {
9155 struct iphdr *iph = ip_hdr(skb);
9156 u8 len;
9157
9158 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
9159
9160 /* turn on parsing and get a bd */
9161 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9162 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9163 len = ((u8 *)iph - (u8 *)skb->data) / 2;
9164
9165 /* for now NS flag is not used in Linux */
9166 pbd->global_data = (len |
9167 ((skb->protocol == ETH_P_8021Q) <<
9168 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9169 pbd->ip_hlen = ip_hdrlen(skb) / 2;
9170 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
9171 if (iph->protocol == IPPROTO_TCP) {
9172 struct tcphdr *th = tcp_hdr(skb);
9173
9174 tx_bd->bd_flags.as_bitfield |=
9175 ETH_TX_BD_FLAGS_TCP_CSUM;
9176 pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
9177 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
9178 pbd->tcp_pseudo_csum = swab16(th->check);
9179
9180 } else if (iph->protocol == IPPROTO_UDP) {
9181 struct udphdr *uh = udp_hdr(skb);
9182
9183 tx_bd->bd_flags.as_bitfield |=
9184 ETH_TX_BD_FLAGS_TCP_CSUM;
9185 pbd->total_hlen += cpu_to_le16(4);
9186 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9187 pbd->cs_offset = 5; /* 10 >> 1 */
9188 pbd->tcp_pseudo_csum = 0;
9189 /* HW bug: we need to subtract 10 bytes before the
9190 * UDP header from the csum
9191 */
9192 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
9193 csum_partial(((u8 *)(uh)-10), 10, 0)));
9194 }
9195 }
9196
9197 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9198 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9199 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9200 } else {
9201 tx_bd->vlan = cpu_to_le16(pkt_prod);
9202 }
9203
9204 mapping = pci_map_single(bp->pdev, skb->data,
9205 skb->len, PCI_DMA_TODEVICE);
9206
9207 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9208 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9209 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9210 tx_bd->nbd = cpu_to_le16(nbd);
9211 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9212
9213 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9214 " nbytes %d flags %x vlan %u\n",
9215 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
9216 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
9217
9218 if (skb_shinfo(skb)->gso_size &&
9219 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
9220 int hlen = 2 * le32_to_cpu(pbd->total_hlen);
9221
9222 DP(NETIF_MSG_TX_QUEUED,
9223 "TSO packet len %d hlen %d total len %d tso size %d\n",
9224 skb->len, hlen, skb_headlen(skb),
9225 skb_shinfo(skb)->gso_size);
9226
9227 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9228
9229 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
9230 /* we split the first bd into headers and data bds
9231 * to ease the pain of our fellow micocode engineers
9232 * we use one mapping for both bds
9233 * So far this has only been observed to happen
9234 * in Other Operating Systems(TM)
9235 */
9236
9237 /* first fix first bd */
9238 nbd++;
9239 tx_bd->nbd = cpu_to_le16(nbd);
9240 tx_bd->nbytes = cpu_to_le16(hlen);
9241
9242 /* we only print this as an error
9243 * because we don't think this will ever happen.
9244 */
9245 BNX2X_ERR("TSO split header size is %d (%x:%x)"
9246 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
9247 tx_bd->addr_lo, tx_bd->nbd);
9248
9249 /* now get a new data bd
9250 * (after the pbd) and fill it */
9251 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9252 tx_bd = &fp->tx_desc_ring[bd_prod];
9253
9254 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9255 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
9256 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
9257 tx_bd->vlan = cpu_to_le16(pkt_prod);
9258 /* this marks the bd
9259 * as one that has no individual mapping
c14423fe 9260 * the FW ignores this flag in a bd not marked start
a2fbb9ea
ET
9261 */
9262 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9263 DP(NETIF_MSG_TX_QUEUED,
9264 "TSO split data size is %d (%x:%x)\n",
9265 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
9266 }
9267
9268 if (!pbd) {
9269 /* supposed to be unreached
9270 * (and therefore not handled properly...)
9271 */
9272 BNX2X_ERR("LSO with no PBD\n");
9273 BUG();
9274 }
9275
9276 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9277 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9278 pbd->ip_id = swab16(ip_hdr(skb)->id);
9279 pbd->tcp_pseudo_csum =
9280 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9281 ip_hdr(skb)->daddr,
9282 0, IPPROTO_TCP, 0));
9283 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9284 }
9285
9286 {
9287 int i;
9288
9289 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9290 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9291
9292 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9293 tx_bd = &fp->tx_desc_ring[bd_prod];
9294
9295 mapping = pci_map_page(bp->pdev, frag->page,
9296 frag->page_offset,
9297 frag->size, PCI_DMA_TODEVICE);
9298
9299 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9300 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9301 tx_bd->nbytes = cpu_to_le16(frag->size);
9302 tx_bd->vlan = cpu_to_le16(pkt_prod);
9303 tx_bd->bd_flags.as_bitfield = 0;
9304 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
9305 " addr (%x:%x) nbytes %d flags %x\n",
9306 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9307 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
9308 } /* for */
9309 }
9310
9311 /* now at last mark the bd as the last bd */
9312 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9313
9314 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9315 tx_bd, tx_bd->bd_flags.as_bitfield);
9316
9317 tx_buf->skb = skb;
9318
9319 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9320
9321 /* now send a tx doorbell, counting the next bd
9322 * if the packet contains or ends with it
9323 */
9324 if (TX_BD_POFF(bd_prod) < nbd)
9325 nbd++;
9326
9327 if (pbd)
9328 DP(NETIF_MSG_TX_QUEUED,
9329 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9330 " tcp_flags %x xsum %x seq %u hlen %u\n",
9331 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9332 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9333 pbd->tcp_send_seq, pbd->total_hlen);
9334
9335 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
9336
9337 fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
9338 mb(); /* FW restriction: must not reorder writing nbd and packets */
9339 fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
9340 DOORBELL(bp, fp_index, 0);
9341
9342 mmiowb();
9343
9344 fp->tx_bd_prod = bd_prod;
9345 dev->trans_start = jiffies;
9346
9347 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9348 netif_stop_queue(dev);
9349 bp->slowpath->eth_stats.driver_xoff++;
9350 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9351 netif_wake_queue(dev);
9352 }
9353 fp->tx_pkt++;
9354
9355 return NETDEV_TX_OK;
9356}
9357
9358static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
9359{
9360 return &dev->stats;
9361}
9362
9363/* Called with rtnl_lock */
9364static int bnx2x_open(struct net_device *dev)
9365{
9366 struct bnx2x *bp = netdev_priv(dev);
9367
9368 bnx2x_set_power_state(bp, PCI_D0);
9369
9370 return bnx2x_nic_load(bp, 1);
9371}
9372
9373/* Called with rtnl_lock */
9374static int bnx2x_close(struct net_device *dev)
9375{
9376 int rc;
9377 struct bnx2x *bp = netdev_priv(dev);
9378
9379 /* Unload the driver, release IRQs */
9380 rc = bnx2x_nic_unload(bp, 1);
9381 if (rc) {
9382 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
9383 return rc;
9384 }
9385 bnx2x_set_power_state(bp, PCI_D3hot);
9386
9387 return 0;
9388}
9389
9390/* Called with rtnl_lock */
9391static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9392{
9393 struct sockaddr *addr = p;
9394 struct bnx2x *bp = netdev_priv(dev);
9395
9396 if (!is_valid_ether_addr(addr->sa_data))
9397 return -EINVAL;
9398
9399 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9400 if (netif_running(dev))
9401 bnx2x_set_mac_addr(bp);
9402
9403 return 0;
9404}
9405
9406/* Called with rtnl_lock */
9407static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9408{
9409 struct mii_ioctl_data *data = if_mii(ifr);
9410 struct bnx2x *bp = netdev_priv(dev);
9411 int err;
9412
9413 switch (cmd) {
9414 case SIOCGMIIPHY:
9415 data->phy_id = bp->phy_addr;
9416
c14423fe 9417 /* fallthrough */
a2fbb9ea
ET
9418 case SIOCGMIIREG: {
9419 u32 mii_regval;
9420
9421 spin_lock_bh(&bp->phy_lock);
9422 if (bp->state == BNX2X_STATE_OPEN) {
9423 err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
9424 &mii_regval);
9425
9426 data->val_out = mii_regval;
9427 } else {
9428 err = -EAGAIN;
9429 }
9430 spin_unlock_bh(&bp->phy_lock);
9431 return err;
9432 }
9433
9434 case SIOCSMIIREG:
9435 if (!capable(CAP_NET_ADMIN))
9436 return -EPERM;
9437
9438 spin_lock_bh(&bp->phy_lock);
9439 if (bp->state == BNX2X_STATE_OPEN) {
9440 err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
9441 data->val_in);
9442 } else {
9443 err = -EAGAIN;
9444 }
9445 spin_unlock_bh(&bp->phy_lock);
9446 return err;
9447
9448 default:
9449 /* do nothing */
9450 break;
9451 }
9452
9453 return -EOPNOTSUPP;
9454}
9455
9456/* Called with rtnl_lock */
9457static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9458{
9459 struct bnx2x *bp = netdev_priv(dev);
9460
9461 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9462 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9463 return -EINVAL;
9464
9465 /* This does not race with packet allocation
c14423fe 9466 * because the actual alloc size is
a2fbb9ea
ET
9467 * only updated as part of load
9468 */
9469 dev->mtu = new_mtu;
9470
9471 if (netif_running(dev)) {
9472 bnx2x_nic_unload(bp, 0);
9473 bnx2x_nic_load(bp, 0);
9474 }
9475 return 0;
9476}
9477
9478static void bnx2x_tx_timeout(struct net_device *dev)
9479{
9480 struct bnx2x *bp = netdev_priv(dev);
9481
9482#ifdef BNX2X_STOP_ON_ERROR
9483 if (!bp->panic)
9484 bnx2x_panic();
9485#endif
9486 /* This allows the netif to be shutdown gracefully before resetting */
9487 schedule_work(&bp->reset_task);
9488}
9489
9490#ifdef BCM_VLAN
9491/* Called with rtnl_lock */
9492static void bnx2x_vlan_rx_register(struct net_device *dev,
9493 struct vlan_group *vlgrp)
9494{
9495 struct bnx2x *bp = netdev_priv(dev);
9496
9497 bp->vlgrp = vlgrp;
9498 if (netif_running(dev))
9499 bnx2x_set_rx_mode(dev);
9500}
9501#endif
9502
9503#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9504static void poll_bnx2x(struct net_device *dev)
9505{
9506 struct bnx2x *bp = netdev_priv(dev);
9507
9508 disable_irq(bp->pdev->irq);
9509 bnx2x_interrupt(bp->pdev->irq, dev);
9510 enable_irq(bp->pdev->irq);
9511}
9512#endif
9513
9514static void bnx2x_reset_task(struct work_struct *work)
9515{
9516 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
9517
9518#ifdef BNX2X_STOP_ON_ERROR
9519 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
9520 " so reset not done to allow debug dump,\n"
9521 KERN_ERR " you will need to reboot when done\n");
9522 return;
9523#endif
9524
9525 if (!netif_running(bp->dev))
9526 return;
9527
9528 bp->in_reset_task = 1;
9529
9530 bnx2x_netif_stop(bp);
9531
9532 bnx2x_nic_unload(bp, 0);
9533 bnx2x_nic_load(bp, 0);
9534
9535 bp->in_reset_task = 0;
9536}
9537
9538static int __devinit bnx2x_init_board(struct pci_dev *pdev,
9539 struct net_device *dev)
9540{
9541 struct bnx2x *bp;
9542 int rc;
9543
9544 SET_NETDEV_DEV(dev, &pdev->dev);
9545 bp = netdev_priv(dev);
9546
9547 bp->flags = 0;
9548 bp->port = PCI_FUNC(pdev->devfn);
9549
9550 rc = pci_enable_device(pdev);
9551 if (rc) {
9552 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9553 goto err_out;
9554 }
9555
9556 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9557 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9558 " aborting\n");
9559 rc = -ENODEV;
9560 goto err_out_disable;
9561 }
9562
9563 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9564 printk(KERN_ERR PFX "Cannot find second PCI device"
9565 " base address, aborting\n");
9566 rc = -ENODEV;
9567 goto err_out_disable;
9568 }
9569
9570 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9571 if (rc) {
9572 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9573 " aborting\n");
9574 goto err_out_disable;
9575 }
9576
9577 pci_set_master(pdev);
9578
9579 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9580 if (bp->pm_cap == 0) {
9581 printk(KERN_ERR PFX "Cannot find power management"
9582 " capability, aborting\n");
9583 rc = -EIO;
9584 goto err_out_release;
9585 }
9586
9587 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9588 if (bp->pcie_cap == 0) {
9589 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9590 " aborting\n");
9591 rc = -EIO;
9592 goto err_out_release;
9593 }
9594
9595 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9596 bp->flags |= USING_DAC_FLAG;
9597 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9598 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9599 " failed, aborting\n");
9600 rc = -EIO;
9601 goto err_out_release;
9602 }
9603
9604 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9605 printk(KERN_ERR PFX "System does not support DMA,"
9606 " aborting\n");
9607 rc = -EIO;
9608 goto err_out_release;
9609 }
9610
9611 bp->dev = dev;
9612 bp->pdev = pdev;
9613
9614 spin_lock_init(&bp->phy_lock);
9615
9616 bp->in_reset_task = 0;
9617
9618 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
9619 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
9620
cba0516d 9621 dev->base_addr = pci_resource_start(pdev, 0);
a2fbb9ea
ET
9622
9623 dev->irq = pdev->irq;
9624
9625 bp->regview = ioremap_nocache(dev->base_addr,
9626 pci_resource_len(pdev, 0));
9627 if (!bp->regview) {
9628 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9629 rc = -ENOMEM;
9630 goto err_out_release;
9631 }
9632
9633 bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2),
9634 pci_resource_len(pdev, 2));
9635 if (!bp->doorbells) {
9636 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9637 rc = -ENOMEM;
9638 goto err_out_unmap;
9639 }
9640
9641 bnx2x_set_power_state(bp, PCI_D0);
9642
9643 bnx2x_get_hwinfo(bp);
9644
9645 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
c14423fe 9646 printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
a2fbb9ea
ET
9647 " will only init first device\n");
9648 onefunc = 1;
9649 nomcp = 1;
9650 }
9651
9652 if (nomcp) {
9653 printk(KERN_ERR PFX "MCP disabled, will only"
9654 " init first device\n");
9655 onefunc = 1;
9656 }
9657
9658 if (onefunc && bp->port) {
9659 printk(KERN_ERR PFX "Second device disabled, exiting\n");
9660 rc = -ENODEV;
9661 goto err_out_unmap;
9662 }
9663
9664 bp->tx_ring_size = MAX_TX_AVAIL;
9665 bp->rx_ring_size = MAX_RX_AVAIL;
9666
9667 bp->rx_csum = 1;
9668
9669 bp->rx_offset = 0;
9670
9671 bp->tx_quick_cons_trip_int = 0xff;
9672 bp->tx_quick_cons_trip = 0xff;
9673 bp->tx_ticks_int = 50;
9674 bp->tx_ticks = 50;
9675
9676 bp->rx_quick_cons_trip_int = 0xff;
9677 bp->rx_quick_cons_trip = 0xff;
9678 bp->rx_ticks_int = 25;
9679 bp->rx_ticks = 25;
9680
9681 bp->stats_ticks = 1000000 & 0xffff00;
9682
9683 bp->timer_interval = HZ;
9684 bp->current_interval = (poll ? poll : HZ);
9685
9686 init_timer(&bp->timer);
9687 bp->timer.expires = jiffies + bp->current_interval;
9688 bp->timer.data = (unsigned long) bp;
9689 bp->timer.function = bnx2x_timer;
9690
9691 return 0;
9692
9693err_out_unmap:
9694 if (bp->regview) {
9695 iounmap(bp->regview);
9696 bp->regview = NULL;
9697 }
9698
9699 if (bp->doorbells) {
9700 iounmap(bp->doorbells);
9701 bp->doorbells = NULL;
9702 }
9703
9704err_out_release:
9705 pci_release_regions(pdev);
9706
9707err_out_disable:
9708 pci_disable_device(pdev);
9709 pci_set_drvdata(pdev, NULL);
9710
9711err_out:
9712 return rc;
9713}
9714
25047950
ET
9715static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
9716{
9717 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9718
9719 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9720 return val;
9721}
9722
9723/* return value of 1=2.5GHz 2=5GHz */
9724static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
9725{
9726 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9727
9728 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9729 return val;
9730}
9731
a2fbb9ea
ET
9732static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9733 const struct pci_device_id *ent)
9734{
9735 static int version_printed;
9736 struct net_device *dev = NULL;
9737 struct bnx2x *bp;
25047950 9738 int rc;
a2fbb9ea 9739 int port = PCI_FUNC(pdev->devfn);
25047950 9740 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
9741
9742 if (version_printed++ == 0)
9743 printk(KERN_INFO "%s", version);
9744
9745 /* dev zeroed in init_etherdev */
9746 dev = alloc_etherdev(sizeof(*bp));
9747 if (!dev)
9748 return -ENOMEM;
9749
9750 netif_carrier_off(dev);
9751
9752 bp = netdev_priv(dev);
9753 bp->msglevel = debug;
9754
9755 if (port && onefunc) {
9756 printk(KERN_ERR PFX "second function disabled. exiting\n");
25047950 9757 free_netdev(dev);
a2fbb9ea
ET
9758 return 0;
9759 }
9760
9761 rc = bnx2x_init_board(pdev, dev);
9762 if (rc < 0) {
9763 free_netdev(dev);
9764 return rc;
9765 }
9766
9767 dev->hard_start_xmit = bnx2x_start_xmit;
9768 dev->watchdog_timeo = TX_TIMEOUT;
9769
9770 dev->get_stats = bnx2x_get_stats;
9771 dev->ethtool_ops = &bnx2x_ethtool_ops;
9772 dev->open = bnx2x_open;
9773 dev->stop = bnx2x_close;
9774 dev->set_multicast_list = bnx2x_set_rx_mode;
9775 dev->set_mac_address = bnx2x_change_mac_addr;
9776 dev->do_ioctl = bnx2x_ioctl;
9777 dev->change_mtu = bnx2x_change_mtu;
9778 dev->tx_timeout = bnx2x_tx_timeout;
9779#ifdef BCM_VLAN
9780 dev->vlan_rx_register = bnx2x_vlan_rx_register;
9781#endif
9782#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9783 dev->poll_controller = poll_bnx2x;
9784#endif
9785 dev->features |= NETIF_F_SG;
9786 if (bp->flags & USING_DAC_FLAG)
9787 dev->features |= NETIF_F_HIGHDMA;
9788 dev->features |= NETIF_F_IP_CSUM;
9789#ifdef BCM_VLAN
9790 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9791#endif
9792 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
9793
9794 rc = register_netdev(dev);
9795 if (rc) {
c14423fe 9796 dev_err(&pdev->dev, "Cannot register net device\n");
a2fbb9ea
ET
9797 if (bp->regview)
9798 iounmap(bp->regview);
9799 if (bp->doorbells)
9800 iounmap(bp->doorbells);
9801 pci_release_regions(pdev);
9802 pci_disable_device(pdev);
9803 pci_set_drvdata(pdev, NULL);
9804 free_netdev(dev);
9805 return rc;
9806 }
9807
9808 pci_set_drvdata(pdev, dev);
9809
9810 bp->name = board_info[ent->driver_data].name;
25047950
ET
9811 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
9812 " IRQ %d, ", dev->name, bp->name,
a2fbb9ea
ET
9813 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
9814 ((CHIP_ID(bp) & 0x0ff0) >> 4),
25047950
ET
9815 bnx2x_get_pcie_width(bp),
9816 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
9817 dev->base_addr, bp->pdev->irq);
9818 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea
ET
9819 return 0;
9820}
9821
9822static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9823{
9824 struct net_device *dev = pci_get_drvdata(pdev);
9825 struct bnx2x *bp = netdev_priv(dev);
9826
9827 flush_scheduled_work();
9828 /*tasklet_kill(&bp->sp_task);*/
9829 unregister_netdev(dev);
9830
9831 if (bp->regview)
9832 iounmap(bp->regview);
9833
9834 if (bp->doorbells)
9835 iounmap(bp->doorbells);
9836
9837 free_netdev(dev);
9838 pci_release_regions(pdev);
9839 pci_disable_device(pdev);
9840 pci_set_drvdata(pdev, NULL);
9841}
9842
9843static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
9844{
9845 struct net_device *dev = pci_get_drvdata(pdev);
9846 struct bnx2x *bp = netdev_priv(dev);
9847 int rc;
9848
9849 if (!netif_running(dev))
9850 return 0;
9851
9852 rc = bnx2x_nic_unload(bp, 0);
9853 if (!rc)
9854 return rc;
9855
9856 netif_device_detach(dev);
9857 pci_save_state(pdev);
9858
9859 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
9860 return 0;
9861}
9862
9863static int bnx2x_resume(struct pci_dev *pdev)
9864{
9865 struct net_device *dev = pci_get_drvdata(pdev);
9866 struct bnx2x *bp = netdev_priv(dev);
9867 int rc;
9868
9869 if (!netif_running(dev))
9870 return 0;
9871
9872 pci_restore_state(pdev);
9873
9874 bnx2x_set_power_state(bp, PCI_D0);
9875 netif_device_attach(dev);
9876
9877 rc = bnx2x_nic_load(bp, 0);
9878 if (rc)
9879 return rc;
9880
9881 return 0;
9882}
9883
9884static struct pci_driver bnx2x_pci_driver = {
9885 .name = DRV_MODULE_NAME,
9886 .id_table = bnx2x_pci_tbl,
9887 .probe = bnx2x_init_one,
9888 .remove = __devexit_p(bnx2x_remove_one),
9889 .suspend = bnx2x_suspend,
9890 .resume = bnx2x_resume,
9891};
9892
9893static int __init bnx2x_init(void)
9894{
9895 return pci_register_driver(&bnx2x_pci_driver);
9896}
9897
9898static void __exit bnx2x_cleanup(void)
9899{
9900 pci_unregister_driver(&bnx2x_pci_driver);
9901}
9902
9903module_init(bnx2x_init);
9904module_exit(bnx2x_cleanup);
9905
This page took 0.848959 seconds and 5 git commands to generate.