bnx2x: removed unused variables
drivers/net/bnx2x/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
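
/*
 * For reference: with hypothetical version macros 6.2.9.0, FW_FILE_NAME_E1
 * expands to "bnx2x/bnx2x-e1-6.2.9.0.fw", the name the firmware loader
 * (request_firmware()) resolves under the firmware search path.
 */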

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

#define INT_MODE_INTx			1
#define INT_MODE_MSI			2
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

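/*
 * Note on __storm_memset_fill(): 'size' is in bytes but is written as
 * size/4 dwords, so any trailing partial dword is skipped. Callers pass
 * sizeof() of dword-aligned firmware structures, e.g.:
 *
 *	__storm_memset_fill(bp, addr,
 *			    sizeof(struct ustorm_per_client_stats), 0);
 */
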
static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
				struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

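/*
 * The pair above tunnels GRC accesses through PCI config space: the
 * target address goes to the PCICFG_GRC_ADDRESS window, the data moves
 * via PCICFG_GRC_DATA, and the window is then parked back at
 * PCICFG_VENDOR_ID_OFFSET so a stray config cycle cannot touch device
 * memory. This works before any BAR-based access is set up.
 */
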
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

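/*
 * Usage sketch: a host-to-GRC copy with a PCI-side completion write,
 * which is exactly what bnx2x_prep_dmae_with_comp() below requests:
 *
 *	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 */
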
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

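/*
 * Polling budget above: after the initial udelay(5), up to 4000
 * iterations of udelay(50) allow roughly 200 ms on real silicon
 * (400000 iterations on slow emulation platforms) before the command
 * is declared DMAE_TIMEOUT.
 */
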
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

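/*
 * In both helpers above len32 counts 32-bit words, while GRC offsets
 * are byte addresses (hence the ">> 2" when a GRC address is placed in
 * the command). Example sketch, with grc_off a hypothetical
 * dword-aligned GRC offset:
 *
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), grc_off, 4);
 */
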
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - cannot dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		printk("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static inline void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
		       bp->def_status_blk->sp_sb.index_values[i],
		       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
			       fp->sb_running_index[j],
			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
			       fp->sb_index_values[j],
			       (j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
			       "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
			       "time_to_expire (0x%x) "
			       "timer_value(0x%x)\n", j,
			       hc_sm_p[j].__flags,
			       hc_sm_p[j].igu_sb_id,
			       hc_sm_p[j].igu_seg_id,
			       hc_sm_p[j].time_to_expire,
			       hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
					 "timeout (0x%x)\n", j,
					 hc_index_p[j].flags,
					 hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

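/*
 * bnx2x_int_enable() above is the single entry point callers use;
 * whether the HC or the IGU variant runs is decided per chip by
 * bp->common.int_block (E1x parts route interrupts through the HC
 * block, E2 parts through the IGU).
 */
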
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability; it is forbidden to clear
	 * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN stays on, use the mask
		 * register to prevent the HC from sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in fp->state and towards the memory */
	smp_wmb();

	return;
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

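/*
 * Layout of the acked status word handled above: bit 0 is the slowpath
 * indication (it queues sp_task), while each ethernet queue owns the
 * bit 0x2 << (index + CNIC_CONTEXT_USE). Bits are cleared as each owner
 * is scheduled, so any leftovers can be reported as unknown.
 */
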
/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5 ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

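/*
 * Typical pairing for the two functions above, as used by the GPIO and
 * SPIO helpers that follow:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... read-modify-write MISC_REG_GPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */
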
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

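/*
 * Usage sketch: float GPIO 3 on the current port; the helper applies
 * the port-swap strap and takes the HW lock internally:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, BP_PORT(bp));
 */
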
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

1693
c18487ee 1694static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1695{
c18487ee
YR
1696 u32 spio_mask = (1 << spio_num);
1697 u32 spio_reg;
a2fbb9ea 1698
c18487ee
YR
1699 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1700 (spio_num > MISC_REGISTERS_SPIO_7)) {
1701 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1702 return -EINVAL;
a2fbb9ea
ET
1703 }
1704
4a37fb66 1705 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1706 /* read SPIO and mask except the float bits */
1707 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1708
c18487ee 1709 switch (mode) {
6378c025 1710 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1711 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1712 /* clear FLOAT and set CLR */
1713 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1714 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1715 break;
a2fbb9ea 1716
6378c025 1717 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1718 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1719 /* clear FLOAT and set SET */
1720 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1721 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1722 break;
a2fbb9ea 1723
c18487ee
YR
1724 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1725 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1726 /* set FLOAT */
1727 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1728 break;
a2fbb9ea 1729
c18487ee
YR
1730 default:
1731 break;
a2fbb9ea
ET
1732 }
1733
c18487ee 1734 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1735 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1736
a2fbb9ea
ET
1737 return 0;
1738}
1739
9f6c9258 1740void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1741{
a22f0788 1742 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1743 switch (bp->link_vars.ieee_fc &
1744 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1745 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1746 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1747 ADVERTISED_Pause);
c18487ee 1748 break;
356e2385 1749
c18487ee 1750 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1751 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1752 ADVERTISED_Pause);
c18487ee 1753 break;
356e2385 1754
c18487ee 1755 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1756 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1757 break;
356e2385 1758
c18487ee 1759 default:
a22f0788 1760 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1761 ADVERTISED_Pause);
c18487ee
YR
1762 break;
1763 }
1764}
f1410647 1765
9f6c9258 1766u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1767{
19680c48
EG
1768 if (!BP_NOMCP(bp)) {
1769 u8 rc;
a22f0788
YR
1770 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1771 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1772 /* Initialize link parameters structure variables */
8c99e7b0
YR
1773 /* It is recommended to turn off RX FC for jumbo frames
1774 for better performance */
f2e0899f 1775 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1776 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1777 else
c0700f90 1778 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1779
4a37fb66 1780 bnx2x_acquire_phy_lock(bp);
b5bf9068 1781
a22f0788 1782 if (load_mode == LOAD_DIAG) {
de6eae1f 1783 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1784 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1785 }
b5bf9068 1786
19680c48 1787 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1788
4a37fb66 1789 bnx2x_release_phy_lock(bp);
a2fbb9ea 1790
3c96c68b
EG
1791 bnx2x_calc_fc_adv(bp);
1792
b5bf9068
EG
1793 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1794 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1795 bnx2x_link_report(bp);
b5bf9068 1796 }
a22f0788 1797 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1798 return rc;
1799 }
f5372251 1800 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1801 return -EINVAL;
a2fbb9ea
ET
1802}
1803
9f6c9258 1804void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1805{
19680c48 1806 if (!BP_NOMCP(bp)) {
4a37fb66 1807 bnx2x_acquire_phy_lock(bp);
54c2fb78 1808 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1809 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1810 bnx2x_release_phy_lock(bp);
a2fbb9ea 1811
19680c48
EG
1812 bnx2x_calc_fc_adv(bp);
1813 } else
f5372251 1814 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1815}
a2fbb9ea 1816
c18487ee
YR
1817static void bnx2x__link_reset(struct bnx2x *bp)
1818{
19680c48 1819 if (!BP_NOMCP(bp)) {
4a37fb66 1820 bnx2x_acquire_phy_lock(bp);
589abe3a 1821 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1822 bnx2x_release_phy_lock(bp);
19680c48 1823 } else
f5372251 1824 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1825}
a2fbb9ea 1826
a22f0788 1827u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1828{
2145a920 1829 u8 rc = 0;
a2fbb9ea 1830
2145a920
VZ
1831 if (!BP_NOMCP(bp)) {
1832 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1833 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1834 is_serdes);
2145a920
VZ
1835 bnx2x_release_phy_lock(bp);
1836 } else
1837 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1838
c18487ee
YR
1839 return rc;
1840}
a2fbb9ea 1841
8a1c38d1 1842static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1843{
8a1c38d1
EG
1844 u32 r_param = bp->link_vars.line_speed / 8;
1845 u32 fair_periodic_timeout_usec;
1846 u32 t_fair;
34f80b04 1847
8a1c38d1
EG
1848 memset(&(bp->cmng.rs_vars), 0,
1849 sizeof(struct rate_shaping_vars_per_port));
1850 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1851
8a1c38d1
EG
1852 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1853 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1854
8a1c38d1
EG
1855 /* this is the threshold below which no timer arming will occur
1856 1.25 coefficient is for the threshold to be a little bigger
1857 	than the real time, to compensate for timer inaccuracy */
1858 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1859 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1860
8a1c38d1
EG
1861 /* resolution of fairness timer */
1862 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1863 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1864 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1865
8a1c38d1
EG
1866 /* this is the threshold below which we won't arm the timer anymore */
1867 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1868
8a1c38d1
EG
1869 /* we multiply by 1e3/8 to get bytes/msec.
1870 	We don't want the credits to exceed
1871 	t_fair*FAIR_MEM (the algorithm resolution) */
1872 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1873 /* since each tick is 4 usec */
1874 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1875}
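/*
 * Worked example (illustrative only) of the arithmetic above for a
 * 10 Gbps link: r_param = 10000 / 8 = 1250 bytes per usec, the
 * fairness timer resolution is QM_ARB_BYTES / r_param usec, and
 * t_fair = T_FAIR_COEF / 10000 usec; both timers are then stored in
 * 4 usec SDM ticks.
 */
static void example_port_minmax_10g(struct bnx2x *bp)
{
	u32 r_param = SPEED_10000 / 8;			/* bytes per usec */
	u32 fair_timeout = QM_ARB_BYTES / r_param;	/* usec */
	u32 t_fair = T_FAIR_COEF / SPEED_10000;		/* usec */

	DP(NETIF_MSG_IFUP, "fair timeout %u ticks, t_fair %u usec\n",
	   fair_timeout / 4, t_fair);
}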
1876
2691d51d
EG
1877/* Calculates the sum of vn_min_rates.
1878 It's needed for further normalizing of the min_rates.
1879 Returns:
1880 sum of vn_min_rates.
1881 or
1882 0 - if all the min_rates are 0.
1883 	In the latter case the fairness algorithm should be deactivated.
1884 If not all min_rates are zero then those that are zeroes will be set to 1.
1885 */
1886static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1887{
1888 int all_zero = 1;
2691d51d
EG
1889 int vn;
1890
1891 bp->vn_weight_sum = 0;
1892 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1893 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1894 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1895 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1896
1897 /* Skip hidden vns */
1898 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1899 continue;
1900
1901 /* If min rate is zero - set it to 1 */
1902 if (!vn_min_rate)
1903 vn_min_rate = DEF_MIN_RATE;
1904 else
1905 all_zero = 0;
1906
1907 bp->vn_weight_sum += vn_min_rate;
1908 }
1909
30ae438b
DK
1910 /* if ETS or all min rates are zeros - disable fairness */
1911 if (BNX2X_IS_ETS_ENABLED(bp)) {
1912 bp->cmng.flags.cmng_enables &=
1913 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1914 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
1915 } else if (all_zero) {
b015e3d1
EG
1916 bp->cmng.flags.cmng_enables &=
1917 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1918 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1919 " fairness will be disabled\n");
1920 } else
1921 bp->cmng.flags.cmng_enables |=
1922 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1923}
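/*
 * Minimal sketch (not driver code) of the normalization rule above: a
 * vn configured with a zero min rate still contributes DEF_MIN_RATE to
 * the weight sum, so the fairness weights never divide by zero; only
 * the all-zero case disables fairness entirely.
 */
static u32 example_vn_weight_sum(const u16 *min_rates, int n)
{
	u32 sum = 0;
	int all_zero = 1, i;

	for (i = 0; i < n; i++) {
		if (min_rates[i])
			all_zero = 0;
		sum += min_rates[i] ? min_rates[i] : DEF_MIN_RATE;
	}
	return all_zero ? 0 : sum;
}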
1924
f2e0899f 1925static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1926{
1927 struct rate_shaping_vars_per_vn m_rs_vn;
1928 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1929 u32 vn_cfg = bp->mf_config[vn];
1930 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1931 u16 vn_min_rate, vn_max_rate;
1932 int i;
1933
1934 /* If function is hidden - set min and max to zeroes */
1935 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1936 vn_min_rate = 0;
1937 vn_max_rate = 0;
1938
1939 } else {
faa6fcbb
DK
1940 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1941
34f80b04
EG
1942 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1943 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
faa6fcbb
DK
1944 /* If fairness is enabled (not all min rates are zeroes) and
1945 if current min rate is zero - set it to 1.
1946 This is a requirement of the algorithm. */
f2e0899f 1947 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04 1948 vn_min_rate = DEF_MIN_RATE;
faa6fcbb
DK
1949
1950 if (IS_MF_SI(bp))
1951 /* maxCfg in percents of linkspeed */
1952 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1953 else
1954 /* maxCfg is absolute in 100Mb units */
1955 vn_max_rate = maxCfg * 100;
34f80b04 1956 }
f85582f8 1957
8a1c38d1 1958 DP(NETIF_MSG_IFUP,
b015e3d1 1959 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1960 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1961
1962 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1963 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1964
1965 /* global vn counter - maximal Mbps for this vn */
1966 m_rs_vn.vn_counter.rate = vn_max_rate;
1967
1968 /* quota - number of bytes transmitted in this period */
1969 m_rs_vn.vn_counter.quota =
1970 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1971
8a1c38d1 1972 if (bp->vn_weight_sum) {
34f80b04
EG
1973 /* credit for each period of the fairness algorithm:
1974 	   number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
1975 vn_weight_sum should not be larger than 10000, thus
1976 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1977 than zero */
34f80b04 1978 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1979 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1980 (8 * bp->vn_weight_sum))),
ff80ee02
DK
1981 (bp->cmng.fair_vars.fair_threshold +
1982 MIN_ABOVE_THRESH));
cdaa7cb8 1983 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
1984 m_fair_vn.vn_credit_delta);
1985 }
1986
34f80b04
EG
1987 /* Store it to internal memory */
1988 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1989 REG_WR(bp, BAR_XSTRORM_INTMEM +
1990 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1991 ((u32 *)(&m_rs_vn))[i]);
1992
1993 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1994 REG_WR(bp, BAR_XSTRORM_INTMEM +
1995 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1996 ((u32 *)(&m_fair_vn))[i]);
1997}
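/*
 * Illustrative helper (a sketch, not the driver's API): the two
 * interpretations of maxCfg used above. With a 10000 Mbps link and
 * maxCfg = 30, SI mode (percent of link speed) and SD mode (absolute
 * 100 Mb units) both happen to yield 3000 Mbps; at other link speeds
 * the two encodings diverge.
 */
static u16 example_vn_max_rate(int is_mf_si, u32 line_speed, u32 max_cfg)
{
	if (is_mf_si)
		return (line_speed * max_cfg) / 100;	/* percent of link */

	return max_cfg * 100;			/* absolute, 100 Mb units */
}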
f85582f8 1998
523224a3
DK
1999static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2000{
2001 if (CHIP_REV_IS_SLOW(bp))
2002 return CMNG_FNS_NONE;
fb3bff17 2003 if (IS_MF(bp))
523224a3
DK
2004 return CMNG_FNS_MINMAX;
2005
2006 return CMNG_FNS_NONE;
2007}
2008
2ae17f66 2009void bnx2x_read_mf_cfg(struct bnx2x *bp)
523224a3 2010{
0793f83f 2011 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
523224a3
DK
2012
2013 if (BP_NOMCP(bp))
2014 		return; /* what should be the default value in this case */
2015
0793f83f
DK
2016 /* For 2 port configuration the absolute function number formula
2017 * is:
2018 * abs_func = 2 * vn + BP_PORT + BP_PATH
2019 *
2020 * and there are 4 functions per port
2021 *
2022 * For 4 port configuration it is
2023 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2024 *
2025 * and there are 2 functions per port
2026 */
523224a3 2027 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
0793f83f
DK
2028 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2029
2030 if (func >= E1H_FUNC_MAX)
2031 break;
2032
f2e0899f 2033 bp->mf_config[vn] =
523224a3
DK
2034 MF_CFG_RD(bp, func_mf_config[func].config);
2035 }
2036}
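/*
 * Worked example of the absolute-function formula above (illustrative
 * only): vn 2 on port 1, path 0 gives abs_func = 1 * (2 * 2 + 1) + 0
 * = 5 in 2-port mode, and 2 * (2 * 2 + 1) + 0 = 10 in 4-port mode,
 * which matches 4 * vn + 2 * port + path.
 */
static int example_abs_func(int four_port_mode, int vn, int port, int path)
{
	int n = four_port_mode ? 2 : 1;

	return n * (2 * vn + port) + path;
}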
2037
2038static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2039{
2040
2041 if (cmng_type == CMNG_FNS_MINMAX) {
2042 int vn;
2043
2044 /* clear cmng_enables */
2045 bp->cmng.flags.cmng_enables = 0;
2046
2047 /* read mf conf from shmem */
2048 if (read_cfg)
2049 bnx2x_read_mf_cfg(bp);
2050
2051 /* Init rate shaping and fairness contexts */
2052 bnx2x_init_port_minmax(bp);
2053
2054 		/* calculate vn_weight_sum and enable fairness if it is not 0 */
2055 bnx2x_calc_vn_weight_sum(bp);
2056
2057 /* calculate and set min-max rate for each vn */
c4154f25
DK
2058 if (bp->port.pmf)
2059 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2060 bnx2x_init_vn_minmax(bp, vn);
523224a3
DK
2061
2062 /* always enable rate shaping and fairness */
2063 bp->cmng.flags.cmng_enables |=
2064 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2065 if (!bp->vn_weight_sum)
2066 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2067 " fairness will be disabled\n");
2068 return;
2069 }
2070
2071 /* rate shaping and fairness are disabled */
2072 DP(NETIF_MSG_IFUP,
2073 "rate shaping and fairness are disabled\n");
2074}
34f80b04 2075
523224a3
DK
2076static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2077{
2078 int port = BP_PORT(bp);
2079 int func;
2080 int vn;
2081
2082 /* Set the attention towards other drivers on the same port */
2083 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2084 if (vn == BP_E1HVN(bp))
2085 continue;
2086
2087 func = ((vn << 1) | port);
2088 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2089 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2090 }
2091}
8a1c38d1 2092
c18487ee
YR
2093/* This function is called upon link interrupt */
2094static void bnx2x_link_attn(struct bnx2x *bp)
2095{
bb2a0f7a
YG
2096 /* Make sure that we are synced with the current statistics */
2097 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2098
c18487ee 2099 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2100
bb2a0f7a
YG
2101 if (bp->link_vars.link_up) {
2102
1c06328c 2103 /* dropless flow control */
f2e0899f 2104 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2105 int port = BP_PORT(bp);
2106 u32 pause_enabled = 0;
2107
2108 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2109 pause_enabled = 1;
2110
2111 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2112 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2113 pause_enabled);
2114 }
2115
bb2a0f7a
YG
2116 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2117 struct host_port_stats *pstats;
2118
2119 pstats = bnx2x_sp(bp, port_stats);
2120 /* reset old bmac stats */
2121 memset(&(pstats->mac_stx[0]), 0,
2122 sizeof(struct mac_stx));
2123 }
f34d28ea 2124 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2125 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2126 }
2127
f2e0899f
DK
2128 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2129 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2130
f2e0899f
DK
2131 if (cmng_fns != CMNG_FNS_NONE) {
2132 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2133 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2134 } else
2135 /* rate shaping and fairness are disabled */
2136 DP(NETIF_MSG_IFUP,
2137 "single function mode without fairness\n");
34f80b04 2138 }
9fdc3e95 2139
2ae17f66
VZ
2140 __bnx2x_link_report(bp);
2141
9fdc3e95
DK
2142 if (IS_MF(bp))
2143 bnx2x_link_sync_notify(bp);
c18487ee 2144}
a2fbb9ea 2145
9f6c9258 2146void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2147{
2ae17f66 2148 if (bp->state != BNX2X_STATE_OPEN)
c18487ee 2149 return;
a2fbb9ea 2150
c18487ee 2151 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2152
bb2a0f7a
YG
2153 if (bp->link_vars.link_up)
2154 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2155 else
2156 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2157
c18487ee
YR
2158 /* indicate link status */
2159 bnx2x_link_report(bp);
a2fbb9ea 2160}
a2fbb9ea 2161
34f80b04
EG
2162static void bnx2x_pmf_update(struct bnx2x *bp)
2163{
2164 int port = BP_PORT(bp);
2165 u32 val;
2166
2167 bp->port.pmf = 1;
2168 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2169
2170 /* enable nig attention */
2171 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2172 if (bp->common.int_block == INT_BLOCK_HC) {
2173 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2174 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2175 } else if (CHIP_IS_E2(bp)) {
2176 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2177 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2178 }
bb2a0f7a
YG
2179
2180 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2181}
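/*
 * Bit sketch for the NIG attention value above (an interpretation,
 * illustrative only): the constant 0xff0f enables the common
 * attention lines and bit (4 + vn) adds this function's link-change
 * line, e.g. vn 2 gives 0xff0f | 0x40 = 0xff4f.
 */
static inline u32 example_nig_attn_val(int vn)
{
	return 0xff0f | (1 << (vn + 4));	/* vn 2 -> 0xff4f */
}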
2182
c18487ee 2183/* end of Link */
a2fbb9ea
ET
2184
2185/* slow path */
2186
2187/*
2188 * General service functions
2189 */
2190
2691d51d 2191/* send the MCP a request, block until there is a reply */
a22f0788 2192u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2193{
f2e0899f 2194 int mb_idx = BP_FW_MB_IDX(bp);
a5971d43 2195 u32 seq;
2691d51d
EG
2196 u32 rc = 0;
2197 u32 cnt = 1;
2198 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2199
c4ff7cbf 2200 mutex_lock(&bp->fw_mb_mutex);
a5971d43 2201 seq = ++bp->fw_seq;
f2e0899f
DK
2202 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2203 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2204
2691d51d
EG
2205 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2206
2207 do {
2208 		/* let the FW do its magic ... */
2209 msleep(delay);
2210
f2e0899f 2211 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2212
c4ff7cbf
EG
2213 		/* Give the FW up to 5 seconds (500*10ms) */
2214 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2215
2216 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2217 cnt*delay, rc, seq);
2218
2219 /* is this a reply to our command? */
2220 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2221 rc &= FW_MSG_CODE_MASK;
2222 else {
2223 /* FW BUG! */
2224 BNX2X_ERR("FW failed to respond!\n");
2225 bnx2x_fw_dump(bp);
2226 rc = 0;
2227 }
c4ff7cbf 2228 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2229
2230 return rc;
2231}
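/*
 * Minimal sketch of the reply check above (illustrative): the driver
 * stamps each command with a sequence number and polls until the
 * firmware echoes that number back, so a stale header is never
 * mistaken for a fresh reply.
 */
static u32 example_mcp_reply(u32 fw_header, u32 seq)
{
	if (seq == (fw_header & FW_MSG_SEQ_NUMBER_MASK))
		return fw_header & FW_MSG_CODE_MASK;	/* valid reply */

	return 0;				/* no or late response */
}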
2232
ec6ba945
VZ
2233static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2234{
2235#ifdef BCM_CNIC
2236 if (IS_FCOE_FP(fp) && IS_MF(bp))
2237 return false;
2238#endif
2239 return true;
2240}
2241
523224a3 2242/* must be called under rtnl_lock */
8d96286a 2243static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2244{
523224a3 2245 u32 mask = (1 << cl_id);
2691d51d 2246
523224a3
DK
2247 	/* initial setting is BNX2X_ACCEPT_NONE */
2248 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2249 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2250 u8 unmatched_unicast = 0;
2691d51d 2251
0793f83f
DK
2252 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2253 unmatched_unicast = 1;
2254
523224a3
DK
2255 if (filters & BNX2X_PROMISCUOUS_MODE) {
2256 		/* promiscuous - accept all, drop none */
2257 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2258 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
0793f83f
DK
2259 if (IS_MF_SI(bp)) {
2260 /*
2261 			 * SI mode is defined to accept only
2262 			 * unmatched packets in promiscuous mode
2263 */
2264 unmatched_unicast = 1;
2265 accp_all_ucast = 0;
2266 }
523224a3
DK
2267 }
2268 if (filters & BNX2X_ACCEPT_UNICAST) {
2269 /* accept matched ucast */
2270 drop_all_ucast = 0;
2271 }
d9c8f498 2272 if (filters & BNX2X_ACCEPT_MULTICAST)
523224a3
DK
2273 /* accept matched mcast */
2274 drop_all_mcast = 0;
d9c8f498 2275
523224a3
DK
2276 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2277 		/* accept all ucast */
2278 drop_all_ucast = 0;
2279 accp_all_ucast = 1;
2280 }
2281 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2282 /* accept all mcast */
2283 drop_all_mcast = 0;
2284 accp_all_mcast = 1;
2285 }
2286 if (filters & BNX2X_ACCEPT_BROADCAST) {
2287 /* accept (all) bcast */
2288 drop_all_bcast = 0;
2289 accp_all_bcast = 1;
2290 }
2691d51d 2291
523224a3
DK
2292 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2293 bp->mac_filters.ucast_drop_all | mask :
2294 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2295
523224a3
DK
2296 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2297 bp->mac_filters.mcast_drop_all | mask :
2298 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2299
523224a3
DK
2300 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2301 bp->mac_filters.bcast_drop_all | mask :
2302 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2303
523224a3
DK
2304 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2305 bp->mac_filters.ucast_accept_all | mask :
2306 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2307
523224a3
DK
2308 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2309 bp->mac_filters.mcast_accept_all | mask :
2310 bp->mac_filters.mcast_accept_all & ~mask;
2311
2312 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2313 bp->mac_filters.bcast_accept_all | mask :
2314 bp->mac_filters.bcast_accept_all & ~mask;
2315
2316 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2317 bp->mac_filters.unmatched_unicast | mask :
2318 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2319}
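/*
 * The eight assignments above all apply one conditional bit pattern; a
 * compact equivalent (illustrative sketch only) is:
 */
static inline u32 example_update_filter(u32 field, u32 mask, int set)
{
	return set ? (field | mask) : (field & ~mask);
}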
2320
8d96286a 2321static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2322{
030f3356
DK
2323 struct tstorm_eth_function_common_config tcfg = {0};
2324 u16 rss_flgs;
2691d51d 2325
030f3356
DK
2326 /* tpa */
2327 if (p->func_flgs & FUNC_FLG_TPA)
2328 tcfg.config_flags |=
2329 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2330
030f3356
DK
2331 /* set rss flags */
2332 rss_flgs = (p->rss->mode <<
2333 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2334
2335 if (p->rss->cap & RSS_IPV4_CAP)
2336 rss_flgs |= RSS_IPV4_CAP_MASK;
2337 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2338 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2339 if (p->rss->cap & RSS_IPV6_CAP)
2340 rss_flgs |= RSS_IPV6_CAP_MASK;
2341 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2342 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2343
2344 tcfg.config_flags |= rss_flgs;
2345 tcfg.rss_result_mask = p->rss->result_mask;
2346
2347 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2348
523224a3
DK
2349 /* Enable the function in the FW */
2350 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2351 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2352
523224a3
DK
2353 /* statistics */
2354 if (p->func_flgs & FUNC_FLG_STATS) {
2355 struct stats_indication_flags stats_flags = {0};
2356 stats_flags.collect_eth = 1;
2691d51d 2357
523224a3
DK
2358 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2359 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2360
523224a3
DK
2361 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2362 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2363
523224a3
DK
2364 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2365 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2366
523224a3
DK
2367 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2368 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2369 }
2370
523224a3
DK
2371 /* spq */
2372 if (p->func_flgs & FUNC_FLG_SPQ) {
2373 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2374 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2375 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2376 }
2691d51d
EG
2377}
2378
523224a3
DK
2379static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2380 struct bnx2x_fastpath *fp)
28912902 2381{
523224a3 2382 u16 flags = 0;
28912902 2383
523224a3
DK
2384 /* calculate queue flags */
2385 flags |= QUEUE_FLG_CACHE_ALIGN;
2386 flags |= QUEUE_FLG_HC;
0793f83f 2387 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
28912902 2388
523224a3
DK
2389 flags |= QUEUE_FLG_VLAN;
2390 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
523224a3
DK
2391
2392 if (!fp->disable_tpa)
2393 flags |= QUEUE_FLG_TPA;
2394
ec6ba945
VZ
2395 flags = stat_counter_valid(bp, fp) ?
2396 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
523224a3
DK
2397
2398 return flags;
2399}
2400
2401static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2402 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2403 struct bnx2x_rxq_init_params *rxq_init)
2404{
2405 u16 max_sge = 0;
2406 u16 sge_sz = 0;
2407 u16 tpa_agg_size = 0;
2408
2409 /* calculate queue flags */
2410 u16 flags = bnx2x_get_cl_flags(bp, fp);
2411
2412 if (!fp->disable_tpa) {
2413 pause->sge_th_hi = 250;
2414 pause->sge_th_lo = 150;
2415 tpa_agg_size = min_t(u32,
2416 (min_t(u32, 8, MAX_SKB_FRAGS) *
2417 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2418 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2419 SGE_PAGE_SHIFT;
2420 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2421 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2422 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2423 0xffff);
2424 }
2425
2426 /* pause - not for e1 */
2427 if (!CHIP_IS_E1(bp)) {
2428 pause->bd_th_hi = 350;
2429 pause->bd_th_lo = 250;
2430 pause->rcq_th_hi = 350;
2431 pause->rcq_th_lo = 250;
2432 pause->sge_th_hi = 0;
2433 pause->sge_th_lo = 0;
2434 pause->pri_map = 1;
2435 }
2436
2437 /* rxq setup */
2438 rxq_init->flags = flags;
2439 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2440 rxq_init->dscr_map = fp->rx_desc_mapping;
2441 rxq_init->sge_map = fp->rx_sge_mapping;
2442 rxq_init->rcq_map = fp->rx_comp_mapping;
2443 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
a8c94b91
VZ
2444
2445 /* Always use mini-jumbo MTU for FCoE L2 ring */
2446 if (IS_FCOE_FP(fp))
2447 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2448 else
2449 rxq_init->mtu = bp->dev->mtu;
2450
2451 rxq_init->buf_sz = fp->rx_buf_size;
523224a3
DK
2452 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2453 rxq_init->cl_id = fp->cl_id;
2454 rxq_init->spcl_id = fp->cl_id;
2455 rxq_init->stat_id = fp->cl_id;
2456 rxq_init->tpa_agg_sz = tpa_agg_size;
2457 rxq_init->sge_buf_sz = sge_sz;
2458 rxq_init->max_sges_pkt = max_sge;
2459 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2460 rxq_init->fw_sb_id = fp->fw_sb_id;
2461
ec6ba945
VZ
2462 if (IS_FCOE_FP(fp))
2463 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2464 else
2465 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
523224a3
DK
2466
2467 rxq_init->cid = HW_CID(bp, fp->cid);
2468
2469 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2470}
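/*
 * Worked example of the SGE sizing above (illustrative; the 4 KiB SGE
 * page and one page per SGE are assumptions of this sketch): a 9000
 * byte MTU aligns up to 12288 bytes, i.e. 3 pages, so a TPA
 * aggregation needs at most 3 SGEs per packet.
 */
static u16 example_max_sges(u32 mtu, u32 sge_page_sz)
{
	return (u16)((mtu + sge_page_sz - 1) / sge_page_sz); /* 9000 -> 3 */
}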
2471
2472static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2473 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2474{
2475 u16 flags = bnx2x_get_cl_flags(bp, fp);
2476
2477 txq_init->flags = flags;
2478 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2479 txq_init->dscr_map = fp->tx_desc_mapping;
2480 txq_init->stat_id = fp->cl_id;
2481 txq_init->cid = HW_CID(bp, fp->cid);
2482 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2483 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2484 txq_init->fw_sb_id = fp->fw_sb_id;
ec6ba945
VZ
2485
2486 if (IS_FCOE_FP(fp)) {
2487 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2488 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2489 }
2490
523224a3
DK
2491 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2492}
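/*
 * Worked example for the hc_rate conversion above (illustrative): a
 * coalescing period of 50 usec yields 1000000 / 50 = 20000 interrupts
 * per second, while 0 ticks disables the coalescing timer.
 */
static inline u32 example_hc_rate(u16 ticks_usec)
{
	return ticks_usec ? 1000000 / ticks_usec : 0;	/* 50 -> 20000 */
}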
2493
8d96286a 2494static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
2495{
2496 struct bnx2x_func_init_params func_init = {0};
2497 struct bnx2x_rss_params rss = {0};
2498 struct event_ring_data eq_data = { {0} };
2499 u16 flags;
2500
2501 /* pf specific setups */
2502 if (!CHIP_IS_E1(bp))
fb3bff17 2503 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2504
f2e0899f
DK
2505 if (CHIP_IS_E2(bp)) {
2506 /* reset IGU PF statistics: MSIX + ATTN */
2507 /* PF */
2508 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2509 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2510 (CHIP_MODE_IS_4_PORT(bp) ?
2511 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2512 /* ATTN */
2513 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2514 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2515 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2516 (CHIP_MODE_IS_4_PORT(bp) ?
2517 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2518 }
2519
523224a3
DK
2520 /* function setup flags */
2521 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2522
f2e0899f
DK
2523 if (CHIP_IS_E1x(bp))
2524 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2525 else
2526 flags |= FUNC_FLG_TPA;
523224a3 2527
030f3356
DK
2528 /* function setup */
2529
523224a3
DK
2530 /**
2531 	 * Although RSS is meaningless when there is a single HW queue, we
2532 * still need it enabled in order to have HW Rx hash generated.
523224a3 2533 */
030f3356
DK
2534 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2535 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2536 rss.mode = bp->multi_mode;
2537 rss.result_mask = MULTI_MASK;
2538 func_init.rss = &rss;
523224a3
DK
2539
2540 func_init.func_flgs = flags;
2541 func_init.pf_id = BP_FUNC(bp);
2542 func_init.func_id = BP_FUNC(bp);
2543 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2544 func_init.spq_map = bp->spq_mapping;
2545 func_init.spq_prod = bp->spq_prod_idx;
2546
2547 bnx2x_func_init(bp, &func_init);
2548
2549 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2550
2551 /*
2552 	  Congestion management values depend on the link rate.
2553 	  There is no active link so the initial link rate is set to 10 Gbps.
2554 	  When the link comes up, the congestion management values are
2555 	  re-calculated according to the actual link rate.
2556 */
2557 bp->link_vars.line_speed = SPEED_10000;
2558 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2559
2560 /* Only the PMF sets the HW */
2561 if (bp->port.pmf)
2562 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2563
2564 /* no rx until link is up */
2565 bp->rx_mode = BNX2X_RX_MODE_NONE;
2566 bnx2x_set_storm_rx_mode(bp);
2567
2568 /* init Event Queue */
2569 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2570 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2571 eq_data.producer = bp->eq_prod;
2572 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2573 eq_data.sb_id = DEF_SB_ID;
2574 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2575}
2576
2577
2578static void bnx2x_e1h_disable(struct bnx2x *bp)
2579{
2580 int port = BP_PORT(bp);
2581
2582 netif_tx_disable(bp->dev);
2583
2584 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2585
2586 netif_carrier_off(bp->dev);
2587}
2588
2589static void bnx2x_e1h_enable(struct bnx2x *bp)
2590{
2591 int port = BP_PORT(bp);
2592
2593 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2594
2595 	/* Tx queue should only be re-enabled */
2596 netif_tx_wake_all_queues(bp->dev);
2597
2598 /*
2599 * Should not call netif_carrier_on since it will be called if the link
2600 * is up when checking for link state
2601 */
2602}
2603
0793f83f
DK
2604/* called due to MCP event (on pmf):
2605 * reread new bandwidth configuration
2606 * configure FW
2607 * notify others function about the change
2608 */
2609static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2610{
2611 if (bp->link_vars.link_up) {
2612 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2613 bnx2x_link_sync_notify(bp);
2614 }
2615 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2616}
2617
2618static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2619{
2620 bnx2x_config_mf_bw(bp);
2621 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2622}
2623
523224a3
DK
2624static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2625{
2626 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2627
2628 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2629
2630 /*
2631 * This is the only place besides the function initialization
2632 * where the bp->flags can change so it is done without any
2633 * locks
2634 */
f2e0899f 2635 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2636 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2637 bp->flags |= MF_FUNC_DIS;
2638
2639 bnx2x_e1h_disable(bp);
2640 } else {
2641 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2642 bp->flags &= ~MF_FUNC_DIS;
2643
2644 bnx2x_e1h_enable(bp);
2645 }
2646 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2647 }
2648 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 2649 bnx2x_config_mf_bw(bp);
523224a3
DK
2650 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2651 }
2652
2653 /* Report results to MCP */
2654 if (dcc_event)
2655 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2656 else
2657 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2658}
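/*
 * Flow sketch (illustrative): every handled DCC bit is cleared from
 * dcc_event above, so any bit still set at the end is unhandled and
 * the driver reports failure to the MCP.
 */
static inline u32 example_dcc_ack(u32 remaining_bits)
{
	return remaining_bits ? DRV_MSG_CODE_DCC_FAILURE
			      : DRV_MSG_CODE_DCC_OK;
}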
2659
2660/* must be called under the spq lock */
2661static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2662{
2663 struct eth_spe *next_spe = bp->spq_prod_bd;
2664
2665 if (bp->spq_prod_bd == bp->spq_last_bd) {
2666 bp->spq_prod_bd = bp->spq;
2667 bp->spq_prod_idx = 0;
2668 DP(NETIF_MSG_TIMER, "end of spq\n");
2669 } else {
2670 bp->spq_prod_bd++;
2671 bp->spq_prod_idx++;
2672 }
2673 return next_spe;
2674}
2675
2676/* must be called under the spq lock */
28912902
MC
2677static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2678{
2679 int func = BP_FUNC(bp);
2680
2681 /* Make sure that BD data is updated before writing the producer */
2682 wmb();
2683
523224a3 2684 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2685 bp->spq_prod_idx);
28912902
MC
2686 mmiowb();
2687}
2688
a2fbb9ea 2689/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2690int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2691 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2692{
28912902 2693 struct eth_spe *spe;
523224a3 2694 u16 type;
a2fbb9ea 2695
a2fbb9ea
ET
2696#ifdef BNX2X_STOP_ON_ERROR
2697 if (unlikely(bp->panic))
2698 return -EIO;
2699#endif
2700
34f80b04 2701 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2702
6e30dd4e
VZ
2703 if (common) {
2704 if (!atomic_read(&bp->eq_spq_left)) {
2705 BNX2X_ERR("BUG! EQ ring full!\n");
2706 spin_unlock_bh(&bp->spq_lock);
2707 bnx2x_panic();
2708 return -EBUSY;
2709 }
2710 } else if (!atomic_read(&bp->cq_spq_left)) {
2711 BNX2X_ERR("BUG! SPQ ring full!\n");
2712 spin_unlock_bh(&bp->spq_lock);
2713 bnx2x_panic();
2714 return -EBUSY;
a2fbb9ea 2715 }
f1410647 2716
28912902
MC
2717 spe = bnx2x_sp_get_next(bp);
2718
a2fbb9ea 2719 	/* CID needs the port number to be encoded into it */
28912902 2720 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2721 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2722 HW_CID(bp, cid));
523224a3 2723
a2fbb9ea 2724 if (common)
523224a3
DK
2725 /* Common ramrods:
2726 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2727 * TRAFFIC_STOP, TRAFFIC_START
2728 */
2729 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2730 & SPE_HDR_CONN_TYPE;
2731 else
2732 /* ETH ramrods: SETUP, HALT */
2733 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2734 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2735
523224a3
DK
2736 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2737 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2738
523224a3
DK
2739 spe->hdr.type = cpu_to_le16(type);
2740
2741 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2742 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2743
2744 	/* stats ramrod has its own slot on the spq */
6e30dd4e 2745 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
523224a3
DK
2746 /* It's ok if the actual decrement is issued towards the memory
2747 * somewhere between the spin_lock and spin_unlock. Thus no
2748 	 * more explicit memory barrier is needed.
2749 */
6e30dd4e
VZ
2750 if (common)
2751 atomic_dec(&bp->eq_spq_left);
2752 else
2753 atomic_dec(&bp->cq_spq_left);
2754 }
2755
a2fbb9ea 2756
cdaa7cb8 2757 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3 2758 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
6e30dd4e 2759 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
cdaa7cb8
VZ
2760 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2761 (u32)(U64_LO(bp->spq_mapping) +
2762 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
6e30dd4e
VZ
2763 HW_CID(bp, cid), data_hi, data_lo, type,
2764 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
cdaa7cb8 2765
28912902 2766 bnx2x_sp_prod_update(bp);
34f80b04 2767 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2768 return 0;
2769}
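/*
 * Usage sketch (illustrative, with hypothetical parameter choices):
 * posting a statistics ramrod on the slow path queue via
 * bnx2x_sp_post(); the cid and data halves depend on the ramrod type.
 */
static int example_post_stat_query(struct bnx2x *bp)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
			     0, 0, 1 /* common */);
}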
2770
2771/* acquire split MCP access lock register */
4a37fb66 2772static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2773{
72fd0718 2774 u32 j, val;
34f80b04 2775 int rc = 0;
a2fbb9ea
ET
2776
2777 might_sleep();
72fd0718 2778 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2779 val = (1UL << 31);
2780 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2781 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2782 if (val & (1L << 31))
2783 break;
2784
2785 msleep(5);
2786 }
a2fbb9ea 2787 if (!(val & (1L << 31))) {
19680c48 2788 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2789 rc = -EBUSY;
2790 }
2791
2792 return rc;
2793}
2794
4a37fb66
YG
2795/* release split MCP access lock register */
2796static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2797{
72fd0718 2798 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2799}
2800
523224a3
DK
2801#define BNX2X_DEF_SB_ATT_IDX 0x0001
2802#define BNX2X_DEF_SB_IDX 0x0002
2803
a2fbb9ea
ET
2804static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2805{
523224a3 2806 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2807 u16 rc = 0;
2808
2809 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2810 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2811 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2812 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2813 }
523224a3
DK
2814
2815 if (bp->def_idx != def_sb->sp_sb.running_index) {
2816 bp->def_idx = def_sb->sp_sb.running_index;
2817 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2818 }
523224a3
DK
2819
2820 	/* Do not reorder: indices reading should complete before handling */
2821 barrier();
a2fbb9ea
ET
2822 return rc;
2823}
2824
2825/*
2826 * slow path service functions
2827 */
2828
2829static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2830{
34f80b04 2831 int port = BP_PORT(bp);
a2fbb9ea
ET
2832 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2833 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2834 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2835 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2836 u32 aeu_mask;
87942b46 2837 u32 nig_mask = 0;
f2e0899f 2838 u32 reg_addr;
a2fbb9ea 2839
a2fbb9ea
ET
2840 if (bp->attn_state & asserted)
2841 BNX2X_ERR("IGU ERROR\n");
2842
3fcaf2e5
EG
2843 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2844 aeu_mask = REG_RD(bp, aeu_addr);
2845
a2fbb9ea 2846 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2847 aeu_mask, asserted);
72fd0718 2848 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2849 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2850
3fcaf2e5
EG
2851 REG_WR(bp, aeu_addr, aeu_mask);
2852 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2853
3fcaf2e5 2854 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2855 bp->attn_state |= asserted;
3fcaf2e5 2856 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2857
2858 if (asserted & ATTN_HARD_WIRED_MASK) {
2859 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2860
a5e9a7cf
EG
2861 bnx2x_acquire_phy_lock(bp);
2862
877e9aa4 2863 /* save nig interrupt mask */
87942b46 2864 nig_mask = REG_RD(bp, nig_int_mask_addr);
a2fbb9ea 2865
361c391e
YR
2866 /* If nig_mask is not set, no need to call the update
2867 * function.
2868 */
2869 if (nig_mask) {
2870 REG_WR(bp, nig_int_mask_addr, 0);
2871
2872 bnx2x_link_attn(bp);
2873 }
a2fbb9ea
ET
2874
2875 /* handle unicore attn? */
2876 }
2877 if (asserted & ATTN_SW_TIMER_4_FUNC)
2878 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2879
2880 if (asserted & GPIO_2_FUNC)
2881 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2882
2883 if (asserted & GPIO_3_FUNC)
2884 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2885
2886 if (asserted & GPIO_4_FUNC)
2887 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2888
2889 if (port == 0) {
2890 if (asserted & ATTN_GENERAL_ATTN_1) {
2891 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2892 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2893 }
2894 if (asserted & ATTN_GENERAL_ATTN_2) {
2895 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2896 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2897 }
2898 if (asserted & ATTN_GENERAL_ATTN_3) {
2899 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2900 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2901 }
2902 } else {
2903 if (asserted & ATTN_GENERAL_ATTN_4) {
2904 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2905 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2906 }
2907 if (asserted & ATTN_GENERAL_ATTN_5) {
2908 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2909 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2910 }
2911 if (asserted & ATTN_GENERAL_ATTN_6) {
2912 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2913 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2914 }
2915 }
2916
2917 } /* if hardwired */
2918
f2e0899f
DK
2919 if (bp->common.int_block == INT_BLOCK_HC)
2920 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2921 COMMAND_REG_ATTN_BITS_SET);
2922 else
2923 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2924
2925 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2926 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2927 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2928
2929 /* now set back the mask */
a5e9a7cf 2930 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2931 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2932 bnx2x_release_phy_lock(bp);
2933 }
a2fbb9ea
ET
2934}
2935
fd4ef40d
EG
2936static inline void bnx2x_fan_failure(struct bnx2x *bp)
2937{
2938 int port = BP_PORT(bp);
b7737c9b 2939 u32 ext_phy_config;
fd4ef40d 2940 /* mark the failure */
b7737c9b
YR
2941 ext_phy_config =
2942 SHMEM_RD(bp,
2943 dev_info.port_hw_config[port].external_phy_config);
2944
2945 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2946 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2947 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2948 ext_phy_config);
fd4ef40d
EG
2949
2950 /* log the failure */
cdaa7cb8
VZ
2951 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2952 " the driver to shutdown the card to prevent permanent"
2953 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2954}
ab6ad5a4 2955
877e9aa4 2956static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2957{
34f80b04 2958 int port = BP_PORT(bp);
877e9aa4 2959 int reg_offset;
d90d96ba 2960 u32 val;
877e9aa4 2961
34f80b04
EG
2962 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2963 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2964
34f80b04 2965 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2966
2967 val = REG_RD(bp, reg_offset);
2968 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2969 REG_WR(bp, reg_offset, val);
2970
2971 BNX2X_ERR("SPIO5 hw attention\n");
2972
fd4ef40d 2973 /* Fan failure attention */
d90d96ba 2974 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2975 bnx2x_fan_failure(bp);
877e9aa4 2976 }
34f80b04 2977
589abe3a
EG
2978 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2979 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2980 bnx2x_acquire_phy_lock(bp);
2981 bnx2x_handle_module_detect_int(&bp->link_params);
2982 bnx2x_release_phy_lock(bp);
2983 }
2984
34f80b04
EG
2985 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2986
2987 val = REG_RD(bp, reg_offset);
2988 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2989 REG_WR(bp, reg_offset, val);
2990
2991 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2992 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2993 bnx2x_panic();
2994 }
877e9aa4
ET
2995}
2996
2997static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2998{
2999 u32 val;
3000
0626b899 3001 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3002
3003 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3004 BNX2X_ERR("DB hw attention 0x%x\n", val);
3005 /* DORQ discard attention */
3006 if (val & 0x2)
3007 BNX2X_ERR("FATAL error from DORQ\n");
3008 }
34f80b04
EG
3009
3010 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3011
3012 int port = BP_PORT(bp);
3013 int reg_offset;
3014
3015 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3016 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3017
3018 val = REG_RD(bp, reg_offset);
3019 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3020 REG_WR(bp, reg_offset, val);
3021
3022 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3023 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3024 bnx2x_panic();
3025 }
877e9aa4
ET
3026}
3027
3028static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3029{
3030 u32 val;
3031
3032 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3033
3034 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3035 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3036 /* CFC error attention */
3037 if (val & 0x2)
3038 BNX2X_ERR("FATAL error from CFC\n");
3039 }
3040
3041 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3042
3043 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3044 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3045 /* RQ_USDMDP_FIFO_OVERFLOW */
3046 if (val & 0x18000)
3047 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
3048 if (CHIP_IS_E2(bp)) {
3049 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3050 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3051 }
877e9aa4 3052 }
34f80b04
EG
3053
3054 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3055
3056 int port = BP_PORT(bp);
3057 int reg_offset;
3058
3059 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3060 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3061
3062 val = REG_RD(bp, reg_offset);
3063 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3064 REG_WR(bp, reg_offset, val);
3065
3066 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3067 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3068 bnx2x_panic();
3069 }
877e9aa4
ET
3070}
3071
3072static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3073{
34f80b04
EG
3074 u32 val;
3075
877e9aa4
ET
3076 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3077
34f80b04
EG
3078 if (attn & BNX2X_PMF_LINK_ASSERT) {
3079 int func = BP_FUNC(bp);
3080
3081 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3082 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3083 func_mf_config[BP_ABS_FUNC(bp)].config);
3084 val = SHMEM_RD(bp,
3085 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3086 if (val & DRV_STATUS_DCC_EVENT_MASK)
3087 bnx2x_dcc_event(bp,
3088 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
3089
3090 if (val & DRV_STATUS_SET_MF_BW)
3091 bnx2x_set_mf_bw(bp);
3092
2691d51d 3093 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3094 bnx2x_pmf_update(bp);
3095
2ae17f66
VZ
3096 /* Always call it here: bnx2x_link_report() will
3097 * prevent the link indication duplication.
3098 */
3099 bnx2x__link_status_update(bp);
3100
e4901dde 3101 if (bp->port.pmf &&
785b9b1a
SR
3102 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3103 bp->dcbx_enabled > 0)
e4901dde
VZ
3104 /* start dcbx state machine */
3105 bnx2x_dcbx_set_params(bp,
3106 BNX2X_DCBX_STATE_NEG_RECEIVED);
34f80b04 3107 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3108
3109 BNX2X_ERR("MC assert!\n");
3110 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3111 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3112 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3113 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3114 bnx2x_panic();
3115
3116 } else if (attn & BNX2X_MCP_ASSERT) {
3117
3118 BNX2X_ERR("MCP assert!\n");
3119 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3120 bnx2x_fw_dump(bp);
877e9aa4
ET
3121
3122 } else
3123 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3124 }
3125
3126 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3127 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3128 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3129 val = CHIP_IS_E1(bp) ? 0 :
3130 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3131 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3132 }
3133 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3134 val = CHIP_IS_E1(bp) ? 0 :
3135 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3136 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3137 }
877e9aa4 3138 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3139 }
3140}
3141
72fd0718
VZ
3142#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3143#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3144#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3145#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3146#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
f85582f8 3147
72fd0718
VZ
3148/*
3149 * should be run under rtnl lock
3150 */
3151static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3152{
3153 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3154 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3155 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3156 barrier();
3157 mmiowb();
3158}
3159
3160/*
3161 * should be run under rtnl lock
3162 */
3163static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3164{
3165 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3166 	val |= (1 << RESET_DONE_FLAG_SHIFT);
3167 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3168 barrier();
3169 mmiowb();
3170}
3171
3172/*
3173 * should be run under rtnl lock
3174 */
9f6c9258 3175bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3176{
3177 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3178 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3179 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3180}
3181
3182/*
3183 * should be run under rtnl lock
3184 */
9f6c9258 3185inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3186{
3187 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188
3189 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3190
3191 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3192 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3193 barrier();
3194 mmiowb();
3195}
3196
3197/*
3198 * should be run under rtnl lock
3199 */
9f6c9258 3200u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3201{
3202 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3203
3204 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3205
3206 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3207 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3208 barrier();
3209 mmiowb();
3210
3211 return val1;
3212}
3213
3214/*
3215 * should be run under rtnl lock
3216 */
3217static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3218{
3219 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3220}
3221
3222static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3223{
3224 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3226}
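/*
 * Layout sketch for BNX2X_MISC_GEN_REG as used above (illustrative):
 * bits [15:0] hold the load counter and bit 16 marks a reset in
 * progress; the helper mirrors the masking arithmetic of the
 * inc/dec/clear routines.
 */
static inline u32 example_pack_gen_reg(u32 load_cnt, int reset_in_progress)
{
	return (load_cnt & LOAD_COUNTER_MASK) |
	       ((reset_in_progress ? 1U : 0U) << RESET_DONE_FLAG_SHIFT);
}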
3227
3228static inline void _print_next_block(int idx, const char *blk)
3229{
3230 if (idx)
3231 pr_cont(", ");
3232 pr_cont("%s", blk);
3233}
3234
3235static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3236{
3237 int i = 0;
3238 u32 cur_bit = 0;
3239 for (i = 0; sig; i++) {
3240 cur_bit = ((u32)0x1 << i);
3241 if (sig & cur_bit) {
3242 switch (cur_bit) {
3243 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3244 _print_next_block(par_num++, "BRB");
3245 break;
3246 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3247 _print_next_block(par_num++, "PARSER");
3248 break;
3249 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3250 _print_next_block(par_num++, "TSDM");
3251 break;
3252 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3253 _print_next_block(par_num++, "SEARCHER");
3254 break;
3255 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3256 _print_next_block(par_num++, "TSEMI");
3257 break;
3258 }
3259
3260 /* Clear the bit */
3261 sig &= ~cur_bit;
3262 }
3263 }
3264
3265 return par_num;
3266}
3267
3268static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3269{
3270 int i = 0;
3271 u32 cur_bit = 0;
3272 for (i = 0; sig; i++) {
3273 cur_bit = ((u32)0x1 << i);
3274 if (sig & cur_bit) {
3275 switch (cur_bit) {
3276 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3277 _print_next_block(par_num++, "PBCLIENT");
3278 break;
3279 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3280 _print_next_block(par_num++, "QM");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3283 _print_next_block(par_num++, "XSDM");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3286 _print_next_block(par_num++, "XSEMI");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3289 _print_next_block(par_num++, "DOORBELLQ");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3292 _print_next_block(par_num++, "VAUX PCI CORE");
3293 break;
3294 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3295 _print_next_block(par_num++, "DEBUG");
3296 break;
3297 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3298 _print_next_block(par_num++, "USDM");
3299 break;
3300 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3301 _print_next_block(par_num++, "USEMI");
3302 break;
3303 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3304 _print_next_block(par_num++, "UPB");
3305 break;
3306 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3307 _print_next_block(par_num++, "CSDM");
3308 break;
3309 }
3310
3311 /* Clear the bit */
3312 sig &= ~cur_bit;
3313 }
3314 }
3315
3316 return par_num;
3317}
3318
3319static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3320{
3321 int i = 0;
3322 u32 cur_bit = 0;
3323 for (i = 0; sig; i++) {
3324 cur_bit = ((u32)0x1 << i);
3325 if (sig & cur_bit) {
3326 switch (cur_bit) {
3327 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3328 _print_next_block(par_num++, "CSEMI");
3329 break;
3330 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3331 _print_next_block(par_num++, "PXP");
3332 break;
3333 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3334 _print_next_block(par_num++,
3335 "PXPPCICLOCKCLIENT");
3336 break;
3337 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3338 _print_next_block(par_num++, "CFC");
3339 break;
3340 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3341 _print_next_block(par_num++, "CDU");
3342 break;
3343 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3344 _print_next_block(par_num++, "IGU");
3345 break;
3346 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3347 _print_next_block(par_num++, "MISC");
3348 break;
3349 }
3350
3351 /* Clear the bit */
3352 sig &= ~cur_bit;
3353 }
3354 }
3355
3356 return par_num;
3357}
3358
3359static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3360{
3361 int i = 0;
3362 u32 cur_bit = 0;
3363 for (i = 0; sig; i++) {
3364 cur_bit = ((u32)0x1 << i);
3365 if (sig & cur_bit) {
3366 switch (cur_bit) {
3367 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3368 _print_next_block(par_num++, "MCP ROM");
3369 break;
3370 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3371 _print_next_block(par_num++, "MCP UMP RX");
3372 break;
3373 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3374 _print_next_block(par_num++, "MCP UMP TX");
3375 break;
3376 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3377 _print_next_block(par_num++, "MCP SCPAD");
3378 break;
3379 }
3380
3381 /* Clear the bit */
3382 sig &= ~cur_bit;
3383 }
3384 }
3385
3386 return par_num;
3387}
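/*
 * The four parity printers above share one scan pattern; an equivalent
 * sketch (illustrative only) that walks and clears the set bits:
 */
static int example_scan_bits(u32 sig)
{
	int hits = 0;

	while (sig) {
		u32 cur_bit = sig & -sig;	/* isolate lowest set bit */

		hits++;				/* handle this block here */
		sig &= ~cur_bit;
	}
	return hits;
}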
3388
3389static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3390 u32 sig2, u32 sig3)
3391{
3392 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3393 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3394 int par_num = 0;
3395 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3396 "[0]:0x%08x [1]:0x%08x "
3397 "[2]:0x%08x [3]:0x%08x\n",
3398 sig0 & HW_PRTY_ASSERT_SET_0,
3399 sig1 & HW_PRTY_ASSERT_SET_1,
3400 sig2 & HW_PRTY_ASSERT_SET_2,
3401 sig3 & HW_PRTY_ASSERT_SET_3);
3402 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3403 bp->dev->name);
3404 par_num = bnx2x_print_blocks_with_parity0(
3405 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3406 par_num = bnx2x_print_blocks_with_parity1(
3407 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3408 par_num = bnx2x_print_blocks_with_parity2(
3409 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3410 par_num = bnx2x_print_blocks_with_parity3(
3411 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3412 printk("\n");
3413 return true;
3414 } else
3415 return false;
3416}
3417
9f6c9258 3418bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3419{
a2fbb9ea 3420 struct attn_route attn;
72fd0718
VZ
3421 int port = BP_PORT(bp);
3422
3423 attn.sig[0] = REG_RD(bp,
3424 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3425 port*4);
3426 attn.sig[1] = REG_RD(bp,
3427 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3428 port*4);
3429 attn.sig[2] = REG_RD(bp,
3430 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3431 port*4);
3432 attn.sig[3] = REG_RD(bp,
3433 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3434 port*4);
3435
3436 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3437 attn.sig[3]);
3438}
3439
f2e0899f
DK
3440
3441static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3442{
3443 u32 val;
3444 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3445
3446 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3447 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3448 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3449 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3450 "ADDRESS_ERROR\n");
3451 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3452 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3453 "INCORRECT_RCV_BEHAVIOR\n");
3454 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3455 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3456 "WAS_ERROR_ATTN\n");
3457 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3458 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3459 "VF_LENGTH_VIOLATION_ATTN\n");
3460 if (val &
3461 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3462 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3463 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3464 if (val &
3465 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3466 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3467 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3468 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3469 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3470 "TCPL_ERROR_ATTN\n");
3471 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3472 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3473 "TCPL_IN_TWO_RCBS_ATTN\n");
3474 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3475 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3476 "CSSNOOP_FIFO_OVERFLOW\n");
3477 }
3478 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3479 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3480 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3481 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3482 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3483 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3484 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3485 "_ATC_TCPL_TO_NOT_PEND\n");
3486 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3487 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3488 "ATC_GPA_MULTIPLE_HITS\n");
3489 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3490 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3491 "ATC_RCPL_TO_EMPTY_CNT\n");
3492 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3493 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3494 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3495 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3496 "ATC_IREQ_LESS_THAN_STU\n");
3497 }
3498
3499 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3500 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3501 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3502 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3503 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3504 }
3505
3506}
3507
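/* Handle newly deasserted attention bits: check for parity errors first
 * (which trigger the recovery flow), then dispatch each dynamic attention
 * group to the per-signature handlers, ack the bits in the HC or IGU and
 * re-open the AEU mask for this port.
 */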
3508static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3509{
3510 struct attn_route attn, *group_mask;
34f80b04 3511 int port = BP_PORT(bp);
877e9aa4 3512 int index;
3513 u32 reg_addr;
3514 u32 val;
3fcaf2e5 3515 u32 aeu_mask;
3516
3517 /* need to take HW lock because MCP or other port might also
3518 try to handle this event */
4a37fb66 3519 bnx2x_acquire_alr(bp);
a2fbb9ea 3520
4a33bc03 3521 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3522 bp->recovery_state = BNX2X_RECOVERY_INIT;
3523 bnx2x_set_reset_in_progress(bp);
3524 schedule_delayed_work(&bp->reset_task, 0);
3525 /* Disable HW interrupts */
3526 bnx2x_int_disable(bp);
3527 bnx2x_release_alr(bp);
3528 /* In case of parity errors don't handle attentions so that
3529 * other functions can "see" the parity errors.
3530 */
3531 return;
3532 }
3533
3534 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3535 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3536 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3537 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3538 if (CHIP_IS_E2(bp))
3539 attn.sig[4] =
3540 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3541 else
3542 attn.sig[4] = 0;
3543
3544 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3545 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3546
3547 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3548 if (deasserted & (1 << index)) {
72fd0718 3549 group_mask = &bp->attn_group[index];
a2fbb9ea 3550
3551 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3552 "%08x %08x %08x\n",
3553 index,
3554 group_mask->sig[0], group_mask->sig[1],
3555 group_mask->sig[2], group_mask->sig[3],
3556 group_mask->sig[4]);
a2fbb9ea 3557
3558 bnx2x_attn_int_deasserted4(bp,
3559 attn.sig[4] & group_mask->sig[4]);
877e9aa4 3560 bnx2x_attn_int_deasserted3(bp,
72fd0718 3561 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3562 bnx2x_attn_int_deasserted1(bp,
72fd0718 3563 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3564 bnx2x_attn_int_deasserted2(bp,
72fd0718 3565 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3566 bnx2x_attn_int_deasserted0(bp,
72fd0718 3567 attn.sig[0] & group_mask->sig[0]);
3568 }
3569 }
3570
4a37fb66 3571 bnx2x_release_alr(bp);
a2fbb9ea 3572
3573 if (bp->common.int_block == INT_BLOCK_HC)
3574 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3575 COMMAND_REG_ATTN_BITS_CLR);
3576 else
3577 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3578
3579 val = ~deasserted;
3580 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3581 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 3582 REG_WR(bp, reg_addr, val);
a2fbb9ea 3583
a2fbb9ea 3584 if (~bp->attn_state & deasserted)
3fcaf2e5 3585 BNX2X_ERR("IGU ERROR\n");
3586
3587 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3588 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3589
3590 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3591 aeu_mask = REG_RD(bp, reg_addr);
3592
3593 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3594 aeu_mask, deasserted);
72fd0718 3595 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3596 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3597
3598 REG_WR(bp, reg_addr, aeu_mask);
3599 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3600
3601 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3602 bp->attn_state &= ~deasserted;
3603 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3604}
3605
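/* Compare the attention bits reported by the default status block with
 * the ack bits and the saved driver state: bits that are newly set are
 * "asserted", bits that are newly cleared are "deasserted".
 */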
3606static void bnx2x_attn_int(struct bnx2x *bp)
3607{
3608 /* read local copy of bits */
3609 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3610 attn_bits);
3611 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3612 attn_bits_ack);
3613 u32 attn_state = bp->attn_state;
3614
3615 /* look for changed bits */
3616 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3617 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3618
3619 DP(NETIF_MSG_HW,
3620 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3621 attn_bits, attn_ack, asserted, deasserted);
3622
3623 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3624 BNX2X_ERR("BAD attention state\n");
3625
3626 /* handle bits that were raised */
3627 if (asserted)
3628 bnx2x_attn_int_asserted(bp, asserted);
3629
3630 if (deasserted)
3631 bnx2x_attn_int_deasserted(bp, deasserted);
3632}
3633
3634static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3635{
3636 /* No memory barriers */
3637 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3638 mmiowb(); /* keep prod updates ordered */
3639}
3640
3641#ifdef BCM_CNIC
3642static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3643 union event_ring_elem *elem)
3644{
3645 if (!bp->cnic_eth_dev.starting_cid ||
3646 (cid < bp->cnic_eth_dev.starting_cid &&
3647 cid != bp->cnic_eth_dev.iscsi_l2_cid))
3648 return 1;
3649
3650 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3651
3652 if (unlikely(elem->message.data.cfc_del_event.error)) {
3653 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3654 cid);
3655 bnx2x_panic_dump(bp);
3656 }
3657 bnx2x_cnic_cfc_comp(bp, cid);
3658 return 0;
3659}
3660#endif
3661
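/* Walk the event queue from the driver consumer to the HW consumer,
 * dispatching each element by opcode (or by opcode|state for ramrod
 * completions), then return the consumed credits to eq_spq_left and
 * publish the new producer.
 */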
3662static void bnx2x_eq_int(struct bnx2x *bp)
3663{
3664 u16 hw_cons, sw_cons, sw_prod;
3665 union event_ring_elem *elem;
3666 u32 cid;
3667 u8 opcode;
3668 int spqe_cnt = 0;
3669
3670 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3671
3672 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3673 * When we get the next-page we need to adjust so the loop
3674 * condition below will be met. The next element is the size of a
3675 * regular element and hence incrementing by 1
3676 */
3677 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3678 hw_cons++;
3679
25985edc 3680 /* This function may never run in parallel with itself for a
3681 * specific bp, thus there is no need for a "paired" read memory
3682 * barrier here.
3683 */
3684 sw_cons = bp->eq_cons;
3685 sw_prod = bp->eq_prod;
3686
3687 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %u\n",
3688 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
3689
3690 for (; sw_cons != hw_cons;
3691 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3692
3693
3694 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3695
3696 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3697 opcode = elem->message.opcode;
3698
3699
3700 /* handle eq element */
3701 switch (opcode) {
3702 case EVENT_RING_OPCODE_STAT_QUERY:
3703 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3704 /* nothing to do with stats comp */
3705 continue;
3706
3707 case EVENT_RING_OPCODE_CFC_DEL:
3708 /* handle according to cid range */
3709 /*
3710 * we may want to verify here that the bp state is
3711 * HALTING
3712 */
3713 DP(NETIF_MSG_IFDOWN,
3714 "got delete ramrod for MULTI[%d]\n", cid);
3715#ifdef BCM_CNIC
3716 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3717 goto next_spqe;
3718 if (cid == BNX2X_FCOE_ETH_CID)
3719 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3720 else
523224a3 3721#endif
ec6ba945 3722 bnx2x_fp(bp, cid, state) =
3723 BNX2X_FP_STATE_CLOSED;
3724
3725 goto next_spqe;
3726
3727 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3728 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3729 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3730 goto next_spqe;
3731 case EVENT_RING_OPCODE_START_TRAFFIC:
3732 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3733 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3734 goto next_spqe;
3735 }
3736
3737 switch (opcode | bp->state) {
3738 case (EVENT_RING_OPCODE_FUNCTION_START |
3739 BNX2X_STATE_OPENING_WAIT4_PORT):
3740 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3741 bp->state = BNX2X_STATE_FUNC_STARTED;
3742 break;
3743
3744 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3745 BNX2X_STATE_CLOSING_WAIT4_HALT):
3746 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3747 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3748 break;
3749
3750 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3751 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3752 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3753 if (elem->message.data.set_mac_event.echo)
3754 bp->set_mac_pending = 0;
3755 break;
3756
3757 case (EVENT_RING_OPCODE_SET_MAC |
3758 BNX2X_STATE_CLOSING_WAIT4_HALT):
3759 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3760 if (elem->message.data.set_mac_event.echo)
3761 bp->set_mac_pending = 0;
3762 break;
3763 default:
3764 /* unknown event - log the error and continue */
3765 BNX2X_ERR("Unknown EQ event %d\n",
3766 elem->message.opcode);
3767 }
3768next_spqe:
3769 spqe_cnt++;
3770 } /* for */
3771
8fe23fbd 3772 smp_mb__before_atomic_inc();
6e30dd4e 3773 atomic_add(spqe_cnt, &bp->eq_spq_left);
3774
3775 bp->eq_cons = sw_cons;
3776 bp->eq_prod = sw_prod;
3777 /* Make sure that above mem writes were issued towards the memory */
3778 smp_wmb();
3779
3780 /* update producer */
3781 bnx2x_update_eq_prod(bp, bp->eq_prod);
3782}
3783
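/* Slowpath work item (runs on bnx2x_wq): services HW attentions and EQ
 * completions signalled through the default status block, then re-enables
 * the IGU attention line via the final bnx2x_ack_sb() call.
 */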
3784static void bnx2x_sp_task(struct work_struct *work)
3785{
1cf167f2 3786 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3787 u16 status;
3788
a2fbb9ea 3789 status = bnx2x_update_dsb_idx(bp);
3790/* if (status == 0) */
3791/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3792
cdaa7cb8 3793 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3794
877e9aa4 3795 /* HW attentions */
523224a3 3796 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3797 bnx2x_attn_int(bp);
523224a3 3798 status &= ~BNX2X_DEF_SB_ATT_IDX;
3799 }
3800
3801 /* SP events: STAT_QUERY and others */
3802 if (status & BNX2X_DEF_SB_IDX) {
3803#ifdef BCM_CNIC
3804 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
523224a3 3805
3806 if ((!NO_FCOE(bp)) &&
3807 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3808 napi_schedule(&bnx2x_fcoe(bp, napi));
3809#endif
3810 /* Handle EQ completions */
3811 bnx2x_eq_int(bp);
3812
3813 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3814 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3815
3816 status &= ~BNX2X_DEF_SB_IDX;
3817 }
3818
3819 if (unlikely(status))
3820 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3821 status);
a2fbb9ea 3822
3823 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3824 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3825}
3826
9f6c9258 3827irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3828{
3829 struct net_device *dev = dev_instance;
3830 struct bnx2x *bp = netdev_priv(dev);
3831
3832 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3833 IGU_INT_DISABLE, 0);
3834
3835#ifdef BNX2X_STOP_ON_ERROR
3836 if (unlikely(bp->panic))
3837 return IRQ_HANDLED;
3838#endif
3839
3840#ifdef BCM_CNIC
3841 {
3842 struct cnic_ops *c_ops;
3843
3844 rcu_read_lock();
3845 c_ops = rcu_dereference(bp->cnic_ops);
3846 if (c_ops)
3847 c_ops->cnic_handler(bp->cnic_data, NULL);
3848 rcu_read_unlock();
3849 }
3850#endif
1cf167f2 3851 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3852
3853 return IRQ_HANDLED;
3854}
3855
3856/* end of slow path */
3857
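/* Periodic timer: in poll mode it services queue 0 directly; otherwise it
 * only maintains the driver/MCP heartbeat (pulse sequence) and kicks a
 * statistics update before re-arming itself with bp->current_interval.
 */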
3858static void bnx2x_timer(unsigned long data)
3859{
3860 struct bnx2x *bp = (struct bnx2x *) data;
3861
3862 if (!netif_running(bp->dev))
3863 return;
3864
3865 if (poll) {
3866 struct bnx2x_fastpath *fp = &bp->fp[0];
a2fbb9ea 3867
7961f791 3868 bnx2x_tx_int(fp);
b8ee8328 3869 bnx2x_rx_int(fp, 1000);
3870 }
3871
34f80b04 3872 if (!BP_NOMCP(bp)) {
f2e0899f 3873 int mb_idx = BP_FW_MB_IDX(bp);
3874 u32 drv_pulse;
3875 u32 mcp_pulse;
3876
3877 ++bp->fw_drv_pulse_wr_seq;
3878 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3879 /* TBD - add SYSTEM_TIME */
3880 drv_pulse = bp->fw_drv_pulse_wr_seq;
f2e0899f 3881 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
a2fbb9ea 3882
f2e0899f 3883 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3884 MCP_PULSE_SEQ_MASK);
3885 /* The delta between driver pulse and mcp response
3886 * should be 1 (before mcp response) or 0 (after mcp response)
3887 */
3888 if ((drv_pulse != mcp_pulse) &&
3889 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3890 /* someone lost a heartbeat... */
3891 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3892 drv_pulse, mcp_pulse);
3893 }
3894 }
3895
f34d28ea 3896 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 3897 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3898
3899 mod_timer(&bp->timer, jiffies + bp->current_interval);
3900}
3901
3902/* end of Statistics */
3903
3904/* nic init */
3905
3906/*
3907 * nic init service functions
3908 */
3909
523224a3 3910static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 3911{
3912 u32 i;
3913 if (!(len%4) && !(addr%4))
3914 for (i = 0; i < len; i += 4)
3915 REG_WR(bp, addr + i, fill);
3916 else
3917 for (i = 0; i < len; i++)
3918 REG_WR8(bp, addr + i, fill);
34f80b04 3919
3920}
3921
3922/* helper: writes FP SP data to FW - data_size in dwords */
3923static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3924 int fw_sb_id,
3925 u32 *sb_data_p,
3926 u32 data_size)
34f80b04 3927{
a2fbb9ea 3928 int index;
3929 for (index = 0; index < data_size; index++)
3930 REG_WR(bp, BAR_CSTRORM_INTMEM +
3931 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3932 sizeof(u32)*index,
3933 *(sb_data_p + index));
3934}
a2fbb9ea 3935
3936static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3937{
3938 u32 *sb_data_p;
3939 u32 data_size = 0;
f2e0899f 3940 struct hc_status_block_data_e2 sb_data_e2;
523224a3 3941 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 3942
523224a3 3943 /* disable the function first */
3944 if (CHIP_IS_E2(bp)) {
3945 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3946 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3947 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3948 sb_data_e2.common.p_func.vf_valid = false;
3949 sb_data_p = (u32 *)&sb_data_e2;
3950 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3951 } else {
3952 memset(&sb_data_e1x, 0,
3953 sizeof(struct hc_status_block_data_e1x));
3954 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3955 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3956 sb_data_e1x.common.p_func.vf_valid = false;
3957 sb_data_p = (u32 *)&sb_data_e1x;
3958 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3959 }
523224a3 3960 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 3961
3962 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3963 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3964 CSTORM_STATUS_BLOCK_SIZE);
3965 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3966 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3967 CSTORM_SYNC_BLOCK_SIZE);
3968}
34f80b04 3969
3970/* helper: writes SP SB data to FW */
3971static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3972 struct hc_sp_status_block_data *sp_sb_data)
3973{
3974 int func = BP_FUNC(bp);
3975 int i;
3976 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3977 REG_WR(bp, BAR_CSTRORM_INTMEM +
3978 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3979 i*sizeof(u32),
3980 *((u32 *)sp_sb_data + i));
3981}
3982
523224a3 3983static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3984{
3985 int func = BP_FUNC(bp);
3986 struct hc_sp_status_block_data sp_sb_data;
3987 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 3988
3989 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3990 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3991 sp_sb_data.p_func.vf_valid = false;
3992
3993 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3994
3995 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3996 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3997 CSTORM_SP_STATUS_BLOCK_SIZE);
3998 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3999 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4000 CSTORM_SP_SYNC_BLOCK_SIZE);
4001
4002}
4003
4004
4005static inline
4006void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4007 int igu_sb_id, int igu_seg_id)
4008{
4009 hc_sm->igu_sb_id = igu_sb_id;
4010 hc_sm->igu_seg_id = igu_seg_id;
4011 hc_sm->timer_value = 0xFF;
4012 hc_sm->time_to_expire = 0xFFFFFFFF;
4013}
4014
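/* Build the chip-specific (E2 or E1x) status block data - owning
 * function, host SB address and the RX/TX state machines - and write it
 * to the CSTORM, after first disabling the old contents through
 * bnx2x_zero_fp_sb().
 */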
8d96286a 4015static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
523224a3 4016 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 4017{
4018 int igu_seg_id;
4019
f2e0899f 4020 struct hc_status_block_data_e2 sb_data_e2;
4021 struct hc_status_block_data_e1x sb_data_e1x;
4022 struct hc_status_block_sm *hc_sm_p;
4023 int data_size;
4024 u32 *sb_data_p;
4025
4026 if (CHIP_INT_MODE_IS_BC(bp))
4027 igu_seg_id = HC_SEG_ACCESS_NORM;
4028 else
4029 igu_seg_id = IGU_SEG_ACCESS_NORM;
4030
4031 bnx2x_zero_fp_sb(bp, fw_sb_id);
4032
4033 if (CHIP_IS_E2(bp)) {
4034 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4035 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4036 sb_data_e2.common.p_func.vf_id = vfid;
4037 sb_data_e2.common.p_func.vf_valid = vf_valid;
4038 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4039 sb_data_e2.common.same_igu_sb_1b = true;
4040 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4041 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4042 hc_sm_p = sb_data_e2.common.state_machine;
4043 sb_data_p = (u32 *)&sb_data_e2;
4044 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4045 } else {
4046 memset(&sb_data_e1x, 0,
4047 sizeof(struct hc_status_block_data_e1x));
4048 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4049 sb_data_e1x.common.p_func.vf_id = 0xff;
4050 sb_data_e1x.common.p_func.vf_valid = false;
4051 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4052 sb_data_e1x.common.same_igu_sb_1b = true;
4053 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4054 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4055 hc_sm_p = sb_data_e1x.common.state_machine;
4056 sb_data_p = (u32 *)&sb_data_e1x;
4057 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4058 }
4059
4060 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4061 igu_sb_id, igu_seg_id);
4062 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4063 igu_sb_id, igu_seg_id);
4064
4065 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4066
4067 /* write indices to HW */
4068 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4069}
4070
4071static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4072 u8 sb_index, u8 disable, u16 usec)
4073{
4074 int port = BP_PORT(bp);
4075 u8 ticks = usec / BNX2X_BTR;
4076
4077 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4078
4079 disable = disable ? 1 : (usec ? 0 : 1);
4080 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4081}
4082
4083static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4084 u16 tx_usec, u16 rx_usec)
4085{
4086 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4087 false, rx_usec);
4088 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4089 false, tx_usec);
4090}
f2e0899f 4091
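/* Initialize the default (slowpath) status block: record the per-group
 * AEU attention masks read from the MISC enable registers, program the
 * attention message address into the HC or IGU, and write the SP SB data
 * to the CSTORM.
 */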
4092static void bnx2x_init_def_sb(struct bnx2x *bp)
4093{
4094 struct host_sp_status_block *def_sb = bp->def_status_blk;
4095 dma_addr_t mapping = bp->def_status_blk_mapping;
4096 int igu_sp_sb_index;
4097 int igu_seg_id;
4098 int port = BP_PORT(bp);
4099 int func = BP_FUNC(bp);
523224a3 4100 int reg_offset;
a2fbb9ea 4101 u64 section;
4102 int index;
4103 struct hc_sp_status_block_data sp_sb_data;
4104 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4105
4106 if (CHIP_INT_MODE_IS_BC(bp)) {
4107 igu_sp_sb_index = DEF_SB_IGU_ID;
4108 igu_seg_id = HC_SEG_ACCESS_DEF;
4109 } else {
4110 igu_sp_sb_index = bp->igu_dsb_id;
4111 igu_seg_id = IGU_SEG_ACCESS_DEF;
4112 }
4113
4114 /* ATTN */
523224a3 4115 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 4116 atten_status_block);
523224a3 4117 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 4118
4119 bp->attn_state = 0;
4120
4121 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4122 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
34f80b04 4123 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4124 int sindex;
4125 /* take care of sig[0]..sig[4] */
4126 for (sindex = 0; sindex < 4; sindex++)
4127 bp->attn_group[index].sig[sindex] =
4128 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4129
4130 if (CHIP_IS_E2(bp))
4131 /*
4132 * enable5 is separate from the rest of the registers,
4133 * and therefore the address skip is 4
4134 * and not 16 between the different groups
4135 */
4136 bp->attn_group[index].sig[4] = REG_RD(bp,
4137 reg_offset + 0x10 + 0x4*index);
4138 else
4139 bp->attn_group[index].sig[4] = 0;
4140 }
4141
4142 if (bp->common.int_block == INT_BLOCK_HC) {
4143 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4144 HC_REG_ATTN_MSG0_ADDR_L);
4145
4146 REG_WR(bp, reg_offset, U64_LO(section));
4147 REG_WR(bp, reg_offset + 4, U64_HI(section));
4148 } else if (CHIP_IS_E2(bp)) {
4149 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4150 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4151 }
a2fbb9ea 4152
4153 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4154 sp_sb);
a2fbb9ea 4155
523224a3 4156 bnx2x_zero_sp_sb(bp);
a2fbb9ea 4157
4158 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4159 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4160 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4161 sp_sb_data.igu_seg_id = igu_seg_id;
4162 sp_sb_data.p_func.pf_id = func;
f2e0899f 4163 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 4164 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 4165
523224a3 4166 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 4167
bb2a0f7a 4168 bp->stats_pending = 0;
66e855f3 4169 bp->set_mac_pending = 0;
bb2a0f7a 4170
523224a3 4171 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4172}
4173
9f6c9258 4174void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 4175{
4176 int i;
4177
ec6ba945 4178 for_each_eth_queue(bp, i)
523224a3 4179 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
423cfa7e 4180 bp->tx_ticks, bp->rx_ticks);
4181}
4182
4183static void bnx2x_init_sp_ring(struct bnx2x *bp)
4184{
a2fbb9ea 4185 spin_lock_init(&bp->spq_lock);
6e30dd4e 4186 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
a2fbb9ea 4187
a2fbb9ea 4188 bp->spq_prod_idx = 0;
4189 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4190 bp->spq_prod_bd = bp->spq;
4191 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4192}
4193
523224a3 4194static void bnx2x_init_eq_ring(struct bnx2x *bp)
4195{
4196 int i;
4197 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4198 union event_ring_elem *elem =
4199 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 4200
4201 elem->next_page.addr.hi =
4202 cpu_to_le32(U64_HI(bp->eq_mapping +
4203 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4204 elem->next_page.addr.lo =
4205 cpu_to_le32(U64_LO(bp->eq_mapping +
4206 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 4207 }
4208 bp->eq_cons = 0;
4209 bp->eq_prod = NUM_EQ_DESC;
4210 bp->eq_cons_sb = BNX2X_EQ_INDEX;
4211 /* we want a warning message before it gets rough... */
4212 atomic_set(&bp->eq_spq_left,
4213 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
4214}
4215
ab532cf3 4216void bnx2x_push_indir_table(struct bnx2x *bp)
a2fbb9ea 4217{
26c8fa4d 4218 int func = BP_FUNC(bp);
4219 int i;
4220
555f6c78 4221 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4222 return;
4223
4224 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4225 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4226 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4227 bp->fp->cl_id + bp->rx_indir_table[i]);
4228}
4229
4230static void bnx2x_init_ind_table(struct bnx2x *bp)
4231{
4232 int i;
4233
4234 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4235 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4236
4237 bnx2x_push_indir_table(bp);
4238}
4239
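/* Translate bp->rx_mode into accept filters for the default L2 client
 * (and for the FCoE L2 client when it exists) plus a NIG LLH drive mask,
 * then write the resulting MAC filter configuration to the storm memory.
 */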
9f6c9258 4240void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 4241{
34f80b04 4242 int mode = bp->rx_mode;
ec6ba945 4243 int port = BP_PORT(bp);
523224a3 4244 u16 cl_id;
ec6ba945 4245 u32 def_q_filters = 0;
523224a3 4246
4247 /* All but management unicast packets should pass to the host as well */
4248 u32 llh_mask =
4249 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4250 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4251 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4252 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4253
4254 switch (mode) {
4255 case BNX2X_RX_MODE_NONE: /* no Rx */
4256 def_q_filters = BNX2X_ACCEPT_NONE;
4257#ifdef BCM_CNIC
4258 if (!NO_FCOE(bp)) {
4259 cl_id = bnx2x_fcoe(bp, cl_id);
4260 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4261 }
4262#endif
a2fbb9ea 4263 break;
356e2385 4264
a2fbb9ea 4265 case BNX2X_RX_MODE_NORMAL:
4266 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4267 BNX2X_ACCEPT_MULTICAST;
4268#ifdef BCM_CNIC
4269 if (!NO_FCOE(bp)) {
4270 cl_id = bnx2x_fcoe(bp, cl_id);
4271 bnx2x_rxq_set_mac_filters(bp, cl_id,
4272 BNX2X_ACCEPT_UNICAST |
4273 BNX2X_ACCEPT_MULTICAST);
4274 }
ec6ba945 4275#endif
a2fbb9ea 4276 break;
356e2385 4277
a2fbb9ea 4278 case BNX2X_RX_MODE_ALLMULTI:
4279 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4280 BNX2X_ACCEPT_ALL_MULTICAST;
4281#ifdef BCM_CNIC
4282 /*
4283 * Prevent duplication of multicast packets by configuring FCoE
4284 * L2 Client to receive only matched unicast frames.
4285 */
4286 if (!NO_FCOE(bp)) {
4287 cl_id = bnx2x_fcoe(bp, cl_id);
4288 bnx2x_rxq_set_mac_filters(bp, cl_id,
4289 BNX2X_ACCEPT_UNICAST);
4290 }
ec6ba945 4291#endif
a2fbb9ea 4292 break;
356e2385 4293
a2fbb9ea 4294 case BNX2X_RX_MODE_PROMISC:
4295 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4296#ifdef BCM_CNIC
4297 /*
4298 * Prevent packet duplication by configuring DROP_ALL for FCoE
4299 * L2 Client.
4300 */
4301 if (!NO_FCOE(bp)) {
4302 cl_id = bnx2x_fcoe(bp, cl_id);
4303 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4304 }
ec6ba945 4305#endif
4306 /* pass management unicast packets as well */
4307 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4308 break;
356e2385 4309
a2fbb9ea 4310 default:
4311 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4312 break;
4313 }
4314
4315 cl_id = BP_L_ID(bp);
4316 bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4317
581ce43d 4318 REG_WR(bp,
4319 (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4320 NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
581ce43d 4321
4322 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4323 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4324 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4325 "unmatched_ucast 0x%x\n", mode,
4326 bp->mac_filters.ucast_drop_all,
4327 bp->mac_filters.mcast_drop_all,
4328 bp->mac_filters.bcast_drop_all,
4329 bp->mac_filters.ucast_accept_all,
4330 bp->mac_filters.mcast_accept_all,
4331 bp->mac_filters.bcast_accept_all,
4332 bp->mac_filters.unmatched_unicast
523224a3 4333 );
a2fbb9ea 4334
523224a3 4335 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4336}
4337
4338static void bnx2x_init_internal_common(struct bnx2x *bp)
4339{
4340 int i;
4341
523224a3 4342 if (!CHIP_IS_E1(bp)) {
de832a55 4343
4344 /* xstorm needs to know whether to add ovlan to packets or not,
4345 * in switch-independent mode we write 0 here... */
34f80b04 4346 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4347 bp->mf_mode);
34f80b04 4348 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4349 bp->mf_mode);
34f80b04 4350 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4351 bp->mf_mode);
34f80b04 4352 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4353 bp->mf_mode);
4354 }
4355
4356 if (IS_MF_SI(bp))
4357 /*
4358 * In switch independent mode, the TSTORM needs to accept
4359 * packets that failed classification, since approximate match
4360 * mac addresses aren't written to NIG LLH
4361 */
4362 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4363 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4364
4365 /* Zero this manually as its initialization is
4366 currently missing in the initTool */
4367 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 4368 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 4369 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4370 if (CHIP_IS_E2(bp)) {
4371 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4372 CHIP_INT_MODE_IS_BC(bp) ?
4373 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4374 }
523224a3 4375}
8a1c38d1 4376
4377static void bnx2x_init_internal_port(struct bnx2x *bp)
4378{
4379 /* port */
e4901dde 4380 bnx2x_dcb_init_intmem_pfc(bp);
4381}
4382
4383static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4384{
4385 switch (load_code) {
4386 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 4387 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4388 bnx2x_init_internal_common(bp);
4389 /* no break */
4390
4391 case FW_MSG_CODE_DRV_LOAD_PORT:
4392 bnx2x_init_internal_port(bp);
4393 /* no break */
4394
4395 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4396 /* internal memory per function is
4397 initialized inside bnx2x_pf_init */
4398 break;
4399
4400 default:
4401 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4402 break;
4403 }
4404}
4405
4406static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4407{
4408 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4409
4410 fp->state = BNX2X_FP_STATE_CLOSED;
4411
b3b83c3f 4412 fp->cid = fp_idx;
4413 fp->cl_id = BP_L_ID(bp) + fp_idx;
4414 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4415 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4416 /* qZone id equals to FW (per path) client id */
4417 fp->cl_qzone_id = fp->cl_id +
4418 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4419 ETH_MAX_RX_CLIENTS_E1H);
523224a3 4420 /* init shortcut */
4421 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4422 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4423 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4424 /* Setup SB indices */
4425 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4426 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4427
4428 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4429 "cl_id %d fw_sb %d igu_sb %d\n",
4430 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4431 fp->igu_sb_id);
4432 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4433 fp->fw_sb_id, fp->igu_sb_id);
4434
4435 bnx2x_update_fpsb_idx(fp);
4436}
4437
9f6c9258 4438void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4439{
4440 int i;
4441
ec6ba945 4442 for_each_eth_queue(bp, i)
523224a3 4443 bnx2x_init_fp_sb(bp, i);
37b091ba 4444#ifdef BCM_CNIC
4445 if (!NO_FCOE(bp))
4446 bnx2x_init_fcoe_fp(bp);
4447
4448 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4449 BNX2X_VF_ID_INVALID, false,
4450 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4451
37b091ba 4452#endif
a2fbb9ea 4453
4454 /* Initialize MOD_ABS interrupts */
4455 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
4456 bp->common.shmem_base, bp->common.shmem2_base,
4457 BP_PORT(bp));
4458 /* ensure status block indices were read */
4459 rmb();
4460
523224a3 4461 bnx2x_init_def_sb(bp);
5c862848 4462 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4463 bnx2x_init_rx_rings(bp);
523224a3 4464 bnx2x_init_tx_rings(bp);
a2fbb9ea 4465 bnx2x_init_sp_ring(bp);
523224a3 4466 bnx2x_init_eq_ring(bp);
471de716 4467 bnx2x_init_internal(bp, load_code);
523224a3 4468 bnx2x_pf_init(bp);
a2fbb9ea 4469 bnx2x_init_ind_table(bp);
4470 bnx2x_stats_init(bp);
4471
4472 /* flush all before enabling interrupts */
4473 mb();
4474 mmiowb();
4475
615f8fd9 4476 bnx2x_int_enable(bp);
4477
4478 /* Check for SPIO5 */
4479 bnx2x_attn_int_deasserted0(bp,
4480 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4481 AEU_INPUTS_ATTN_BITS_SPIO5);
4482}
4483
4484/* end of nic init */
4485
4486/*
4487 * gzip service functions
4488 */
4489
4490static int bnx2x_gunzip_init(struct bnx2x *bp)
4491{
4492 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4493 &bp->gunzip_mapping, GFP_KERNEL);
4494 if (bp->gunzip_buf == NULL)
4495 goto gunzip_nomem1;
4496
4497 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4498 if (bp->strm == NULL)
4499 goto gunzip_nomem2;
4500
4501 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4502 GFP_KERNEL);
4503 if (bp->strm->workspace == NULL)
4504 goto gunzip_nomem3;
4505
4506 return 0;
4507
4508gunzip_nomem3:
4509 kfree(bp->strm);
4510 bp->strm = NULL;
4511
4512gunzip_nomem2:
4513 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4514 bp->gunzip_mapping);
4515 bp->gunzip_buf = NULL;
4516
4517gunzip_nomem1:
4518 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4519 " un-compression\n");
4520 return -ENOMEM;
4521}
4522
4523static void bnx2x_gunzip_end(struct bnx2x *bp)
4524{
4525 if (bp->strm) {
4526 kfree(bp->strm->workspace);
4527 kfree(bp->strm);
4528 bp->strm = NULL;
4529 }
4530
4531 if (bp->gunzip_buf) {
4532 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4533 bp->gunzip_mapping);
4534 bp->gunzip_buf = NULL;
4535 }
4536}
4537
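/* A note on the header handling below: the fixed gzip header is 10 bytes;
 * when the FNAME flag (0x8) is set it is followed by a NUL-terminated file
 * name that is skipped as well, and the remainder is inflated as a raw
 * deflate stream (-MAX_WBITS) into the preallocated gunzip buffer.
 */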
94a78b79 4538static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4539{
4540 int n, rc;
4541
4542 /* check gzip header */
4543 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4544 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4545 return -EINVAL;
94a78b79 4546 }
4547
4548 n = 10;
4549
34f80b04 4550#define FNAME 0x8
4551
4552 if (zbuf[3] & FNAME)
4553 while ((zbuf[n++] != 0) && (n < len));
4554
94a78b79 4555 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4556 bp->strm->avail_in = len - n;
4557 bp->strm->next_out = bp->gunzip_buf;
4558 bp->strm->avail_out = FW_BUF_SIZE;
4559
4560 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4561 if (rc != Z_OK)
4562 return rc;
4563
4564 rc = zlib_inflate(bp->strm, Z_FINISH);
4565 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4566 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4567 bp->strm->msg);
4568
4569 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4570 if (bp->gunzip_outlen & 0x3)
4571 netdev_err(bp->dev, "Firmware decompression error:"
4572 " gunzip_outlen (%d) not aligned\n",
4573 bp->gunzip_outlen);
4574 bp->gunzip_outlen >>= 2;
4575
4576 zlib_inflateEnd(bp->strm);
4577
4578 if (rc == Z_STREAM_END)
4579 return 0;
4580
4581 return rc;
4582}
4583
4584/* nic load/unload */
4585
4586/*
34f80b04 4587 * General service functions
4588 */
4589
4590/* send a NIG loopback debug packet */
4591static void bnx2x_lb_pckt(struct bnx2x *bp)
4592{
a2fbb9ea 4593 u32 wb_write[3];
4594
4595 /* Ethernet source and destination addresses */
4596 wb_write[0] = 0x55555555;
4597 wb_write[1] = 0x55555555;
34f80b04 4598 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4599 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4600
4601 /* NON-IP protocol */
4602 wb_write[0] = 0x09000000;
4603 wb_write[1] = 0x55555555;
34f80b04 4604 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4605 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4606}
4607
4608/* some of the internal memories
4609 * are not directly readable from the driver
4610 * to test them we send debug packets
4611 */
4612static int bnx2x_int_mem_test(struct bnx2x *bp)
4613{
4614 int factor;
4615 int count, i;
4616 u32 val = 0;
4617
ad8d3948 4618 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4619 factor = 120;
4620 else if (CHIP_REV_IS_EMUL(bp))
4621 factor = 200;
4622 else
a2fbb9ea 4623 factor = 1;
a2fbb9ea 4624
4625 /* Disable inputs of parser neighbor blocks */
4626 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4627 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4628 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4629 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4630
4631 /* Write 0 to parser credits for CFC search request */
4632 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4633
4634 /* send Ethernet packet */
4635 bnx2x_lb_pckt(bp);
4636
4637 /* TODO: do I reset the NIG statistics? */
4638 /* Wait until NIG register shows 1 packet of size 0x10 */
4639 count = 1000 * factor;
4640 while (count) {
34f80b04 4641
4642 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4643 val = *bnx2x_sp(bp, wb_data[0]);
4644 if (val == 0x10)
4645 break;
4646
4647 msleep(10);
4648 count--;
4649 }
4650 if (val != 0x10) {
4651 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4652 return -1;
4653 }
4654
4655 /* Wait until PRS register shows 1 packet */
4656 count = 1000 * factor;
4657 while (count) {
4658 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4659 if (val == 1)
4660 break;
4661
4662 msleep(10);
4663 count--;
4664 }
4665 if (val != 0x1) {
4666 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4667 return -2;
4668 }
4669
4670 /* Reset and init BRB, PRS */
34f80b04 4671 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4672 msleep(50);
34f80b04 4673 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 4674 msleep(50);
4675 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4676 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4677
4678 DP(NETIF_MSG_HW, "part2\n");
4679
4680 /* Disable inputs of parser neighbor blocks */
4681 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4682 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4683 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4684 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4685
4686 /* Write 0 to parser credits for CFC search request */
4687 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4688
4689 /* send 10 Ethernet packets */
4690 for (i = 0; i < 10; i++)
4691 bnx2x_lb_pckt(bp);
4692
4693 /* Wait until NIG register shows 10 + 1
4694 packets of size 11*0x10 = 0xb0 */
4695 count = 1000 * factor;
4696 while (count) {
34f80b04 4697
4698 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4699 val = *bnx2x_sp(bp, wb_data[0]);
4700 if (val == 0xb0)
4701 break;
4702
4703 msleep(10);
4704 count--;
4705 }
4706 if (val != 0xb0) {
4707 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4708 return -3;
4709 }
4710
4711 /* Wait until PRS register shows 2 packets */
4712 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4713 if (val != 2)
4714 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4715
4716 /* Write 1 to parser credits for CFC search request */
4717 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4718
4719 /* Wait until PRS register shows 3 packets */
4720 msleep(10 * factor);
4721 /* check that the PRS register indeed shows 3 packets */
4722 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4723 if (val != 3)
4724 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4725
4726 /* clear NIG EOP FIFO */
4727 for (i = 0; i < 11; i++)
4728 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4729 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4730 if (val != 1) {
4731 BNX2X_ERR("clear of NIG failed\n");
4732 return -4;
4733 }
4734
4735 /* Reset and init BRB, PRS, NIG */
4736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4737 msleep(50);
4738 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4739 msleep(50);
4740 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4741 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 4742#ifndef BCM_CNIC
4743 /* set NIC mode */
4744 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4745#endif
4746
4747 /* Enable inputs of parser neighbor blocks */
4748 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4749 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4750 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 4751 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4752
4753 DP(NETIF_MSG_HW, "done\n");
4754
4755 return 0; /* OK */
4756}
4757
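/* Unmask (write 0 to) the per-block interrupt mask registers so the HW
 * blocks can raise attentions; a few masks are deliberately left partly
 * set, e.g. PBF bits 3-4 and the chip-dependent PXP2 and BRB1 values.
 */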
4a33bc03 4758static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4759{
4760 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4761 if (CHIP_IS_E2(bp))
4762 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4763 else
4764 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4765 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4766 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4767 /*
4768 * mask read length error interrupts in brb for parser
4769 * (parsing unit and 'checksum and crc' unit)
4770 * these errors are legal (PU reads fixed length and CAC can cause
4771 * read length error on truncated packets)
4772 */
4773 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4774 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4775 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4776 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4777 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4778 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4779/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4780/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4781 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4782 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4783 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4784/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4785/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4786 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4787 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4788 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4789 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4790/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4791/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 4792
4793 if (CHIP_REV_IS_FPGA(bp))
4794 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4795 else if (CHIP_IS_E2(bp))
4796 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4797 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4798 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4799 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4800 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4801 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4802 else
4803 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4804 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4805 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4806 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
4807/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4808/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4809 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4810 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04 4811/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4a33bc03 4812 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
4813}
4814
4815static void bnx2x_reset_common(struct bnx2x *bp)
4816{
4817 /* reset_common */
4818 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4819 0xd3ffff7f);
4820 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4821}
4822
4823static void bnx2x_init_pxp(struct bnx2x *bp)
4824{
4825 u16 devctl;
4826 int r_order, w_order;
4827
4828 pci_read_config_word(bp->pdev,
4829 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4830 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4831 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4832 if (bp->mrrs == -1)
4833 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4834 else {
4835 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4836 r_order = bp->mrrs;
4837 }
4838
4839 bnx2x_init_pxp_arb(bp, r_order, w_order);
4840}
4841
4842static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4843{
2145a920 4844 int is_required;
fd4ef40d 4845 u32 val;
2145a920 4846 int port;
fd4ef40d 4847
4848 if (BP_NOMCP(bp))
4849 return;
4850
4851 is_required = 0;
4852 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4853 SHARED_HW_CFG_FAN_FAILURE_MASK;
4854
4855 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4856 is_required = 1;
4857
4858 /*
4859 * The fan failure mechanism is usually related to the PHY type since
4860 * the power consumption of the board is affected by the PHY. Currently,
4861 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4862 */
4863 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4864 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 4865 is_required |=
4866 bnx2x_fan_failure_det_req(
4867 bp,
4868 bp->common.shmem_base,
a22f0788 4869 bp->common.shmem2_base,
d90d96ba 4870 port);
4871 }
4872
4873 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4874
4875 if (is_required == 0)
4876 return;
4877
4878 /* Fan failure is indicated by SPIO 5 */
4879 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4880 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4881
4882 /* set to active low mode */
4883 val = REG_RD(bp, MISC_REG_SPIO_INT);
4884 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 4885 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4886 REG_WR(bp, MISC_REG_SPIO_INT, val);
4887
4888 /* enable interrupt to signal the IGU */
4889 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4890 val |= (1 << MISC_REGISTERS_SPIO_5);
4891 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4892}
4893
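/* Write a function number into this function's PGL pretend register so
 * that subsequent GRC accesses are issued on behalf of that function;
 * the read-back is presumably there to flush the posted write before the
 * pretend setting is relied upon.
 */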
4894static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4895{
4896 u32 offset = 0;
4897
4898 if (CHIP_IS_E1(bp))
4899 return;
4900 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4901 return;
4902
4903 switch (BP_ABS_FUNC(bp)) {
4904 case 0:
4905 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4906 break;
4907 case 1:
4908 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4909 break;
4910 case 2:
4911 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4912 break;
4913 case 3:
4914 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4915 break;
4916 case 4:
4917 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4918 break;
4919 case 5:
4920 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4921 break;
4922 case 6:
4923 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4924 break;
4925 case 7:
4926 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4927 break;
4928 default:
4929 return;
4930 }
4931
4932 REG_WR(bp, offset, pretend_func_num);
4933 REG_RD(bp, offset);
4934 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4935}
4936
4937static void bnx2x_pf_disable(struct bnx2x *bp)
4938{
4939 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4940 val &= ~IGU_PF_CONF_FUNC_EN;
4941
4942 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4943 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4944 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4945}
4946
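/* Common-phase HW init, executed once by the first function loaded on a
 * path: resets the chip, brings up PXP/ILT/DMAE/QM and the storm blocks,
 * and applies chip-specific workarounds such as the E2 timers-ILT
 * workaround further down.
 */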
523224a3 4947static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4948{
a2fbb9ea 4949 u32 val, i;
a2fbb9ea 4950
f2e0899f 4951 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4952
81f75bbf 4953 bnx2x_reset_common(bp);
4954 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4955 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4956
94a78b79 4957 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4958 if (!CHIP_IS_E1(bp))
fb3bff17 4959 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4960
4961 if (CHIP_IS_E2(bp)) {
4962 u8 fid;
4963
4964 /**
4965 * In 4-port or 2-port mode we need to turn off master-enable
4966 * for everyone; after that, turn it back on for self.
4967 * So we disregard whether we are multi-function and always disable
4968 * master-enable for all functions on the given path: 0,2,4,6 for
4969 * path 0 and 1,3,5,7 for path 1
4970 */
4971 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4972 if (fid == BP_ABS_FUNC(bp)) {
4973 REG_WR(bp,
4974 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4975 1);
4976 continue;
4977 }
4978
4979 bnx2x_pretend_func(bp, fid);
4980 /* clear pf enable */
4981 bnx2x_pf_disable(bp);
4982 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4983 }
4984 }
a2fbb9ea 4985
94a78b79 4986 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4987 if (CHIP_IS_E1(bp)) {
4988 /* enable HW interrupt from PXP on USDM overflow
4989 bit 16 on INT_MASK_0 */
4990 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4991 }
a2fbb9ea 4992
94a78b79 4993 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 4994 bnx2x_init_pxp(bp);
4995
4996#ifdef __BIG_ENDIAN
4997 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4998 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4999 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5000 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5001 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5002 /* make sure this value is 0 */
5003 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5004
5005/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5006 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5007 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5008 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5009 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5010#endif
5011
5012 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5013
5014 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5015 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5016
5017 /* let the HW do its magic ... */
5018 msleep(100);
5019 /* finish PXP init */
5020 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5021 if (val != 1) {
5022 BNX2X_ERR("PXP2 CFG failed\n");
5023 return -EBUSY;
5024 }
5025 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5026 if (val != 1) {
5027 BNX2X_ERR("PXP2 RD_INIT failed\n");
5028 return -EBUSY;
5029 }
a2fbb9ea 5030
5031 /* Timers bug workaround, E2 only. We need to set the entire ILT to
5032 * have entries with value "0" and valid bit on.
5033 * This needs to be done by the first PF that is loaded in a path
5034 * (i.e. common phase)
5035 */
5036 if (CHIP_IS_E2(bp)) {
5037 struct ilt_client_info ilt_cli;
5038 struct bnx2x_ilt ilt;
5039 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5040 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5041
b595076a 5042 /* initialize dummy TM client */
5043 ilt_cli.start = 0;
5044 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5045 ilt_cli.client_num = ILT_CLIENT_TM;
5046
5047 /* Step 1: set zeroes to all ilt page entries with valid bit on
5048 * Step 2: set the timers first/last ilt entry to point
5049 * to the entire range to prevent ILT range error for 3rd/4th
25985edc 5050 * vnic (this code assumes existence of the vnic)
5051 *
5052 * both steps performed by call to bnx2x_ilt_client_init_op()
5053 * with dummy TM client
5054 *
5055 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5056 * and its counterpart are split registers
5057 */
5058 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5059 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5060 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5061
5062 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5063 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5064 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5065 }
5066
5067
5068 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5069 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5070
5071 if (CHIP_IS_E2(bp)) {
5072 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5073 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5074 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5075
5076 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5077
5078 /* let the HW do its magic ... */
5079 do {
5080 msleep(200);
5081 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5082 } while (factor-- && (val != 1));
5083
5084 if (val != 1) {
5085 BNX2X_ERR("ATC_INIT failed\n");
5086 return -EBUSY;
5087 }
5088 }
5089
94a78b79 5090 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5091
5092 /* clean the DMAE memory */
5093 bp->dmae_ready = 1;
5094 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5095
5096 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5097 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5098 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5099 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5100
5101 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5102 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5103 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5104 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5105
94a78b79 5106 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba 5107
5108 if (CHIP_MODE_IS_4_PORT(bp))
5109 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
f85582f8 5110
5111 /* QM queues pointers table */
5112 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5113
5114 /* soft reset pulse */
5115 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5116 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 5117
37b091ba 5118#ifdef BCM_CNIC
94a78b79 5119 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5120#endif
a2fbb9ea 5121
94a78b79 5122 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5123 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5124
5125 if (!CHIP_REV_IS_SLOW(bp)) {
5126 /* enable hw interrupt from doorbell Q */
5127 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5128 }
a2fbb9ea 5129
94a78b79 5130 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5131 if (CHIP_MODE_IS_4_PORT(bp)) {
5132 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5133 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5134 }
5135
94a78b79 5136 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5137 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 5138#ifndef BCM_CNIC
3196a88a
EG
5139 /* set NIC mode */
5140 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 5141#endif
f2e0899f 5142 if (!CHIP_IS_E1(bp))
0793f83f 5143 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
f85582f8 5144
f2e0899f
DK
5145 if (CHIP_IS_E2(bp)) {
5146 /* Bit-map indicating which L2 hdrs may appear after the
5147 basic Ethernet header */
0793f83f 5148 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
5149 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5150 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5151 }
a2fbb9ea 5152
94a78b79
VZ
5153 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5154 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5155 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5156 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5157
ca00392c
EG
5158 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5159 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5160 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5161 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5162
94a78b79
VZ
5163 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5164 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5165 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5166 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5167
f2e0899f
DK
5168 if (CHIP_MODE_IS_4_PORT(bp))
5169 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5170
34f80b04
EG
5171 /* sync semi rtc */
5172 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5173 0x80000000);
5174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5175 0x80000000);
a2fbb9ea 5176
94a78b79
VZ
5177 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5178 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5179 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5180
f2e0899f 5181 if (CHIP_IS_E2(bp)) {
0793f83f 5182 int has_ovlan = IS_MF_SD(bp);
f2e0899f
DK
5183 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5184 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5185 }
5186
34f80b04 5187 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
5188 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5189 REG_WR(bp, i, random32());
f85582f8 5190
94a78b79 5191 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
5192#ifdef BCM_CNIC
5193 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5194 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5195 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5196 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5197 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5198 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5199 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5200 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5201 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5202 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5203#endif
34f80b04 5204 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5205
34f80b04
EG
5206 if (sizeof(union cdu_context) != 1024)
5207 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
5208 dev_alert(&bp->pdev->dev, "please adjust the size "
5209 "of cdu_context(%ld)\n",
7995c64e 5210 (long)sizeof(union cdu_context));
a2fbb9ea 5211
94a78b79 5212 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5213 val = (4 << 24) + (0 << 12) + 1024;
5214 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 5215
94a78b79 5216 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5217 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5218 /* enable context validation interrupt from CFC */
5219 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5220
5221 /* set the thresholds to prevent CFC/CDU race */
5222 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5223
94a78b79 5224 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
f2e0899f
DK
5225
5226 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5227 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5228
5229 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
94a78b79 5230 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5231
94a78b79 5232 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5233 /* Reset PCIE errors for debug */
5234 REG_WR(bp, 0x2814, 0xffffffff);
5235 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5236
f2e0899f
DK
5237 if (CHIP_IS_E2(bp)) {
5238 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5239 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5240 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5241 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5242 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5243 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5244 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5245 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5246 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5247 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5248 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5249 }
5250
94a78b79 5251 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 5252 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 5253 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 5254 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5255
94a78b79 5256 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
f2e0899f 5257 if (!CHIP_IS_E1(bp)) {
fb3bff17 5258 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
0793f83f 5259 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
34f80b04 5260 }
f2e0899f
DK
5261 if (CHIP_IS_E2(bp)) {
5262 /* Bit-map indicating which L2 hdrs may appear after the
5263 basic Ethernet header */
0793f83f 5264 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
f2e0899f 5265 }
34f80b04
EG
5266
5267 if (CHIP_REV_IS_SLOW(bp))
5268 msleep(200);
5269
5270 /* finish CFC init */
5271 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5272 if (val != 1) {
5273 BNX2X_ERR("CFC LL_INIT failed\n");
5274 return -EBUSY;
5275 }
5276 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5277 if (val != 1) {
5278 BNX2X_ERR("CFC AC_INIT failed\n");
5279 return -EBUSY;
5280 }
5281 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5282 if (val != 1) {
5283 BNX2X_ERR("CFC CAM_INIT failed\n");
5284 return -EBUSY;
5285 }
5286 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5287
f2e0899f
DK
5288 if (CHIP_IS_E1(bp)) {
5289 /* read NIG statistic
5290 to see if this is our first up since powerup */
5291 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5292 val = *bnx2x_sp(bp, wb_data[0]);
34f80b04 5293
f2e0899f
DK
5294 /* do internal memory self test */
5295 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5296 BNX2X_ERR("internal mem self test failed\n");
5297 return -EBUSY;
5298 }
34f80b04
EG
5299 }
5300
fd4ef40d
EG
5301 bnx2x_setup_fan_failure_detection(bp);
5302
34f80b04
EG
5303 /* clear PXP2 attentions */
5304 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5305
4a33bc03
VZ
5306 bnx2x_enable_blocks_attention(bp);
5307 if (CHIP_PARITY_ENABLED(bp))
5308 bnx2x_enable_blocks_parity(bp);
a2fbb9ea 5309
6bbca910 5310 if (!BP_NOMCP(bp)) {
f2e0899f
DK
5311 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5312 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5313 CHIP_IS_E1x(bp)) {
5314 u32 shmem_base[2], shmem2_base[2];
5315 shmem_base[0] = bp->common.shmem_base;
5316 shmem2_base[0] = bp->common.shmem2_base;
5317 if (CHIP_IS_E2(bp)) {
5318 shmem_base[1] =
5319 SHMEM2_RD(bp, other_shmem_base_addr);
5320 shmem2_base[1] =
5321 SHMEM2_RD(bp, other_shmem2_base_addr);
5322 }
5323 bnx2x_acquire_phy_lock(bp);
5324 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5325 bp->common.chip_id);
5326 bnx2x_release_phy_lock(bp);
5327 }
6bbca910
YR
5328 } else
5329 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5330
34f80b04
EG
5331 return 0;
5332}
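/* A sketch of the bounded-poll convention used above (argument meaning
 * assumed from the call sites; reg_poll() itself is defined elsewhere):
 * poll a register until it reads the expected value or retries run out:
 *
 *	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
 *	if (val != 1)
 *		return -EBUSY;	// HW never reported completion
 */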

static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: the pf_master bit in PGLUE is disabled at
	 * the common phase, so we need to enable it here before any DMAE
	 * accesses are attempted. Therefore the enable-master was added
	 * manually to the port phase (it also happens in the function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56;	/* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}
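	/* Worked example of the threshold arithmetic above, assuming a
	 * two-port device (no ONE_PORT_FLAG) in SF mode with MTU 9000:
	 * low = 96 + 9000/64 + 1 = 237 and high = 237 + 56 = 293, i.e.
	 * (24KB + 4*MTU)/256 plus 14KB/256 more for the high watermark.
	 */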

	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
			    BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
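/* Usage sketch for bnx2x_ilt_wr(): each call programs a single ILT line,
 * so clearing a function's window is a loop from FUNC_ILT_BASE() over
 * ILT_PER_FUNC lines, exactly what bnx2x_clear_func_ilt() below does:
 *
 *	bnx2x_ilt_wr(bp, line, page_dma);	// map a line
 *	bnx2x_ilt_wr(bp, line, 0);		// unmap it again
 *
 * The address is split by ONCHIP_ADDR1()/ONCHIP_ADDR2() since the target
 * is a wide-bus register written as two 32-bit halves via bnx2x_wb_wr().
 */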

static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}

static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		   set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers bug workaround: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - needed for the WB DMAE writes performed
		 * before this register is re-initialized as part of the
		 * regular function init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
					  "block during "
					  "function init (0x%x)!\n", val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	spin_lock_init(&bp->dmae_lock);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		bp->fw_drv_pulse_wr_seq =
		   (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
		    DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
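/* Note on the switch above: the missing breaks are deliberate. A
 * COMMON/COMMON_CHIP load code cascades through all three init stages,
 * a PORT load runs port + function init, and a FUNCTION load runs
 * function init only. Schematically:
 *
 *	COMMON   -> bnx2x_init_hw_common() + bnx2x_init_hw_port() +
 *		    bnx2x_init_hw_func()
 *	PORT     -> bnx2x_init_hw_port() + bnx2x_init_hw_func()
 *	FUNCTION -> bnx2x_init_hw_func()
 */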

void bnx2x_free_mem(struct bnx2x *bp)
{
	bnx2x_gunzip_end(bp);

	/* fastpath */
	bnx2x_free_fp_mem(bp);
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_FREE(bp->rx_indir_table);
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
	if (bnx2x_gunzip_init(bp))
		return -ENOMEM;

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
		    TSTORM_INDIRECTION_TABLE_SIZE);

	/* fastpath */
	/* needs to be done at the end, since it's self adjusting to the
	 * amount of memory available for RSS queues
	 */
	if (bnx2x_alloc_fp_mem(bp))
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;
}
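/* The BNX2X_PCI_ALLOC()/BNX2X_ALLOC() helpers above are assumed to jump
 * to the local alloc_mem_err label on failure, which is why the body has
 * almost no explicit error checks; a hand-rolled equivalent of one step
 * would look like:
 *
 *	bp->spq = dma_alloc_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
 *				     &bp->spq_mapping, GFP_KERNEL);
 *	if (!bp->spq)
 *		goto alloc_mem_err;	// unwinds via bnx2x_free_mem()
 */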

/*
 * Init service functions
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags);

int bnx2x_func_start(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
				 WAIT_RAMROD_COMMON);
}

static int bnx2x_func_stop(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
				 0, &(bp->state), WAIT_RAMROD_COMMON);
}

/**
 * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @bp:		driver handle
 * @set:	set or clear an entry (1 or 0)
 * @mac:	pointer to a buffer containing a MAC
 * @cl_bit_vec:	bit vector of clients to register a MAC for
 * @cam_offset:	offset in a CAM to use
 * @is_bcast:	is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	/* Mark the single MAC configuration ramrod (as opposed to a
	 * UC/MC list configuration).
	 */
	config->hdr.echo = 1;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
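/* Worked example of the swab16() packing above: for the illustrative MAC
 * 00:1b:21:aa:bb:cc on a little-endian host, *(u16 *)&mac[0] reads as
 * 0x1b00, so swab16() yields msb_mac_addr = 0x001b; the middle and lsb
 * words come out as 0x21aa and 0xbbcc, matching the %04x:%04x:%04x
 * debug format.
 */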

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
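/* Typical pairing of bnx2x_sp_post() with bnx2x_wait_ramrod(), as the
 * callers later in this file do (a sketch, not a new API):
 *
 *	fp->state = BNX2X_FP_STATE_HALTING;
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, fp->cid, 0, fp->cl_id, 0);
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, fp->index,
 *			       &fp->state, 0);
 *
 * The state pointer is flipped by bnx2x_sp_event() when the completion
 * arrives, which is what the mb() in the loop above synchronizes against.
 */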

static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
	if (CHIP_IS_E1H(bp))
		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
	else if (CHIP_MODE_IS_4_PORT(bp))
		return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
	else
		return E2_FUNC_MAX * rel_offset + BP_VN(bp);
}

/**
 * LLH CAM line allocations: currently only iSCSI and ETH macs are
 * relevant. In addition, current implementation is tuned for a
 * single ETH MAC.
 */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
};

static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
				 int set,
				 unsigned char *dev_addr,
				 int index)
{
	u32 wb_data[2];
	u32 mem_offset, ena_offset, mem_index;
	/**
	 * indexes mapping:
	 * 0..7 - goes to MEM
	 * 8..15 - goes to MEM2
	 */

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	/* calculate memory start offset according to the mapping
	 * and index in the memory */
	if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
		mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
					   NIG_REG_LLH0_FUNC_MEM;
		ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
					   NIG_REG_LLH0_FUNC_MEM_ENABLE;
		mem_index = index;
	} else {
		mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
					   NIG_REG_P0_LLH_FUNC_MEM2;
		ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
					   NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
		mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
	}

	if (set) {
		/* LLH_FUNC_MEM is a u64 WB register */
		mem_offset += 8*mem_index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, mem_offset, wb_data, 2);
	}

	/* enable/disable the entry */
	REG_WR(bp, ena_offset + 4*mem_index, set);

}

void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
			 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));

	/* networking MAC */
	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
			       (1 << bp->fp->cl_id), cam_offset, 0);

	bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);

	if (CHIP_IS_E1(bp)) {
		/* broadcast MAC */
		static const u8 bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};
		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
	}
}

static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
{
	return CHIP_REV_IS_SLOW(bp) ?
		(BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
		(BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
}

/* set mc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of a very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 */
static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
		return -EINVAL;

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	wmb();

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* Mark that this ramrod doesn't use bp->set_mac_pending for
	 * synchronization.
	 */
	config_cmd->hdr.echo = 0;

	mb();

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}

void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);

	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	wmb();

	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* We'll wait for a completion this time... */
	config_cmd->hdr.echo = 1;

	bp->set_mac_pending = 1;

	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
			  ramrod_flags);

}

/* Accept one or more multicasts */
static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
{
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	u32 mc_filter[MC_HASH_SIZE];
	u32 crc, bit, regidx;
	int i;

	memset(mc_filter, 0, 4 * MC_HASH_SIZE);

	netdev_for_each_mc_addr(ha, dev) {
		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
		   bnx2x_mc_addr(ha));

		crc = crc32c_le(0, bnx2x_mc_addr(ha),
				ETH_ALEN);
		bit = (crc >> 24) & 0xff;
		regidx = bit >> 5;
		bit &= 0x1f;
		mc_filter[regidx] |= (1 << bit);
	}

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i),
		       mc_filter[i]);

	return 0;
}
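/* Worked example of the hashing above: if crc32c_le() over a multicast
 * MAC happens to be 0xA7xxxxxx, then bit = (crc >> 24) & 0xff = 0xA7 =
 * 167, regidx = 167 >> 5 = 5 and bit &= 0x1f leaves 7, so the address
 * sets bit 7 of mc_filter[5] before the table is flushed to the MC_HASH
 * registers.
 */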

void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
}

#ifdef BCM_CNIC
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 * @set:	set or clear the CAM entry
 *
 * This function will wait until the ramrod completion returns.
 * Returns 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
			       cam_offset, 0);

	bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);

	return 0;
}

/**
 * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
 *
 * @bp:		driver handle
 * @set:	set or clear the CAM entry
 *
 * This function will wait until the ramrod completion returns.
 * Returns 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
	/**
	 * CAM allocation for E1H
	 * eth unicasts: by func number
	 * iscsi: by func number
	 * fip unicast: by func number
	 * fip multicast: by func number
	 */
	bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
		cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);

	return 0;
}

int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));

	/**
	 * CAM allocation for E1H
	 * eth unicasts: by func number
	 * iscsi: by func number
	 * fip unicast: by func number
	 * fip multicast: by func number
	 */
	bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
		bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);

	return 0;
}
#endif

static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.is_fcoe_flg =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}

static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

static int bnx2x_setup_fw_client(struct bnx2x *bp,
				 struct bnx2x_client_init_params *params,
				 u8 activate,
				 struct client_init_ramrod_data *data,
				 dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();

	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}

/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 */
static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{

	switch (int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
					  "Multi requested but failed to "
					  "enable MSI-X (%d), "
					  "set number of queues to %d\n",
				   bp->num_queues,
				   1 + NONE_ETH_CONTEXT_USE);
			bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;

			/* Try to enable MSI */
			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}
		break;
	}
}
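/* The fallback ladder above, schematically (int_mode is the driver-global
 * configuration knob consulted by the switch, assumed to be set from a
 * module parameter like the other knobs in this file):
 *
 *	INT_MODE_MSI  -> enable MSI, fall through to single-queue setup
 *	INT_MODE_INTx -> single queue, legacy/MSI interrupt
 *	default       -> try MSI-X with the requested queue count; on
 *			 failure drop to one queue and try MSI, else INTx
 */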
6651
c2bff63f
DK
6652/* must be called prioir to any HW initializations */
6653static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6654{
6655 return L2_ILT_LINES(bp);
6656}
6657
523224a3
DK
6658void bnx2x_ilt_set_info(struct bnx2x *bp)
6659{
6660 struct ilt_client_info *ilt_client;
6661 struct bnx2x_ilt *ilt = BP_ILT(bp);
6662 u16 line = 0;
6663
6664 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6665 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6666
6667 /* CDU */
6668 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6669 ilt_client->client_num = ILT_CLIENT_CDU;
6670 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6671 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6672 ilt_client->start = line;
6673 line += L2_ILT_LINES(bp);
6674#ifdef BCM_CNIC
6675 line += CNIC_ILT_LINES;
6676#endif
6677 ilt_client->end = line - 1;
6678
6679 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6680 "flags 0x%x, hw psz %d\n",
6681 ilt_client->start,
6682 ilt_client->end,
6683 ilt_client->page_size,
6684 ilt_client->flags,
6685 ilog2(ilt_client->page_size >> 12));
6686
6687 /* QM */
6688 if (QM_INIT(bp->qm_cid_count)) {
6689 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6690 ilt_client->client_num = ILT_CLIENT_QM;
6691 ilt_client->page_size = QM_ILT_PAGE_SZ;
6692 ilt_client->flags = 0;
6693 ilt_client->start = line;
6694
6695 /* 4 bytes for each cid */
6696 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6697 QM_ILT_PAGE_SZ);
6698
6699 ilt_client->end = line - 1;
6700
6701 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6702 "flags 0x%x, hw psz %d\n",
6703 ilt_client->start,
6704 ilt_client->end,
6705 ilt_client->page_size,
6706 ilt_client->flags,
6707 ilog2(ilt_client->page_size >> 12));
6708
6709 }
6710 /* SRC */
6711 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6712#ifdef BCM_CNIC
6713 ilt_client->client_num = ILT_CLIENT_SRC;
6714 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6715 ilt_client->flags = 0;
6716 ilt_client->start = line;
6717 line += SRC_ILT_LINES;
6718 ilt_client->end = line - 1;
6719
6720 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6721 "flags 0x%x, hw psz %d\n",
6722 ilt_client->start,
6723 ilt_client->end,
6724 ilt_client->page_size,
6725 ilt_client->flags,
6726 ilog2(ilt_client->page_size >> 12));
6727
6728#else
6729 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6730#endif
9f6c9258 6731
523224a3
DK
6732 /* TM */
6733 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6734#ifdef BCM_CNIC
6735 ilt_client->client_num = ILT_CLIENT_TM;
6736 ilt_client->page_size = TM_ILT_PAGE_SZ;
6737 ilt_client->flags = 0;
6738 ilt_client->start = line;
6739 line += TM_ILT_LINES;
6740 ilt_client->end = line - 1;
6741
6742 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6743 "flags 0x%x, hw psz %d\n",
6744 ilt_client->start,
6745 ilt_client->end,
6746 ilt_client->page_size,
6747 ilt_client->flags,
6748 ilog2(ilt_client->page_size >> 12));
9f6c9258 6749
523224a3
DK
6750#else
6751 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6752#endif
6753}
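
bnx2x_ilt_set_info() lays the clients out back to back: each client starts at the running line counter, advances it by its own requirement, and records end = line - 1. A hedged sketch of that accumulation with made-up constants, not the driver's real CDU/QM sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* illustrative values only, not the driver's real constants */
	unsigned int line = 0, start, end;
	unsigned int l2_lines = 16, qm_cids = 64, qm_queues = 8, qm_page = 0x1000;

	start = line;                   /* CDU client */
	line += l2_lines;
	end = line - 1;
	printf("CDU: start %u end %u\n", start, end);

	start = line;                   /* QM client: 4 bytes per cid per queue */
	line += DIV_ROUND_UP(qm_cids * qm_queues * 4, qm_page);
	end = line - 1;
	printf("QM:  start %u end %u\n", start, end);
	return 0;
}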
f85582f8 6754
523224a3
DK
6755int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6756 int is_leading)
a2fbb9ea 6757{
523224a3 6758 struct bnx2x_client_init_params params = { {0} };
a2fbb9ea
ET
6759 int rc;
6760
ec6ba945
VZ
6761 /* reset IGU state, skipping the FCoE L2 queue */
6762 if (!IS_FCOE_FP(fp))
6763 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
523224a3 6764 IGU_INT_ENABLE, 0);
a2fbb9ea 6765
523224a3
DK
6766 params.ramrod_params.pstate = &fp->state;
6767 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6768 params.ramrod_params.index = fp->index;
6769 params.ramrod_params.cid = fp->cid;
a2fbb9ea 6770
ec6ba945
VZ
6771#ifdef BCM_CNIC
6772 if (IS_FCOE_FP(fp))
6773 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6774
6775#endif
6776
523224a3
DK
6777 if (is_leading)
6778 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 6779
523224a3
DK
6780 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6781
6782 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6783
6784 rc = bnx2x_setup_fw_client(bp, &params, 1,
6785 bnx2x_sp(bp, client_init_data),
6786 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 6787 return rc;
a2fbb9ea
ET
6788}
6789
8d96286a 6790static int bnx2x_stop_fw_client(struct bnx2x *bp,
6791 struct bnx2x_client_ramrod_params *p)
a2fbb9ea 6792{
34f80b04 6793 int rc;
a2fbb9ea 6794
523224a3 6795 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 6796
523224a3
DK
6797 /* halt the connection */
6798 *p->pstate = BNX2X_FP_STATE_HALTING;
6799 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6800 p->cl_id, 0);
a2fbb9ea 6801
34f80b04 6802 /* Wait for completion */
523224a3
DK
6803 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6804 p->pstate, poll_flag);
34f80b04 6805 if (rc) /* timeout */
da5a662a 6806 return rc;
a2fbb9ea 6807
523224a3
DK
6808 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6809 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6810 p->cl_id, 0);
6811 /* Wait for completion */
6812 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6813 p->pstate, poll_flag);
6814 if (rc) /* timeout */
6815 return rc;
a2fbb9ea 6816
a2fbb9ea 6817
523224a3
DK
6818 /* delete cfc entry */
6819 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 6820
523224a3
DK
6821 /* Wait for completion */
6822 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6823 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 6824 return rc;
a2fbb9ea
ET
6825}
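
The teardown above is a strict three-step handshake: post HALT and wait for HALTED, post TERMINATE and wait for TERMINATED, then delete the CFC entry and wait for CLOSED, bailing out on any timeout. A compact sketch of that sequence; post_ramrod() and wait_state() are hypothetical stand-ins for bnx2x_sp_post()/bnx2x_wait_ramrod():

#include <stdio.h>

enum fp_state { HALTING, HALTED, TERMINATING, TERMINATED, CLOSED };

/* trivial stand-ins: post_ramrod() just logs, wait_state() always
 * "succeeds"; in the driver both can fail with a timeout */
static void post_ramrod(const char *cmd) { printf("post %s\n", cmd); }
static int wait_state(enum fp_state want) { (void)want; return 0; }

static int stop_client(void)
{
	post_ramrod("HALT");
	if (wait_state(HALTED))
		return -1;              /* timeout */
	post_ramrod("TERMINATE");
	if (wait_state(TERMINATED))
		return -1;
	post_ramrod("CFC_DEL");
	return wait_state(CLOSED);
}

int main(void) { return stop_client(); }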
6826
523224a3
DK
6827static int bnx2x_stop_client(struct bnx2x *bp, int index)
6828{
6829 struct bnx2x_client_ramrod_params client_stop = {0};
6830 struct bnx2x_fastpath *fp = &bp->fp[index];
6831
6832 client_stop.index = index;
6833 client_stop.cid = fp->cid;
6834 client_stop.cl_id = fp->cl_id;
6835 client_stop.pstate = &(fp->state);
6836 client_stop.poll = 0;
6837
6838 return bnx2x_stop_fw_client(bp, &client_stop);
6839}
6840
6841
34f80b04
EG
6842static void bnx2x_reset_func(struct bnx2x *bp)
6843{
6844 int port = BP_PORT(bp);
6845 int func = BP_FUNC(bp);
f2e0899f 6846 int i;
523224a3 6847 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
f2e0899f
DK
6848 (CHIP_IS_E2(bp) ?
6849 offsetof(struct hc_status_block_data_e2, common) :
6850 offsetof(struct hc_status_block_data_e1x, common));
523224a3
DK
6851 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6852 int pfid_offset = offsetof(struct pci_entity, pf_id);
6853
6854 /* Disable the function in the FW */
6855 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6856 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6857 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6858 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6859
6860 /* FP SBs */
ec6ba945 6861 for_each_eth_queue(bp, i) {
523224a3
DK
6862 struct bnx2x_fastpath *fp = &bp->fp[i];
6863 REG_WR8(bp,
6864 BAR_CSTRORM_INTMEM +
6865 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6866 + pfunc_offset_fp + pfid_offset,
6867 HC_FUNCTION_DISABLED);
6868 }
6869
6870 /* SP SB */
6871 REG_WR8(bp,
6872 BAR_CSTRORM_INTMEM +
6873 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6874 pfunc_offset_sp + pfid_offset,
6875 HC_FUNCTION_DISABLED);
6876
6877
6878 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6879 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6880 0);
34f80b04
EG
6881
6882 /* Configure IGU */
f2e0899f
DK
6883 if (bp->common.int_block == INT_BLOCK_HC) {
6884 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6885 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6886 } else {
6887 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6888 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6889 }
34f80b04 6890
37b091ba
MC
6891#ifdef BCM_CNIC
6892 /* Disable Timer scan */
6893 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6894 /*
6895 * Wait for at least 10ms and up to 2 second for the timers scan to
6896 * complete
6897 */
6898 for (i = 0; i < 200; i++) {
6899 msleep(10);
6900 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6901 break;
6902 }
6903#endif
34f80b04 6904 /* Clear ILT */
f2e0899f
DK
6905 bnx2x_clear_func_ilt(bp, func);
6906
6907 /* Timers workaround for an E2 bug: if this is vnic-3,
6908 * we need to set the entire ILT range for these timers.
6909 */
6910 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6911 struct ilt_client_info ilt_cli;
6912 /* use dummy TM client */
6913 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6914 ilt_cli.start = 0;
6915 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6916 ilt_cli.client_num = ILT_CLIENT_TM;
6917
6918 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6919 }
6920
6921 /* this assumes that reset_port() was called before reset_func() */
6922 if (CHIP_IS_E2(bp))
6923 bnx2x_pf_disable(bp);
523224a3
DK
6924
6925 bp->dmae_ready = 0;
34f80b04
EG
6926}
6927
6928static void bnx2x_reset_port(struct bnx2x *bp)
6929{
6930 int port = BP_PORT(bp);
6931 u32 val;
6932
6933 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6934
6935 /* Do not rcv packets to BRB */
6936 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6937 /* Do not direct rcv packets that are not for MCP to the BRB */
6938 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6939 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6940
6941 /* Configure AEU */
6942 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6943
6944 msleep(100);
6945 /* Check for BRB port occupancy */
6946 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6947 if (val)
6948 DP(NETIF_MSG_IFDOWN,
33471629 6949 "BRB1 is not empty, %d blocks are occupied\n", val);
34f80b04
EG
6950
6951 /* TODO: Close Doorbell port? */
6952}
6953
34f80b04
EG
6954static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6955{
6956 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 6957 BP_ABS_FUNC(bp), reset_code);
34f80b04
EG
6958
6959 switch (reset_code) {
6960 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6961 bnx2x_reset_port(bp);
6962 bnx2x_reset_func(bp);
6963 bnx2x_reset_common(bp);
6964 break;
6965
6966 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6967 bnx2x_reset_port(bp);
6968 bnx2x_reset_func(bp);
6969 break;
6970
6971 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6972 bnx2x_reset_func(bp);
6973 break;
49d66772 6974
34f80b04
EG
6975 default:
6976 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6977 break;
6978 }
6979}
6980
ec6ba945
VZ
6981#ifdef BCM_CNIC
6982static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
6983{
6984 if (bp->flags & FCOE_MACS_SET) {
6985 if (!IS_MF_SD(bp))
6986 bnx2x_set_fip_eth_mac_addr(bp, 0);
6987
6988 bnx2x_set_all_enode_macs(bp, 0);
6989
6990 bp->flags &= ~FCOE_MACS_SET;
6991 }
6992}
6993#endif
6994
9f6c9258 6995void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6996{
da5a662a 6997 int port = BP_PORT(bp);
a2fbb9ea 6998 u32 reset_code = 0;
da5a662a 6999 int i, cnt, rc;
a2fbb9ea 7000
555f6c78 7001 /* Wait until tx fastpath tasks complete */
ec6ba945 7002 for_each_tx_queue(bp, i) {
228241eb
ET
7003 struct bnx2x_fastpath *fp = &bp->fp[i];
7004
34f80b04 7005 cnt = 1000;
e8b5fc51 7006 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7007
34f80b04
EG
7008 if (!cnt) {
7009 BNX2X_ERR("timeout waiting for queue[%d]\n",
7010 i);
7011#ifdef BNX2X_STOP_ON_ERROR
7012 bnx2x_panic();
7013 return -EBUSY;
7014#else
7015 break;
7016#endif
7017 }
7018 cnt--;
da5a662a 7019 msleep(1);
34f80b04 7020 }
228241eb 7021 }
da5a662a
VZ
7022 /* Give HW time to discard old tx messages */
7023 msleep(1);
a2fbb9ea 7024
6e30dd4e 7025 bnx2x_set_eth_mac(bp, 0);
65abd74d 7026
6e30dd4e 7027 bnx2x_invalidate_uc_list(bp);
3101c2bc 7028
6e30dd4e
VZ
7029 if (CHIP_IS_E1(bp))
7030 bnx2x_invalidate_e1_mc_list(bp);
7031 else {
7032 bnx2x_invalidate_e1h_mc_list(bp);
7033 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3101c2bc 7034 }
523224a3 7035
993ac7b5 7036#ifdef BCM_CNIC
ec6ba945 7037 bnx2x_del_fcoe_eth_macs(bp);
993ac7b5 7038#endif
3101c2bc 7039
65abd74d
YG
7040 if (unload_mode == UNLOAD_NORMAL)
7041 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7042
7d0446c2 7043 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7044 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7045
7d0446c2 7046 else if (bp->wol) {
65abd74d
YG
7047 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7048 u8 *mac_addr = bp->dev->dev_addr;
7049 u32 val;
7050 /* The MAC address is written to entries 1-4 to
7051 preserve entry 0, which is used by the PMF */
7052 u8 entry = (BP_E1HVN(bp) + 1)*8;
7053
7054 val = (mac_addr[0] << 8) | mac_addr[1];
7055 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7056
7057 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7058 (mac_addr[4] << 8) | mac_addr[5];
7059 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7060
7061 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7062
7063 } else
7064 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7065
34f80b04
EG
7066 /* Close multi and leading connections.
7067 Completions for ramrods are collected in a synchronous way */
523224a3
DK
7068 for_each_queue(bp, i)
7069
7070 if (bnx2x_stop_client(bp, i))
7071#ifdef BNX2X_STOP_ON_ERROR
7072 return;
7073#else
228241eb 7074 goto unload_error;
523224a3 7075#endif
a2fbb9ea 7076
523224a3 7077 rc = bnx2x_func_stop(bp);
da5a662a 7078 if (rc) {
523224a3 7079 BNX2X_ERR("Function stop failed!\n");
da5a662a 7080#ifdef BNX2X_STOP_ON_ERROR
523224a3 7081 return;
da5a662a
VZ
7082#else
7083 goto unload_error;
34f80b04 7084#endif
228241eb 7085 }
523224a3 7086#ifndef BNX2X_STOP_ON_ERROR
228241eb 7087unload_error:
523224a3 7088#endif
34f80b04 7089 if (!BP_NOMCP(bp))
a22f0788 7090 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 7091 else {
f2e0899f
DK
7092 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7093 "%d, %d, %d\n", BP_PATH(bp),
7094 load_count[BP_PATH(bp)][0],
7095 load_count[BP_PATH(bp)][1],
7096 load_count[BP_PATH(bp)][2]);
7097 load_count[BP_PATH(bp)][0]--;
7098 load_count[BP_PATH(bp)][1 + port]--;
7099 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7100 "%d, %d, %d\n", BP_PATH(bp),
7101 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7102 load_count[BP_PATH(bp)][2]);
7103 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 7104 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 7105 else if (load_count[BP_PATH(bp)][1 + port] == 0)
34f80b04
EG
7106 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7107 else
7108 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7109 }
a2fbb9ea 7110
34f80b04
EG
7111 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7112 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7113 bnx2x__link_reset(bp);
a2fbb9ea 7114
523224a3
DK
7115 /* Disable HW interrupts, NAPI */
7116 bnx2x_netif_stop(bp, 1);
7117
7118 /* Release IRQs */
d6214d7a 7119 bnx2x_free_irq(bp);
523224a3 7120
a2fbb9ea 7121 /* Reset the chip */
228241eb 7122 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7123
7124 /* Report UNLOAD_DONE to MCP */
34f80b04 7125 if (!BP_NOMCP(bp))
a22f0788 7126 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 7127
72fd0718
VZ
7128}
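
In the NO-MCP path above the driver keeps its own reference counts: one per chip path plus one per port. Whoever decrements the path count to zero does the COMMON reset, the last function on a port does the PORT reset, and everyone else resets only their FUNCTION. A hedged sketch of that bookkeeping; a single path is shown and the counts are illustrative:

#include <stdio.h>

/* index 0 counts the whole path, 1 and 2 count each port,
 * mirroring the driver's load_count[path][] layout */
static int load_count[3] = { 2, 1, 1 };

static const char *unload_reset_code(int port)
{
	load_count[0]--;
	load_count[1 + port]--;
	if (load_count[0] == 0)
		return "UNLOAD_COMMON";
	if (load_count[1 + port] == 0)
		return "UNLOAD_PORT";
	return "UNLOAD_FUNCTION";
}

int main(void)
{
	printf("%s\n", unload_reset_code(0)); /* -> UNLOAD_PORT */
	printf("%s\n", unload_reset_code(1)); /* -> UNLOAD_COMMON */
	return 0;
}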
7129
9f6c9258 7130void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
7131{
7132 u32 val;
7133
7134 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7135
7136 if (CHIP_IS_E1(bp)) {
7137 int port = BP_PORT(bp);
7138 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7139 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7140
7141 val = REG_RD(bp, addr);
7142 val &= ~(0x300);
7143 REG_WR(bp, addr, val);
7144 } else if (CHIP_IS_E1H(bp)) {
7145 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7146 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7147 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7148 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7149 }
7150}
7151
72fd0718
VZ
7152/* Close gates #2, #3 and #4: */
7153static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7154{
7155 u32 val, addr;
7156
7157 /* Gates #2 and #4a are closed/opened for "not E1" only */
7158 if (!CHIP_IS_E1(bp)) {
7159 /* #4 */
7160 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7161 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7162 close ? (val | 0x1) : (val & (~(u32)1)));
7163 /* #2 */
7164 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7165 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7166 close ? (val | 0x1) : (val & (~(u32)1)));
7167 }
7168
7169 /* #3 */
7170 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7171 val = REG_RD(bp, addr);
7172 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7173
7174 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7175 close ? "closing" : "opening");
7176 mmiowb();
7177}
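
Note the polarity difference above: gates #2 and #4 are closed by setting bit 0 of their discard registers, while gate #3 (HC_REG_CONFIG) is opened by setting the bit, hence the (!close) test. The read-modify-write itself is the usual single-bit pattern; a trivial sketch with a plain variable standing in for a device register:

#include <stdint.h>
#include <stdio.h>

static uint32_t set_bit0(uint32_t val, int on)
{
	return on ? (val | 0x1) : (val & ~(uint32_t)1);
}

int main(void)
{
	uint32_t reg = 0xdead0000;      /* illustrative register image */
	reg = set_bit0(reg, 1);
	printf("closed: 0x%08x\n", reg);  /* ...0001 */
	reg = set_bit0(reg, 0);
	printf("opened: 0x%08x\n", reg);  /* ...0000 */
	return 0;
}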
7178
7179#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7180
7181static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7182{
7183 /* Do some magic... */
7184 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7185 *magic_val = val & SHARED_MF_CLP_MAGIC;
7186 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7187}
7188
e8920674
DK
7189/**
7190 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
72fd0718 7191 *
e8920674
DK
7192 * @bp: driver handle
7193 * @magic_val: old value of the `magic' bit.
72fd0718
VZ
7194 */
7195static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7196{
7197 /* Restore the `magic' bit value... */
72fd0718
VZ
7198 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7199 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7200 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7201}
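
The prep/done pair above is a classic save-and-restore of a single bit: remember the current `magic' state, force it on for the duration of the reset, then merge the saved value back. A self-contained sketch with an illustrative register image:

#include <stdint.h>
#include <stdio.h>

#define CLP_MAGIC 0x80000000u

int main(void)
{
	uint32_t clp_mb = 0x00001234;   /* illustrative value */
	uint32_t saved = clp_mb & CLP_MAGIC;

	clp_mb |= CLP_MAGIC;                       /* prep: set magic */
	printf("during reset: 0x%08x\n", clp_mb);
	clp_mb = (clp_mb & ~CLP_MAGIC) | saved;    /* done: restore */
	printf("after reset:  0x%08x\n", clp_mb);  /* back to 0x00001234 */
	return 0;
}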
7202
f85582f8 7203/**
e8920674 7204 * bnx2x_reset_mcp_prep - prepare for MCP reset.
72fd0718 7205 *
e8920674
DK
7206 * @bp: driver handle
7207 * @magic_val: old value of 'magic' bit.
7208 *
7209 * Takes care of CLP configurations.
72fd0718
VZ
7210 */
7211static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7212{
7213 u32 shmem;
7214 u32 validity_offset;
7215
7216 DP(NETIF_MSG_HW, "Starting\n");
7217
7218 /* Set `magic' bit in order to save MF config */
7219 if (!CHIP_IS_E1(bp))
7220 bnx2x_clp_reset_prep(bp, magic_val);
7221
7222 /* Get shmem offset */
7223 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7224 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7225
7226 /* Clear validity map flags */
7227 if (shmem > 0)
7228 REG_WR(bp, shmem + validity_offset, 0);
7229}
7230
7231#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7232#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7233
e8920674
DK
7234/**
7235 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
72fd0718 7236 *
e8920674 7237 * @bp: driver handle
72fd0718
VZ
7238 */
7239static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7240{
7241 /* special handling for emulation and FPGA,
7242 wait 10 times longer */
7243 if (CHIP_REV_IS_SLOW(bp))
7244 msleep(MCP_ONE_TIMEOUT*10);
7245 else
7246 msleep(MCP_ONE_TIMEOUT);
7247}
7248
1b6e2ceb
DK
7249/*
7250 * initializes bp->common.shmem_base and waits for validity signature to appear
7251 */
7252static int bnx2x_init_shmem(struct bnx2x *bp)
72fd0718 7253{
1b6e2ceb
DK
7254 int cnt = 0;
7255 u32 val = 0;
72fd0718 7256
1b6e2ceb
DK
7257 do {
7258 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7259 if (bp->common.shmem_base) {
7260 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7261 if (val & SHR_MEM_VALIDITY_MB)
7262 return 0;
7263 }
72fd0718 7264
1b6e2ceb 7265 bnx2x_mcp_wait_one(bp);
72fd0718 7266
1b6e2ceb 7267 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
72fd0718 7268
1b6e2ceb 7269 BNX2X_ERR("BAD MCP validity signature\n");
72fd0718 7270
1b6e2ceb
DK
7271 return -ENODEV;
7272}
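
bnx2x_init_shmem() is a bounded poll: re-read the base and validity signature every MCP_ONE_TIMEOUT ms until it appears or MCP_TIMEOUT elapses. A hedged sketch of the same retry shape; read_validity() is a hypothetical stand-in for the SHMEM_RD() check and the sleep is elided:

#include <stdio.h>

#define ONE_TIMEOUT 100   /* ms per try */
#define TIMEOUT     5000  /* total ms   */

static int read_validity(int attempt) { return attempt >= 3; } /* pretend it appears on try 4 */

int main(void)
{
	int cnt = 0;
	do {
		if (read_validity(cnt)) {
			printf("valid after %d tries\n", cnt + 1);
			return 0;
		}
		/* msleep(ONE_TIMEOUT) in the driver; omitted here */
	} while (cnt++ < TIMEOUT / ONE_TIMEOUT);
	return -1;      /* -ENODEV in the driver */
}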
72fd0718 7273
1b6e2ceb
DK
7274static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7275{
7276 int rc = bnx2x_init_shmem(bp);
72fd0718 7277
72fd0718
VZ
7278 /* Restore the `magic' bit value */
7279 if (!CHIP_IS_E1(bp))
7280 bnx2x_clp_reset_done(bp, magic_val);
7281
7282 return rc;
7283}
7284
7285static void bnx2x_pxp_prep(struct bnx2x *bp)
7286{
7287 if (!CHIP_IS_E1(bp)) {
7288 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7289 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7290 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7291 mmiowb();
7292 }
7293}
7294
7295/*
7296 * Reset the whole chip except for:
7297 * - PCIE core
7298 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7299 * one reset bit)
7300 * - IGU
7301 * - MISC (including AEU)
7302 * - GRC
7303 * - RBCN, RBCP
7304 */
7305static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7306{
7307 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7308
7309 not_reset_mask1 =
7310 MISC_REGISTERS_RESET_REG_1_RST_HC |
7311 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7312 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7313
7314 not_reset_mask2 =
7315 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7316 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7317 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7318 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7319 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7320 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7321 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7322 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7323
7324 reset_mask1 = 0xffffffff;
7325
7326 if (CHIP_IS_E1(bp))
7327 reset_mask2 = 0xffff;
7328 else
7329 reset_mask2 = 0x1ffff;
7330
7331 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7332 reset_mask1 & (~not_reset_mask1));
7333 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7334 reset_mask2 & (~not_reset_mask2));
7335
7336 barrier();
7337 mmiowb();
7338
7339 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7340 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7341 mmiowb();
7342}
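
The reset masks above follow a keep-list pattern: start from all-ones and mask out the blocks that must survive (PCIe glue, IGU, MISC, GRC, RBCN/RBCP per the comment). A small sketch of the arithmetic with illustrative bit positions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* reset everything except the blocks in the keep mask, as the
	 * CLEAR/SET register pair above does; bits are illustrative */
	uint32_t reset_mask = 0xffffffff;
	uint32_t keep_mask  = (1u << 3) | (1u << 7);

	printf("assert reset on: 0x%08x\n", reset_mask & ~keep_mask);
	printf("left untouched:  0x%08x\n", keep_mask);
	return 0;
}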
7343
7344static int bnx2x_process_kill(struct bnx2x *bp)
7345{
7346 int cnt = 1000;
7347 u32 val = 0;
7348 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7349
7350
7351 /* Empty the Tetris buffer, wait for 1s */
7352 do {
7353 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7354 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7355 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7356 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7357 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7358 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7359 ((port_is_idle_0 & 0x1) == 0x1) &&
7360 ((port_is_idle_1 & 0x1) == 0x1) &&
7361 (pgl_exp_rom2 == 0xffffffff))
7362 break;
7363 msleep(1);
7364 } while (cnt-- > 0);
7365
7366 if (cnt <= 0) {
7367 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7368 " are still"
7369 " outstanding read requests after 1s!\n");
7370 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7371 " port_is_idle_0=0x%08x,"
7372 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7373 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7374 pgl_exp_rom2);
7375 return -EAGAIN;
7376 }
7377
7378 barrier();
7379
7380 /* Close gates #2, #3 and #4 */
7381 bnx2x_set_234_gates(bp, true);
7382
7383 /* TBD: Indicate that "process kill" is in progress to MCP */
7384
7385 /* Clear "unprepared" bit */
7386 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7387 barrier();
7388
7389 /* Make sure all is written to the chip before the reset */
7390 mmiowb();
7391
7392 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7393 * PSWHST, GRC and PSWRD Tetris buffer.
7394 */
7395 msleep(1);
7396
7397 /* Prepare to chip reset: */
7398 /* MCP */
7399 bnx2x_reset_mcp_prep(bp, &val);
7400
7401 /* PXP */
7402 bnx2x_pxp_prep(bp);
7403 barrier();
7404
7405 /* reset the chip */
7406 bnx2x_process_kill_chip_reset(bp);
7407 barrier();
7408
7409 /* Recover after reset: */
7410 /* MCP */
7411 if (bnx2x_reset_mcp_comp(bp, val))
7412 return -EAGAIN;
7413
7414 /* PXP */
7415 bnx2x_pxp_prep(bp);
7416
7417 /* Open the gates #2, #3 and #4 */
7418 bnx2x_set_234_gates(bp, false);
7419
7420 /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
7421 * reset state, re-enable attentions. */
7422
a2fbb9ea
ET
7423 return 0;
7424}
7425
72fd0718
VZ
7426static int bnx2x_leader_reset(struct bnx2x *bp)
7427{
7428 int rc = 0;
7429 /* Try to recover after the failure */
7430 if (bnx2x_process_kill(bp)) {
7431 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
7432 bp->dev->name);
7433 rc = -EAGAIN;
7434 goto exit_leader_reset;
7435 }
7436
7437 /* Clear "reset is in progress" bit and update the driver state */
7438 bnx2x_set_reset_done(bp);
7439 bp->recovery_state = BNX2X_RECOVERY_DONE;
7440
7441exit_leader_reset:
7442 bp->is_leader = 0;
7443 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7444 smp_wmb();
7445 return rc;
7446}
7447
72fd0718
VZ
7448/* Assumption: runs under rtnl lock. This together with the fact
7449 * that it's called only from bnx2x_reset_task() ensure that it
7450 * will never be called when netif_running(bp->dev) is false.
7451 */
7452static void bnx2x_parity_recover(struct bnx2x *bp)
7453{
7454 DP(NETIF_MSG_HW, "Handling parity\n");
7455 while (1) {
7456 switch (bp->recovery_state) {
7457 case BNX2X_RECOVERY_INIT:
7458 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7459 /* Try to get a LEADER_LOCK HW lock */
7460 if (bnx2x_trylock_hw_lock(bp,
7461 HW_LOCK_RESOURCE_RESERVED_08))
7462 bp->is_leader = 1;
7463
7464 /* Stop the driver */
7465 /* If interface has been removed - break */
7466 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7467 return;
7468
7469 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7470 /* Ensure "is_leader" and "recovery_state"
7471 * update values are seen on other CPUs
7472 */
7473 smp_wmb();
7474 break;
7475
7476 case BNX2X_RECOVERY_WAIT:
7477 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7478 if (bp->is_leader) {
7479 u32 load_counter = bnx2x_get_load_cnt(bp);
7480 if (load_counter) {
7481 /* Wait until all other functions get
7482 * down.
7483 */
7484 schedule_delayed_work(&bp->reset_task,
7485 HZ/10);
7486 return;
7487 } else {
7488 /* If all other functions got down -
7489 * try to bring the chip back to
7490 * normal. In any case it's an exit
7491 * point for a leader.
7492 */
7493 if (bnx2x_leader_reset(bp) ||
7494 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7495 printk(KERN_ERR"%s: Recovery "
7496 "has failed. Power cycle is "
7497 "needed.\n", bp->dev->name);
7498 /* Disconnect this device */
7499 netif_device_detach(bp->dev);
7500 /* Block ifup for all function
7501 * of this ASIC until
7502 * "process kill" or power
7503 * cycle.
7504 */
7505 bnx2x_set_reset_in_progress(bp);
7506 /* Shut down the power */
7507 bnx2x_set_power_state(bp,
7508 PCI_D3hot);
7509 return;
7510 }
7511
7512 return;
7513 }
7514 } else { /* non-leader */
7515 if (!bnx2x_reset_is_done(bp)) {
7516 /* Try to get a LEADER_LOCK HW lock, since
7517 * a former leader may have
7518 * been unloaded by the user or
7519 * released leadership for another
7520 * reason.
7521 */
7522 if (bnx2x_trylock_hw_lock(bp,
7523 HW_LOCK_RESOURCE_RESERVED_08)) {
7524 /* I'm a leader now! Restart a
7525 * switch case.
7526 */
7527 bp->is_leader = 1;
7528 break;
7529 }
7530
7531 schedule_delayed_work(&bp->reset_task,
7532 HZ/10);
7533 return;
7534
7535 } else { /* A leader has completed
7536 * the "process kill". It's an exit
7537 * point for a non-leader.
7538 */
7539 bnx2x_nic_load(bp, LOAD_NORMAL);
7540 bp->recovery_state =
7541 BNX2X_RECOVERY_DONE;
7542 smp_wmb();
7543 return;
7544 }
7545 }
7546 default:
7547 return;
7548 }
7549 }
7550}
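
The recovery state machine above is driven by a simple leader election: the first function to take the RESERVED_08 HW lock becomes the leader and runs the "process kill", the rest poll until the reset-done flag shows up. A toy sketch of that election; trylock() is a trivial stand-in for bnx2x_trylock_hw_lock():

#include <stdio.h>

static int lock_taken;
static int trylock(void) { return !lock_taken ? (lock_taken = 1) : 0; }

int main(void)
{
	for (int func = 0; func < 3; func++) {
		if (trylock())
			printf("func %d: leader, runs process kill\n", func);
		else
			printf("func %d: waits for reset done\n", func);
	}
	return 0;
}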
7551
7552/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7553 * scheduled on a general queue in order to prevent a dead lock.
7554 */
34f80b04
EG
7555static void bnx2x_reset_task(struct work_struct *work)
7556{
72fd0718 7557 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
7558
7559#ifdef BNX2X_STOP_ON_ERROR
7560 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7561 " so reset not done to allow debug dump,\n"
72fd0718 7562 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
7563 return;
7564#endif
7565
7566 rtnl_lock();
7567
7568 if (!netif_running(bp->dev))
7569 goto reset_task_exit;
7570
72fd0718
VZ
7571 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7572 bnx2x_parity_recover(bp);
7573 else {
7574 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7575 bnx2x_nic_load(bp, LOAD_NORMAL);
7576 }
34f80b04
EG
7577
7578reset_task_exit:
7579 rtnl_unlock();
7580}
7581
a2fbb9ea
ET
7582/* end of nic load/unload */
7583
a2fbb9ea
ET
7584/*
7585 * Init service functions
7586 */
7587
8d96286a 7588static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
f2e0899f
DK
7589{
7590 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7591 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7592 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
7593}
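
bnx2x_get_pretend_reg() derives a per-function register address as base + func * stride, computing the stride from two consecutive register instances instead of hard-coding it. A sketch with illustrative addresses, not the chip's real offsets:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t func_f0 = 0x10000, func_f1 = 0x10040; /* illustrative */
	uint32_t stride = func_f1 - func_f0;

	for (int func = 0; func < 4; func++)
		printf("func %d pretend reg: 0x%05x\n", func,
		       func_f0 + func * stride);
	return 0;
}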
7594
f2e0899f 7595static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 7596{
f2e0899f 7597 u32 reg = bnx2x_get_pretend_reg(bp);
f1ef27ef
EG
7598
7599 /* Flush all outstanding writes */
7600 mmiowb();
7601
7602 /* Pretend to be function 0 */
7603 REG_WR(bp, reg, 0);
f2e0899f 7604 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
f1ef27ef
EG
7605
7606 /* From now we are in the "like-E1" mode */
7607 bnx2x_int_disable(bp);
7608
7609 /* Flush all outstanding writes */
7610 mmiowb();
7611
f2e0899f
DK
7612 /* Restore the original function */
7613 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7614 REG_RD(bp, reg);
f1ef27ef
EG
7615}
7616
f2e0899f 7617static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 7618{
f2e0899f 7619 if (CHIP_IS_E1(bp))
f1ef27ef 7620 bnx2x_int_disable(bp);
f2e0899f
DK
7621 else
7622 bnx2x_undi_int_disable_e1h(bp);
f1ef27ef
EG
7623}
7624
34f80b04
EG
7625static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7626{
7627 u32 val;
7628
7629 /* Check if there is any driver already loaded */
7630 val = REG_RD(bp, MISC_REG_UNPREPARED);
7631 if (val == 0x1) {
7632 /* Check if it is the UNDI driver
7633 * UNDI driver initializes CID offset for normal bell to 0x7
7634 */
4a37fb66 7635 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7636 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7637 if (val == 0x7) {
7638 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
f2e0899f
DK
7639 /* save our pf_num */
7640 int orig_pf_num = bp->pf_num;
da5a662a
VZ
7641 u32 swap_en;
7642 u32 swap_val;
34f80b04 7643
b4661739
EG
7644 /* clear the UNDI indication */
7645 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7646
34f80b04
EG
7647 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7648
7649 /* try unload UNDI on port 0 */
f2e0899f 7650 bp->pf_num = 0;
da5a662a 7651 bp->fw_seq =
f2e0899f 7652 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7653 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 7654 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7655
7656 /* if UNDI is loaded on the other port */
7657 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7658
da5a662a 7659 /* send "DONE" for previous unload */
a22f0788
YR
7660 bnx2x_fw_command(bp,
7661 DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7662
7663 /* unload UNDI on port 1 */
f2e0899f 7664 bp->pf_num = 1;
da5a662a 7665 bp->fw_seq =
f2e0899f 7666 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a
VZ
7667 DRV_MSG_SEQ_NUMBER_MASK);
7668 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7669
a22f0788 7670 bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7671 }
7672
b4661739
EG
7673 /* now it's safe to release the lock */
7674 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7675
f2e0899f 7676 bnx2x_undi_int_disable(bp);
da5a662a
VZ
7677
7678 /* close input traffic and wait for it */
7679 /* Do not rcv packets to BRB */
7680 REG_WR(bp,
7681 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7682 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7683 /* Do not direct rcv packets that are not for MCP to
7684 * the BRB */
7685 REG_WR(bp,
7686 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7687 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7688 /* clear AEU */
7689 REG_WR(bp,
7690 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7691 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7692 msleep(10);
7693
7694 /* save NIG port swap info */
7695 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7696 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7697 /* reset device */
7698 REG_WR(bp,
7699 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7700 0xd3ffffff);
34f80b04
EG
7701 REG_WR(bp,
7702 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7703 0x1403);
da5a662a
VZ
7704 /* take the NIG out of reset and restore swap values */
7705 REG_WR(bp,
7706 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7707 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7708 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7709 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7710
7711 /* send unload done to the MCP */
a22f0788 7712 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7713
7714 /* restore our func and fw_seq */
f2e0899f 7715 bp->pf_num = orig_pf_num;
da5a662a 7716 bp->fw_seq =
f2e0899f 7717 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7718 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7719 } else
7720 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7721 }
7722}
7723
7724static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7725{
7726 u32 val, val2, val3, val4, id;
72ce58c3 7727 u16 pmc;
34f80b04
EG
7728
7729 /* Get the chip revision id and number. */
7730 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7731 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7732 id = ((val & 0xffff) << 16);
7733 val = REG_RD(bp, MISC_REG_CHIP_REV);
7734 id |= ((val & 0xf) << 12);
7735 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7736 id |= ((val & 0xff) << 4);
5a40e08e 7737 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7738 id |= (val & 0xf);
7739 bp->common.chip_id = id;
523224a3
DK
7740
7741 /* Set doorbell size */
7742 bp->db_size = (1 << BNX2X_DB_SHIFT);
7743
f2e0899f
DK
7744 if (CHIP_IS_E2(bp)) {
7745 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7746 if ((val & 1) == 0)
7747 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7748 else
7749 val = (val >> 1) & 1;
7750 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7751 "2_PORT_MODE");
7752 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7753 CHIP_2_PORT_MODE;
7754
7755 if (CHIP_MODE_IS_4_PORT(bp))
7756 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7757 else
7758 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7759 } else {
7760 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7761 bp->pfid = bp->pf_num; /* 0..7 */
7762 }
7763
523224a3
DK
7764 /*
7765 * set base FW non-default (fast path) status block id, this value is
7766 * used to initialize the fw_sb_id saved on the fp/queue structure to
7767 * determine the id used by the FW.
7768 */
f2e0899f
DK
7769 if (CHIP_IS_E1x(bp))
7770 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7771 else /* E2 */
7772 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7773
7774 bp->link_params.chip_id = bp->common.chip_id;
7775 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 7776
1c06328c
EG
7777 val = (REG_RD(bp, 0x2874) & 0x55);
7778 if ((bp->common.chip_id & 0x1) ||
7779 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7780 bp->flags |= ONE_PORT_FLAG;
7781 BNX2X_DEV_INFO("single port device\n");
7782 }
7783
34f80b04
EG
7784 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7785 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7786 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7787 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7788 bp->common.flash_size, bp->common.flash_size);
7789
1b6e2ceb
DK
7790 bnx2x_init_shmem(bp);
7791
f2e0899f
DK
7792 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7793 MISC_REG_GENERIC_CR_1 :
7794 MISC_REG_GENERIC_CR_0));
1b6e2ceb 7795
34f80b04 7796 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 7797 bp->link_params.shmem2_base = bp->common.shmem2_base;
2691d51d
EG
7798 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7799 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 7800
f2e0899f 7801 if (!bp->common.shmem_base) {
34f80b04
EG
7802 BNX2X_DEV_INFO("MCP not active\n");
7803 bp->flags |= NO_MCP_FLAG;
7804 return;
7805 }
7806
34f80b04 7807 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7808 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7809
7810 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7811 SHARED_HW_CFG_LED_MODE_MASK) >>
7812 SHARED_HW_CFG_LED_MODE_SHIFT);
7813
c2c8b03e
EG
7814 bp->link_params.feature_config_flags = 0;
7815 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7816 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7817 bp->link_params.feature_config_flags |=
7818 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7819 else
7820 bp->link_params.feature_config_flags &=
7821 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7822
34f80b04
EG
7823 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7824 bp->common.bc_ver = val;
7825 BNX2X_DEV_INFO("bc_ver %X\n", val);
7826 if (val < BNX2X_BC_VER) {
7827 /* for now only warn;
7828 * later we might need to enforce this */
f2e0899f
DK
7829 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7830 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 7831 }
4d295db0 7832 bp->link_params.feature_config_flags |=
a22f0788 7833 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
7834 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7835
a22f0788
YR
7836 bp->link_params.feature_config_flags |=
7837 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7838 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
72ce58c3 7839
f9a3ebbe
DK
7840 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7841 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7842
72ce58c3 7843 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7844 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7845
7846 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7847 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7848 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7849 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7850
cdaa7cb8
VZ
7851 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7852 val, val2, val3, val4);
34f80b04
EG
7853}
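
The chip id assembled near the top of bnx2x_get_common_hwinfo() packs four fields into one word, per the comment there: num in bits 16-31, rev in 12-15, metal in 4-11, bond_id in 0-3. A worked example with illustrative field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	uint32_t num = 0x164e, rev = 0x1, metal = 0x05, bond = 0x4;
	uint32_t id = (num << 16) | (rev << 12) | (metal << 4) | (bond & 0xf);

	printf("chip_id 0x%08x\n", id);   /* 0x164e1054 */
	return 0;
}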
7854
f2e0899f
DK
7855#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7856#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7857
7858static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7859{
7860 int pfid = BP_FUNC(bp);
7861 int vn = BP_E1HVN(bp);
7862 int igu_sb_id;
7863 u32 val;
7864 u8 fid;
7865
7866 bp->igu_base_sb = 0xff;
7867 bp->igu_sb_cnt = 0;
7868 if (CHIP_INT_MODE_IS_BC(bp)) {
7869 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
ec6ba945 7870 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
7871
7872 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7873 FP_SB_MAX_E1x;
7874
7875 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7876 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7877
7878 return;
7879 }
7880
7881 /* IGU in normal mode - read CAM */
7882 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7883 igu_sb_id++) {
7884 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7885 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7886 continue;
7887 fid = IGU_FID(val);
7888 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7889 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7890 continue;
7891 if (IGU_VEC(val) == 0)
7892 /* default status block */
7893 bp->igu_dsb_id = igu_sb_id;
7894 else {
7895 if (bp->igu_base_sb == 0xff)
7896 bp->igu_base_sb = igu_sb_id;
7897 bp->igu_sb_cnt++;
7898 }
7899 }
7900 }
ec6ba945
VZ
7901 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
7902 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
7903 if (bp->igu_sb_cnt == 0)
7904 BNX2X_ERR("CAM configuration error\n");
7905}
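
The CAM walk above collects three things for this PF: the default status block (vector 0), the first fastpath SB, and the fastpath SB count, skipping invalid entries and other functions' rows. A hedged sketch over a made-up CAM image:

#include <stdio.h>

struct cam_entry { int valid, pf, vec; };

int main(void)
{
	/* cam[] contents are invented for the demo */
	struct cam_entry cam[] = {
		{ 1, 0, 0 }, { 1, 0, 1 }, { 1, 1, 0 }, { 0, 0, 2 }, { 1, 0, 2 },
	};
	int my_pf = 0, dsb = -1, base = -1, cnt = 0;

	for (unsigned i = 0; i < sizeof(cam) / sizeof(cam[0]); i++) {
		if (!cam[i].valid || cam[i].pf != my_pf)
			continue;
		if (cam[i].vec == 0)
			dsb = i;                /* default status block */
		else {
			if (base < 0)
				base = i;       /* first fastpath SB */
			cnt++;
		}
	}
	printf("dsb %d, base %d, cnt %d\n", dsb, base, cnt); /* 0, 1, 2 */
	return 0;
}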
7906
34f80b04
EG
7907static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7908 u32 switch_cfg)
a2fbb9ea 7909{
a22f0788
YR
7910 int cfg_size = 0, idx, port = BP_PORT(bp);
7911
7912 /* Aggregation of supported attributes of all external phys */
7913 bp->port.supported[0] = 0;
7914 bp->port.supported[1] = 0;
b7737c9b
YR
7915 switch (bp->link_params.num_phys) {
7916 case 1:
a22f0788
YR
7917 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7918 cfg_size = 1;
7919 break;
b7737c9b 7920 case 2:
a22f0788
YR
7921 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7922 cfg_size = 1;
7923 break;
7924 case 3:
7925 if (bp->link_params.multi_phy_config &
7926 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7927 bp->port.supported[1] =
7928 bp->link_params.phy[EXT_PHY1].supported;
7929 bp->port.supported[0] =
7930 bp->link_params.phy[EXT_PHY2].supported;
7931 } else {
7932 bp->port.supported[0] =
7933 bp->link_params.phy[EXT_PHY1].supported;
7934 bp->port.supported[1] =
7935 bp->link_params.phy[EXT_PHY2].supported;
7936 }
7937 cfg_size = 2;
7938 break;
b7737c9b 7939 }
a2fbb9ea 7940
a22f0788 7941 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 7942 BNX2X_ERR("NVRAM config error. BAD phy config."
a22f0788 7943 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 7944 SHMEM_RD(bp,
a22f0788
YR
7945 dev_info.port_hw_config[port].external_phy_config),
7946 SHMEM_RD(bp,
7947 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 7948 return;
f85582f8 7949 }
a2fbb9ea 7950
b7737c9b
YR
7951 switch (switch_cfg) {
7952 case SWITCH_CFG_1G:
34f80b04
EG
7953 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7954 port*0x10);
7955 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7956 break;
7957
7958 case SWITCH_CFG_10G:
34f80b04
EG
7959 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7960 port*0x18);
7961 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7962 break;
7963
7964 default:
7965 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 7966 bp->port.link_config[0]);
a2fbb9ea
ET
7967 return;
7968 }
a22f0788
YR
7969 /* mask what we support according to speed_cap_mask per configuration */
7970 for (idx = 0; idx < cfg_size; idx++) {
7971 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7972 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 7973 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7974
a22f0788 7975 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7976 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 7977 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7978
a22f0788 7979 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7980 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 7981 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7982
a22f0788 7983 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7984 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 7985 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7986
a22f0788 7987 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7988 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 7989 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 7990 SUPPORTED_1000baseT_Full);
a2fbb9ea 7991
a22f0788 7992 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7993 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 7994 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7995
a22f0788 7996 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7997 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
7998 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7999
8000 }
a2fbb9ea 8001
a22f0788
YR
8002 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8003 bp->port.supported[1]);
a2fbb9ea
ET
8004}
8005
34f80b04 8006static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8007{
a22f0788
YR
8008 u32 link_config, idx, cfg_size = 0;
8009 bp->port.advertising[0] = 0;
8010 bp->port.advertising[1] = 0;
8011 switch (bp->link_params.num_phys) {
8012 case 1:
8013 case 2:
8014 cfg_size = 1;
8015 break;
8016 case 3:
8017 cfg_size = 2;
8018 break;
8019 }
8020 for (idx = 0; idx < cfg_size; idx++) {
8021 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8022 link_config = bp->port.link_config[idx];
8023 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 8024 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
8025 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8026 bp->link_params.req_line_speed[idx] =
8027 SPEED_AUTO_NEG;
8028 bp->port.advertising[idx] |=
8029 bp->port.supported[idx];
f85582f8
DK
8030 } else {
8031 /* force 10G, no AN */
a22f0788
YR
8032 bp->link_params.req_line_speed[idx] =
8033 SPEED_10000;
8034 bp->port.advertising[idx] |=
8035 (ADVERTISED_10000baseT_Full |
f85582f8 8036 ADVERTISED_FIBRE);
a22f0788 8037 continue;
f85582f8
DK
8038 }
8039 break;
a2fbb9ea 8040
f85582f8 8041 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
8042 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8043 bp->link_params.req_line_speed[idx] =
8044 SPEED_10;
8045 bp->port.advertising[idx] |=
8046 (ADVERTISED_10baseT_Full |
f85582f8
DK
8047 ADVERTISED_TP);
8048 } else {
8049 BNX2X_ERROR("NVRAM config error. "
8050 "Invalid link_config 0x%x"
8051 " speed_cap_mask 0x%x\n",
8052 link_config,
a22f0788 8053 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8054 return;
8055 }
8056 break;
a2fbb9ea 8057
f85582f8 8058 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
8059 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8060 bp->link_params.req_line_speed[idx] =
8061 SPEED_10;
8062 bp->link_params.req_duplex[idx] =
8063 DUPLEX_HALF;
8064 bp->port.advertising[idx] |=
8065 (ADVERTISED_10baseT_Half |
f85582f8
DK
8066 ADVERTISED_TP);
8067 } else {
8068 BNX2X_ERROR("NVRAM config error. "
8069 "Invalid link_config 0x%x"
8070 " speed_cap_mask 0x%x\n",
8071 link_config,
8072 bp->link_params.speed_cap_mask[idx]);
8073 return;
8074 }
8075 break;
a2fbb9ea 8076
f85582f8
DK
8077 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8078 if (bp->port.supported[idx] &
8079 SUPPORTED_100baseT_Full) {
a22f0788
YR
8080 bp->link_params.req_line_speed[idx] =
8081 SPEED_100;
8082 bp->port.advertising[idx] |=
8083 (ADVERTISED_100baseT_Full |
f85582f8
DK
8084 ADVERTISED_TP);
8085 } else {
8086 BNX2X_ERROR("NVRAM config error. "
8087 "Invalid link_config 0x%x"
8088 " speed_cap_mask 0x%x\n",
8089 link_config,
8090 bp->link_params.speed_cap_mask[idx]);
8091 return;
8092 }
8093 break;
a2fbb9ea 8094
f85582f8
DK
8095 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8096 if (bp->port.supported[idx] &
8097 SUPPORTED_100baseT_Half) {
8098 bp->link_params.req_line_speed[idx] =
8099 SPEED_100;
8100 bp->link_params.req_duplex[idx] =
8101 DUPLEX_HALF;
a22f0788
YR
8102 bp->port.advertising[idx] |=
8103 (ADVERTISED_100baseT_Half |
f85582f8
DK
8104 ADVERTISED_TP);
8105 } else {
8106 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8107 "Invalid link_config 0x%x"
8108 " speed_cap_mask 0x%x\n",
a22f0788
YR
8109 link_config,
8110 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8111 return;
8112 }
8113 break;
a2fbb9ea 8114
f85582f8 8115 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
8116 if (bp->port.supported[idx] &
8117 SUPPORTED_1000baseT_Full) {
8118 bp->link_params.req_line_speed[idx] =
8119 SPEED_1000;
8120 bp->port.advertising[idx] |=
8121 (ADVERTISED_1000baseT_Full |
f85582f8
DK
8122 ADVERTISED_TP);
8123 } else {
8124 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8125 "Invalid link_config 0x%x"
8126 " speed_cap_mask 0x%x\n",
a22f0788
YR
8127 link_config,
8128 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8129 return;
8130 }
8131 break;
a2fbb9ea 8132
f85582f8 8133 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
8134 if (bp->port.supported[idx] &
8135 SUPPORTED_2500baseX_Full) {
8136 bp->link_params.req_line_speed[idx] =
8137 SPEED_2500;
8138 bp->port.advertising[idx] |=
8139 (ADVERTISED_2500baseX_Full |
34f80b04 8140 ADVERTISED_TP);
f85582f8
DK
8141 } else {
8142 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8143 "Invalid link_config 0x%x"
8144 " speed_cap_mask 0x%x\n",
a22f0788 8145 link_config,
f85582f8
DK
8146 bp->link_params.speed_cap_mask[idx]);
8147 return;
8148 }
8149 break;
a2fbb9ea 8150
f85582f8
DK
8151 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8152 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8153 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
8154 if (bp->port.supported[idx] &
8155 SUPPORTED_10000baseT_Full) {
8156 bp->link_params.req_line_speed[idx] =
8157 SPEED_10000;
8158 bp->port.advertising[idx] |=
8159 (ADVERTISED_10000baseT_Full |
34f80b04 8160 ADVERTISED_FIBRE);
f85582f8
DK
8161 } else {
8162 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8163 "Invalid link_config 0x%x"
8164 " speed_cap_mask 0x%x\n",
a22f0788 8165 link_config,
f85582f8
DK
8166 bp->link_params.speed_cap_mask[idx]);
8167 return;
8168 }
8169 break;
a2fbb9ea 8170
f85582f8
DK
8171 default:
8172 BNX2X_ERROR("NVRAM config error. "
8173 "BAD link speed link_config 0x%x\n",
8174 link_config);
8175 bp->link_params.req_line_speed[idx] =
8176 SPEED_AUTO_NEG;
8177 bp->port.advertising[idx] =
8178 bp->port.supported[idx];
8179 break;
8180 }
a2fbb9ea 8181
a22f0788 8182 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 8183 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
8184 if ((bp->link_params.req_flow_ctrl[idx] ==
8185 BNX2X_FLOW_CTRL_AUTO) &&
8186 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8187 bp->link_params.req_flow_ctrl[idx] =
8188 BNX2X_FLOW_CTRL_NONE;
8189 }
a2fbb9ea 8190
a22f0788
YR
8191 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8192 " 0x%x advertising 0x%x\n",
8193 bp->link_params.req_line_speed[idx],
8194 bp->link_params.req_duplex[idx],
8195 bp->link_params.req_flow_ctrl[idx],
8196 bp->port.advertising[idx]);
8197 }
a2fbb9ea
ET
8198}
8199
e665bfda
MC
8200static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8201{
8202 mac_hi = cpu_to_be16(mac_hi);
8203 mac_lo = cpu_to_be32(mac_lo);
8204 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8205 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8206}
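
bnx2x_set_mac_buf() turns two register halves (hi: 16 bits, lo: 32 bits) into the six wire-order bytes via cpu_to_be16/cpu_to_be32 plus memcpy. The sketch below does the equivalent with explicit shifts, so it is endian-independent by construction; input values are illustrative:

#include <stdint.h>
#include <stdio.h>

static void set_mac_buf(uint8_t *buf, uint32_t lo, uint16_t hi)
{
	buf[0] = hi >> 8;          buf[1] = hi & 0xff;
	buf[2] = lo >> 24;         buf[3] = (lo >> 16) & 0xff;
	buf[4] = (lo >> 8) & 0xff; buf[5] = lo & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	set_mac_buf(mac, 0x33445566, 0x1122);  /* illustrative values */
	for (int i = 0; i < 6; i++)
		printf("%02x%s", mac[i], i < 5 ? ":" : "\n"); /* 11:22:33:44:55:66 */
	return 0;
}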
8207
34f80b04 8208static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8209{
34f80b04 8210 int port = BP_PORT(bp);
589abe3a 8211 u32 config;
6f38ad93 8212 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 8213
c18487ee 8214 bp->link_params.bp = bp;
34f80b04 8215 bp->link_params.port = port;
c18487ee 8216
c18487ee 8217 bp->link_params.lane_config =
a2fbb9ea 8218 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 8219
a22f0788 8220 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
8221 SHMEM_RD(bp,
8222 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
8223 bp->link_params.speed_cap_mask[1] =
8224 SHMEM_RD(bp,
8225 dev_info.port_hw_config[port].speed_capability_mask2);
8226 bp->port.link_config[0] =
a2fbb9ea
ET
8227 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8228
a22f0788
YR
8229 bp->port.link_config[1] =
8230 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 8231
a22f0788
YR
8232 bp->link_params.multi_phy_config =
8233 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
8234 /* If the device is capable of WoL, set the default state according
8235 * to the HW
8236 */
4d295db0 8237 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8238 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8239 (config & PORT_FEATURE_WOL_ENABLED));
8240
f85582f8 8241 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 8242 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 8243 bp->link_params.lane_config,
a22f0788
YR
8244 bp->link_params.speed_cap_mask[0],
8245 bp->port.link_config[0]);
a2fbb9ea 8246
a22f0788 8247 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 8248 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 8249 bnx2x_phy_probe(&bp->link_params);
c18487ee 8250 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8251
8252 bnx2x_link_settings_requested(bp);
8253
01cd4528
EG
8254 /*
8255 * If connected directly, work with the internal PHY, otherwise, work
8256 * with the external PHY
8257 */
b7737c9b
YR
8258 ext_phy_config =
8259 SHMEM_RD(bp,
8260 dev_info.port_hw_config[port].external_phy_config);
8261 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 8262 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 8263 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
8264
8265 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8266 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8267 bp->mdio.prtad =
b7737c9b 8268 XGXS_EXT_PHY_ADDR(ext_phy_config);
5866df6d
YR
8269
8270 /*
8271 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
8272 * In MF mode, it is set to cover self test cases
8273 */
8274 if (IS_MF(bp))
8275 bp->port.need_hw_lock = 1;
8276 else
8277 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8278 bp->common.shmem_base,
8279 bp->common.shmem2_base);
0793f83f 8280}
01cd4528 8281
2ba45142
VZ
8282#ifdef BCM_CNIC
8283static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8284{
8285 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8286 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8287 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8288 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8289
8290 /* Get the number of maximum allowed iSCSI and FCoE connections */
8291 bp->cnic_eth_dev.max_iscsi_conn =
8292 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8293 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8294
8295 bp->cnic_eth_dev.max_fcoe_conn =
8296 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8297 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8298
8299 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8300 bp->cnic_eth_dev.max_iscsi_conn,
8301 bp->cnic_eth_dev.max_fcoe_conn);
8302
8303 /* If the maximum allowed number of connections is zero -
8304 * disable the feature.
8305 */
8306 if (!bp->cnic_eth_dev.max_iscsi_conn)
8307 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8308
8309 if (!bp->cnic_eth_dev.max_fcoe_conn)
8310 bp->flags |= NO_FCOE_FLAG;
8311}
8312#endif
8313
0793f83f
DK
8314static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8315{
8316 u32 val, val2;
8317 int func = BP_ABS_FUNC(bp);
8318 int port = BP_PORT(bp);
2ba45142
VZ
8319#ifdef BCM_CNIC
8320 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8321 u8 *fip_mac = bp->fip_mac;
8322#endif
0793f83f
DK
8323
8324 if (BP_NOMCP(bp)) {
8325 BNX2X_ERROR("warning: random MAC workaround active\n");
8326 random_ether_addr(bp->dev->dev_addr);
8327 } else if (IS_MF(bp)) {
8328 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8329 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8330 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8331 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8332 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
37b091ba
MC
8333
8334#ifdef BCM_CNIC
2ba45142
VZ
8335 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
8336 * FCoE MAC then the appropriate feature should be disabled.
8337 */
0793f83f
DK
8338 if (IS_MF_SI(bp)) {
8339 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8340 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8341 val2 = MF_CFG_RD(bp, func_ext_config[func].
8342 iscsi_mac_addr_upper);
8343 val = MF_CFG_RD(bp, func_ext_config[func].
8344 iscsi_mac_addr_lower);
2ba45142
VZ
8345 BNX2X_DEV_INFO("Read iSCSI MAC: "
8346 "0x%x:0x%04x\n", val2, val);
8347 bnx2x_set_mac_buf(iscsi_mac, val, val2);
2ba45142
VZ
8348 } else
8349 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8350
8351 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8352 val2 = MF_CFG_RD(bp, func_ext_config[func].
8353 fcoe_mac_addr_upper);
8354 val = MF_CFG_RD(bp, func_ext_config[func].
8355 fcoe_mac_addr_lower);
8356 BNX2X_DEV_INFO("Read FCoE MAC to "
8357 "0x%x:0x%04x\n", val2, val);
8358 bnx2x_set_mac_buf(fip_mac, val, val2);
8359
2ba45142
VZ
8360 } else
8361 bp->flags |= NO_FCOE_FLAG;
0793f83f 8362 }
37b091ba 8363#endif
0793f83f
DK
8364 } else {
8365 /* in SF read MACs from port configuration */
8366 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8367 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8368 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8369
8370#ifdef BCM_CNIC
8371 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8372 iscsi_mac_upper);
8373 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8374 iscsi_mac_lower);
2ba45142 8375 bnx2x_set_mac_buf(iscsi_mac, val, val2);
0793f83f
DK
8376#endif
8377 }
8378
8379 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8380 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8381
ec6ba945 8382#ifdef BCM_CNIC
2ba45142 8383 /* Set the FCoE MAC in modes other than MF_SI */
ec6ba945
VZ
8384 if (!CHIP_IS_E1x(bp)) {
8385 if (IS_MF_SD(bp))
2ba45142
VZ
8386 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8387 else if (!IS_MF(bp))
8388 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
ec6ba945 8389 }
426b9241
DK
8390
8391 /* Disable iSCSI if MAC configuration is
8392 * invalid.
8393 */
8394 if (!is_valid_ether_addr(iscsi_mac)) {
8395 bp->flags |= NO_ISCSI_FLAG;
8396 memset(iscsi_mac, 0, ETH_ALEN);
8397 }
8398
8399 /* Disable FCoE if MAC configuration is
8400 * invalid.
8401 */
8402 if (!is_valid_ether_addr(fip_mac)) {
8403 bp->flags |= NO_FCOE_FLAG;
8404 memset(bp->fip_mac, 0, ETH_ALEN);
8405 }
ec6ba945 8406#endif
34f80b04
EG
8407}
8408
8409static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8410{
0793f83f 8411 int /*abs*/func = BP_ABS_FUNC(bp);
b8ee8328 8412 int vn;
0793f83f 8413 u32 val = 0;
34f80b04 8414 int rc = 0;
a2fbb9ea 8415
34f80b04 8416 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8417
f2e0899f
DK
8418 if (CHIP_IS_E1x(bp)) {
8419 bp->common.int_block = INT_BLOCK_HC;
8420
8421 bp->igu_dsb_id = DEF_SB_IGU_ID;
8422 bp->igu_base_sb = 0;
ec6ba945
VZ
8423 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8424 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
8425 } else {
8426 bp->common.int_block = INT_BLOCK_IGU;
8427 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8428 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8429 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8430 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8431 } else
8432 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8433
f2e0899f
DK
8434 bnx2x_get_igu_cam_info(bp);
8435
8436 }
8437 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8438 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8439
8440 /*
8441 * Initialize MF configuration
8442 */
523224a3 8443
fb3bff17
DK
8444 bp->mf_ov = 0;
8445 bp->mf_mode = 0;
f2e0899f 8446 vn = BP_E1HVN(bp);
0793f83f 8447
f2e0899f 8448 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
0793f83f
DK
8449 DP(NETIF_MSG_PROBE,
8450 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8451 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8452 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
f2e0899f
DK
8453 if (SHMEM2_HAS(bp, mf_cfg_addr))
8454 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8455 else
8456 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
8457 offsetof(struct shmem_region, func_mb) +
8458 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
0793f83f
DK
8459 /*
8460 * get mf configuration:
25985edc 8461 * 1. existence of MF configuration
0793f83f
DK
8462 * 2. MAC address must be legal (check only upper bytes)
8463 * for Switch-Independent mode;
8464 * OVLAN must be legal for Switch-Dependent mode
8465 * 3. SF_MODE configures specific MF mode
8466 */
8467 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8468 /* get mf configuration */
8469 val = SHMEM_RD(bp,
8470 dev_info.shared_feature_config.config);
8471 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8472
8473 switch (val) {
8474 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8475 val = MF_CFG_RD(bp, func_mf_config[func].
8476 mac_upper);
8477 /* check for legal mac (upper bytes)*/
8478 if (val != 0xffff) {
8479 bp->mf_mode = MULTI_FUNCTION_SI;
8480 bp->mf_config[vn] = MF_CFG_RD(bp,
8481 func_mf_config[func].config);
8482 } else
8483 DP(NETIF_MSG_PROBE, "illegal MAC "
8484 "address for SI\n");
8485 break;
8486 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8487 /* get OV configuration */
8488 val = MF_CFG_RD(bp,
8489 func_mf_config[FUNC_0].e1hov_tag);
8490 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8491
8492 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8493 bp->mf_mode = MULTI_FUNCTION_SD;
8494 bp->mf_config[vn] = MF_CFG_RD(bp,
8495 func_mf_config[func].config);
8496 } else
8497 DP(NETIF_MSG_PROBE, "illegal OV for "
8498 "SD\n");
8499 break;
8500 default:
8501 /* Unknown configuration: reset mf_config */
8502 bp->mf_config[vn] = 0;
25985edc 8503 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
0793f83f
DK
8504 val);
8505 }
8506 }
a2fbb9ea 8507
2691d51d 8508 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8509 IS_MF(bp) ? "multi" : "single");
2691d51d 8510
0793f83f
DK
8511 switch (bp->mf_mode) {
8512 case MULTI_FUNCTION_SD:
8513 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8514 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 8515 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8516 bp->mf_ov = val;
0793f83f
DK
8517 BNX2X_DEV_INFO("MF OV for func %d is %d"
8518 " (0x%04x)\n", func,
8519 bp->mf_ov, bp->mf_ov);
2691d51d 8520 } else {
0793f83f
DK
8521 BNX2X_ERR("No valid MF OV for func %d,"
8522 " aborting\n", func);
34f80b04
EG
8523 rc = -EPERM;
8524 }
0793f83f
DK
8525 break;
8526 case MULTI_FUNCTION_SI:
8527 BNX2X_DEV_INFO("func %d is in MF "
8528 "switch-independent mode\n", func);
8529 break;
8530 default:
8531 if (vn) {
8532 BNX2X_ERR("VN %d in single function mode,"
8533 " aborting\n", vn);
2691d51d
EG
8534 rc = -EPERM;
8535 }
0793f83f 8536 break;
34f80b04 8537 }
0793f83f 8538
34f80b04 8539 }
a2fbb9ea 8540
f2e0899f
DK
8541 /* adjust igu_sb_cnt to MF for E1x */
8542 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
8543 bp->igu_sb_cnt /= E1HVN_MAX;
8544
f2e0899f
DK
8545 /*
 8546 * adjust E2 sb count: to be removed once the FW supports
 8547 * more than 16 L2 clients
8548 */
8549#define MAX_L2_CLIENTS 16
8550 if (CHIP_IS_E2(bp))
8551 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8552 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8553
34f80b04
EG
8554 if (!BP_NOMCP(bp)) {
8555 bnx2x_get_port_hwinfo(bp);
8556
f2e0899f
DK
8557 bp->fw_seq =
8558 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8559 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
8560 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8561 }
8562
0793f83f
DK
8563 /* Get MAC addresses */
8564 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 8565
2ba45142
VZ
8566#ifdef BCM_CNIC
8567 bnx2x_get_cnic_info(bp);
8568#endif
8569
34f80b04
EG
8570 return rc;
8571}
8572
34f24c7f
VZ
8573static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8574{
8575 int cnt, i, block_end, rodi;
8576 char vpd_data[BNX2X_VPD_LEN+1];
8577 char str_id_reg[VENDOR_ID_LEN+1];
8578 char str_id_cap[VENDOR_ID_LEN+1];
8579 u8 len;
8580
8581 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8582 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8583
8584 if (cnt < BNX2X_VPD_LEN)
8585 goto out_not_found;
8586
8587 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8588 PCI_VPD_LRDT_RO_DATA);
8589 if (i < 0)
8590 goto out_not_found;
8591
8592
8593 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8594 pci_vpd_lrdt_size(&vpd_data[i]);
8595
8596 i += PCI_VPD_LRDT_TAG_SIZE;
8597
8598 if (block_end > BNX2X_VPD_LEN)
8599 goto out_not_found;
8600
8601 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8602 PCI_VPD_RO_KEYWORD_MFR_ID);
8603 if (rodi < 0)
8604 goto out_not_found;
8605
8606 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8607
8608 if (len != VENDOR_ID_LEN)
8609 goto out_not_found;
8610
8611 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8612
8613 /* vendor specific info */
8614 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8615 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8616 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8617 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8618
8619 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8620 PCI_VPD_RO_KEYWORD_VENDOR0);
8621 if (rodi >= 0) {
8622 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8623
8624 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8625
8626 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8627 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8628 bp->fw_ver[len] = ' ';
8629 }
8630 }
8631 return;
8632 }
8633out_not_found:
8634 return;
8635}
8636
34f80b04
EG
8637static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8638{
f2e0899f 8639 int func;
87942b46 8640 int timer_interval;
34f80b04
EG
8641 int rc;
8642
34f80b04 8643 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8644 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 8645 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
8646#ifdef BCM_CNIC
8647 mutex_init(&bp->cnic_mutex);
8648#endif
a2fbb9ea 8649
1cf167f2 8650 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8651 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
8652
8653 rc = bnx2x_get_hwinfo(bp);
8654
523224a3
DK
8655 if (!rc)
8656 rc = bnx2x_alloc_mem_bp(bp);
8657
34f24c7f 8658 bnx2x_read_fwinfo(bp);
f2e0899f
DK
8659
8660 func = BP_FUNC(bp);
8661
34f80b04
EG
8662 /* need to reset chip if undi was active */
8663 if (!BP_NOMCP(bp))
8664 bnx2x_undi_unload(bp);
8665
8666 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8667 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
8668
8669 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
8670 dev_err(&bp->pdev->dev, "MCP disabled, "
8671 "must load devices in order!\n");
34f80b04 8672
555f6c78 8673 bp->multi_mode = multi_mode;
555f6c78 8674
7a9b2557
VZ
8675 /* Set TPA flags */
8676 if (disable_tpa) {
8677 bp->flags &= ~TPA_ENABLE_FLAG;
8678 bp->dev->features &= ~NETIF_F_LRO;
8679 } else {
8680 bp->flags |= TPA_ENABLE_FLAG;
8681 bp->dev->features |= NETIF_F_LRO;
8682 }
5d7cd496 8683 bp->disable_tpa = disable_tpa;
7a9b2557 8684
a18f5128
EG
8685 if (CHIP_IS_E1(bp))
8686 bp->dropless_fc = 0;
8687 else
8688 bp->dropless_fc = dropless_fc;
8689
8d5726c4 8690 bp->mrrs = mrrs;
7a9b2557 8691
34f80b04 8692 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04 8693
7d323bfd 8694 /* make sure that the numbers are in the right granularity */
523224a3
DK
8695 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8696 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 8697
87942b46
EG
8698 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8699 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8700
8701 init_timer(&bp->timer);
8702 bp->timer.expires = jiffies + bp->current_interval;
8703 bp->timer.data = (unsigned long) bp;
8704 bp->timer.function = bnx2x_timer;
8705
785b9b1a 8706 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
e4901dde
VZ
8707 bnx2x_dcbx_init_params(bp);
8708
34f80b04 8709 return rc;
a2fbb9ea
ET
8710}
8711
a2fbb9ea 8712
de0c62db
DK
8713/****************************************************************************
8714* General service functions
8715****************************************************************************/
a2fbb9ea 8716
bb2a0f7a 8717/* called with rtnl_lock */
a2fbb9ea
ET
8718static int bnx2x_open(struct net_device *dev)
8719{
8720 struct bnx2x *bp = netdev_priv(dev);
8721
6eccabb3
EG
8722 netif_carrier_off(dev);
8723
a2fbb9ea
ET
8724 bnx2x_set_power_state(bp, PCI_D0);
8725
72fd0718
VZ
8726 if (!bnx2x_reset_is_done(bp)) {
8727 do {
 8728 /* Reset MCP mailbox sequence if there is an ongoing
8729 * recovery
8730 */
8731 bp->fw_seq = 0;
8732
 8733 /* If it's the first function to load and reset done
 8734 * is still not cleared, a previous recovery may not
 8735 * have completed. We don't check the attention state
 8736 * here: it may already have been cleared by a "common"
 8737 * reset, but we shall proceed with "process kill" anyway.
8738 */
8739 if ((bnx2x_get_load_cnt(bp) == 0) &&
8740 bnx2x_trylock_hw_lock(bp,
8741 HW_LOCK_RESOURCE_RESERVED_08) &&
8742 (!bnx2x_leader_reset(bp))) {
8743 DP(NETIF_MSG_HW, "Recovered in open\n");
8744 break;
8745 }
8746
8747 bnx2x_set_power_state(bp, PCI_D3hot);
8748
8749 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
8750 " completed yet. Try again later. If u still see this"
8751 " message after a few retries then power cycle is"
8752 " required.\n", bp->dev->name);
8753
8754 return -EAGAIN;
8755 } while (0);
8756 }
8757
8758 bp->recovery_state = BNX2X_RECOVERY_DONE;
8759
bb2a0f7a 8760 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
8761}
8762
bb2a0f7a 8763/* called with rtnl_lock */
a2fbb9ea
ET
8764static int bnx2x_close(struct net_device *dev)
8765{
a2fbb9ea
ET
8766 struct bnx2x *bp = netdev_priv(dev);
8767
8768 /* Unload the driver, release IRQs */
bb2a0f7a 8769 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8770 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
8771
8772 return 0;
8773}
8774
6e30dd4e
VZ
8775#define E1_MAX_UC_LIST 29
8776#define E1H_MAX_UC_LIST 30
8777#define E2_MAX_UC_LIST 14
8778static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
8779{
8780 if (CHIP_IS_E1(bp))
8781 return E1_MAX_UC_LIST;
8782 else if (CHIP_IS_E1H(bp))
8783 return E1H_MAX_UC_LIST;
8784 else
8785 return E2_MAX_UC_LIST;
8786}
8787
8788
8789static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
8790{
8791 if (CHIP_IS_E1(bp))
8792 /* CAM Entries for Port0:
8793 * 0 - prim ETH MAC
8794 * 1 - BCAST MAC
8795 * 2 - iSCSI L2 ring ETH MAC
8796 * 3-31 - UC MACs
8797 *
8798 * Port1 entries are allocated the same way starting from
8799 * entry 32.
8800 */
8801 return 3 + 32 * BP_PORT(bp);
8802 else if (CHIP_IS_E1H(bp)) {
8803 /* CAM Entries:
8804 * 0-7 - prim ETH MAC for each function
8805 * 8-15 - iSCSI L2 ring ETH MAC for each function
 8806 * 16 to 255 - UC MAC lists for each function
8807 *
8808 * Remark: There is no FCoE support for E1H, thus FCoE related
8809 * MACs are not considered.
8810 */
8811 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
8812 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
8813 } else {
8814 /* CAM Entries (there is a separate CAM per engine):
 8815 * 0-3 - prim ETH MAC for each function
8816 * 4-7 - iSCSI L2 ring ETH MAC for each function
8817 * 8-11 - FIP ucast L2 MAC for each function
8818 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
 8819 * 16 to 71 - UC MAC lists for each function
8820 */
8821 u8 func_idx =
8822 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
8823
8824 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
8825 bnx2x_max_uc_list(bp) * func_idx;
8826 }
8827}
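To make the E1H arithmetic above concrete, here is a minimal stand-alone sketch of the same offset computation; the constant values (8 functions, iSCSI CAM line 1, 30 unicast entries per function) are assumptions taken from the comments above rather than from the chip headers.

#include <stdio.h>

/* Assumed values mirroring the E1H comments above, for illustration only. */
#define E1H_FUNC_MAX		8
#define CAM_ISCSI_ETH_LINE	1
#define E1H_MAX_UC_LIST		30

static int e1h_uc_cam_offset(int func)
{
	/* Skip the 16 fixed entries (prim + iSCSI MACs for all functions),
	 * then index this function's private UC region. */
	return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
	       E1H_MAX_UC_LIST * func;
}

int main(void)
{
	/* func 2: 8 * 2 + 30 * 2 = 76 */
	printf("func 2 UC base: %d\n", e1h_uc_cam_offset(2));
	return 0;
}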
8828
 8829 /* Set the UC list; do not wait, as waiting implies sleeping
 8830 * and set_rx_mode can be invoked from non-sleepable context.
8831 *
8832 * Instead we use the same ramrod data buffer each time we need
8833 * to configure a list of addresses, and use the fact that the
8834 * list of MACs is changed in an incremental way and that the
8835 * function is called under the netif_addr_lock. A temporary
8836 * inconsistent CAM configuration (possible in case of very fast
8837 * sequence of add/del/add on the host side) will shortly be
8838 * restored by the handler of the last ramrod.
8839 */
8840static int bnx2x_set_uc_list(struct bnx2x *bp)
8841{
8842 int i = 0, old;
8843 struct net_device *dev = bp->dev;
8844 u8 offset = bnx2x_uc_list_cam_offset(bp);
8845 struct netdev_hw_addr *ha;
8846 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
8847 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
8848
8849 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
8850 return -EINVAL;
8851
8852 netdev_for_each_uc_addr(ha, dev) {
8853 /* copy mac */
8854 config_cmd->config_table[i].msb_mac_addr =
8855 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
8856 config_cmd->config_table[i].middle_mac_addr =
8857 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
8858 config_cmd->config_table[i].lsb_mac_addr =
8859 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
8860
8861 config_cmd->config_table[i].vlan_id = 0;
8862 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
8863 config_cmd->config_table[i].clients_bit_vector =
8864 cpu_to_le32(1 << BP_L_ID(bp));
8865
8866 SET_FLAG(config_cmd->config_table[i].flags,
8867 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8868 T_ETH_MAC_COMMAND_SET);
8869
8870 DP(NETIF_MSG_IFUP,
8871 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
8872 config_cmd->config_table[i].msb_mac_addr,
8873 config_cmd->config_table[i].middle_mac_addr,
8874 config_cmd->config_table[i].lsb_mac_addr);
8875
8876 i++;
8877
8878 /* Set uc MAC in NIG */
8879 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
8880 LLH_CAM_ETH_LINE + i);
8881 }
8882 old = config_cmd->hdr.length;
8883 if (old > i) {
8884 for (; i < old; i++) {
8885 if (CAM_IS_INVALID(config_cmd->
8886 config_table[i])) {
8887 /* already invalidated */
8888 break;
8889 }
8890 /* invalidate */
8891 SET_FLAG(config_cmd->config_table[i].flags,
8892 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8893 T_ETH_MAC_COMMAND_INVALIDATE);
8894 }
8895 }
8896
8897 wmb();
8898
8899 config_cmd->hdr.length = i;
8900 config_cmd->hdr.offset = offset;
8901 config_cmd->hdr.client_id = 0xff;
8902 /* Mark that this ramrod doesn't use bp->set_mac_pending for
8903 * synchronization.
8904 */
8905 config_cmd->hdr.echo = 0;
8906
8907 mb();
8908
8909 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
8910 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
8911
8912}
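The swab16() packing above turns a 6-byte MAC address into the three 16-bit words the config_table entry expects. A small user-space sketch (assuming a little-endian host; load16_swab is a stand-in for the kernel's unaligned load plus swab16) shows the mapping:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t load16_swab(const uint8_t *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));		/* little-endian 16-bit load */
	return (uint16_t)((v >> 8) | (v << 8));	/* swab16() equivalent */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	/* Same layout as config_table[i]: msb, middle, lsb words. */
	printf("%04x:%04x:%04x\n",
	       load16_swab(&mac[0]), load16_swab(&mac[2]),
	       load16_swab(&mac[4]));		/* prints 0010:18aa:bbcc */
	return 0;
}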
8913
8914void bnx2x_invalidate_uc_list(struct bnx2x *bp)
8915{
8916 int i;
8917 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
8918 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
8919 int ramrod_flags = WAIT_RAMROD_COMMON;
8920 u8 offset = bnx2x_uc_list_cam_offset(bp);
8921 u8 max_list_size = bnx2x_max_uc_list(bp);
8922
8923 for (i = 0; i < max_list_size; i++) {
8924 SET_FLAG(config_cmd->config_table[i].flags,
8925 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8926 T_ETH_MAC_COMMAND_INVALIDATE);
8927 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
8928 }
8929
8930 wmb();
8931
8932 config_cmd->hdr.length = max_list_size;
8933 config_cmd->hdr.offset = offset;
8934 config_cmd->hdr.client_id = 0xff;
8935 /* We'll wait for a completion this time... */
8936 config_cmd->hdr.echo = 1;
8937
8938 bp->set_mac_pending = 1;
8939
8940 mb();
8941
8942 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
8943 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
8944
8945 /* Wait for a completion */
8946 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
8947 ramrod_flags);
8948
8949}
8950
8951static inline int bnx2x_set_mc_list(struct bnx2x *bp)
8952{
8953 /* some multicasts */
8954 if (CHIP_IS_E1(bp)) {
8955 return bnx2x_set_e1_mc_list(bp);
8956 } else { /* E1H and newer */
8957 return bnx2x_set_e1h_mc_list(bp);
8958 }
8959}
8960
f5372251 8961/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 8962void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
8963{
8964 struct bnx2x *bp = netdev_priv(dev);
8965 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
34f80b04
EG
8966
8967 if (bp->state != BNX2X_STATE_OPEN) {
8968 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8969 return;
8970 }
8971
8972 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8973
8974 if (dev->flags & IFF_PROMISC)
8975 rx_mode = BNX2X_RX_MODE_PROMISC;
6e30dd4e 8976 else if (dev->flags & IFF_ALLMULTI)
34f80b04 8977 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6e30dd4e
VZ
8978 else {
8979 /* some multicasts */
8980 if (bnx2x_set_mc_list(bp))
8981 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04 8982
6e30dd4e
VZ
8983 /* some unicasts */
8984 if (bnx2x_set_uc_list(bp))
8985 rx_mode = BNX2X_RX_MODE_PROMISC;
34f80b04
EG
8986 }
8987
8988 bp->rx_mode = rx_mode;
8989 bnx2x_set_storm_rx_mode(bp);
8990}
8991
c18487ee 8992/* called with rtnl_lock */
01cd4528
EG
8993static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8994 int devad, u16 addr)
a2fbb9ea 8995{
01cd4528
EG
8996 struct bnx2x *bp = netdev_priv(netdev);
8997 u16 value;
8998 int rc;
a2fbb9ea 8999
01cd4528
EG
9000 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
9001 prtad, devad, addr);
a2fbb9ea 9002
01cd4528
EG
9003 /* The HW expects different devad if CL22 is used */
9004 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 9005
01cd4528 9006 bnx2x_acquire_phy_lock(bp);
e10bc84d 9007 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
9008 bnx2x_release_phy_lock(bp);
9009 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 9010
01cd4528
EG
9011 if (!rc)
9012 rc = value;
9013 return rc;
9014}
a2fbb9ea 9015
01cd4528
EG
9016/* called with rtnl_lock */
9017static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
9018 u16 addr, u16 value)
9019{
9020 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
9021 int rc;
9022
9023 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
9024 " value 0x%x\n", prtad, devad, addr, value);
9025
01cd4528
EG
9026 /* The HW expects different devad if CL22 is used */
9027 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 9028
01cd4528 9029 bnx2x_acquire_phy_lock(bp);
e10bc84d 9030 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
9031 bnx2x_release_phy_lock(bp);
9032 return rc;
9033}
c18487ee 9034
01cd4528
EG
9035/* called with rtnl_lock */
9036static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9037{
9038 struct bnx2x *bp = netdev_priv(dev);
9039 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 9040
01cd4528
EG
9041 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
9042 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 9043
01cd4528
EG
9044 if (!netif_running(dev))
9045 return -EAGAIN;
9046
9047 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
9048}
9049
257ddbda 9050#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
9051static void poll_bnx2x(struct net_device *dev)
9052{
9053 struct bnx2x *bp = netdev_priv(dev);
9054
9055 disable_irq(bp->pdev->irq);
9056 bnx2x_interrupt(bp->pdev->irq, dev);
9057 enable_irq(bp->pdev->irq);
9058}
9059#endif
9060
c64213cd
SH
9061static const struct net_device_ops bnx2x_netdev_ops = {
9062 .ndo_open = bnx2x_open,
9063 .ndo_stop = bnx2x_close,
9064 .ndo_start_xmit = bnx2x_start_xmit,
8307fa3e 9065 .ndo_select_queue = bnx2x_select_queue,
6e30dd4e 9066 .ndo_set_rx_mode = bnx2x_set_rx_mode,
c64213cd
SH
9067 .ndo_set_mac_address = bnx2x_change_mac_addr,
9068 .ndo_validate_addr = eth_validate_addr,
9069 .ndo_do_ioctl = bnx2x_ioctl,
9070 .ndo_change_mtu = bnx2x_change_mtu,
66371c44
MM
9071 .ndo_fix_features = bnx2x_fix_features,
9072 .ndo_set_features = bnx2x_set_features,
c64213cd 9073 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 9074#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
9075 .ndo_poll_controller = poll_bnx2x,
9076#endif
9077};
9078
34f80b04
EG
9079static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9080 struct net_device *dev)
a2fbb9ea
ET
9081{
9082 struct bnx2x *bp;
9083 int rc;
9084
9085 SET_NETDEV_DEV(dev, &pdev->dev);
9086 bp = netdev_priv(dev);
9087
34f80b04
EG
9088 bp->dev = dev;
9089 bp->pdev = pdev;
a2fbb9ea 9090 bp->flags = 0;
f2e0899f 9091 bp->pf_num = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
9092
9093 rc = pci_enable_device(pdev);
9094 if (rc) {
cdaa7cb8
VZ
9095 dev_err(&bp->pdev->dev,
9096 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
9097 goto err_out;
9098 }
9099
9100 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9101 dev_err(&bp->pdev->dev,
9102 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
9103 rc = -ENODEV;
9104 goto err_out_disable;
9105 }
9106
9107 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9108 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9109 " base address, aborting\n");
a2fbb9ea
ET
9110 rc = -ENODEV;
9111 goto err_out_disable;
9112 }
9113
34f80b04
EG
9114 if (atomic_read(&pdev->enable_cnt) == 1) {
9115 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9116 if (rc) {
cdaa7cb8
VZ
9117 dev_err(&bp->pdev->dev,
9118 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
9119 goto err_out_disable;
9120 }
a2fbb9ea 9121
34f80b04
EG
9122 pci_set_master(pdev);
9123 pci_save_state(pdev);
9124 }
a2fbb9ea
ET
9125
9126 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9127 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
9128 dev_err(&bp->pdev->dev,
9129 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
9130 rc = -EIO;
9131 goto err_out_release;
9132 }
9133
9134 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9135 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
9136 dev_err(&bp->pdev->dev,
9137 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
9138 rc = -EIO;
9139 goto err_out_release;
9140 }
9141
1a983142 9142 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 9143 bp->flags |= USING_DAC_FLAG;
1a983142 9144 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
9145 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9146 " failed, aborting\n");
a2fbb9ea
ET
9147 rc = -EIO;
9148 goto err_out_release;
9149 }
9150
1a983142 9151 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
9152 dev_err(&bp->pdev->dev,
9153 "System does not support DMA, aborting\n");
a2fbb9ea
ET
9154 rc = -EIO;
9155 goto err_out_release;
9156 }
9157
34f80b04
EG
9158 dev->mem_start = pci_resource_start(pdev, 0);
9159 dev->base_addr = dev->mem_start;
9160 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
9161
9162 dev->irq = pdev->irq;
9163
275f165f 9164 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 9165 if (!bp->regview) {
cdaa7cb8
VZ
9166 dev_err(&bp->pdev->dev,
9167 "Cannot map register space, aborting\n");
a2fbb9ea
ET
9168 rc = -ENOMEM;
9169 goto err_out_release;
9170 }
9171
34f80b04 9172 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 9173 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 9174 pci_resource_len(pdev, 2)));
a2fbb9ea 9175 if (!bp->doorbells) {
cdaa7cb8
VZ
9176 dev_err(&bp->pdev->dev,
9177 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
9178 rc = -ENOMEM;
9179 goto err_out_unmap;
9180 }
9181
9182 bnx2x_set_power_state(bp, PCI_D0);
9183
34f80b04
EG
9184 /* clean indirect addresses */
9185 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9186 PCICFG_VENDOR_ID_OFFSET);
9187 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9188 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9189 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9190 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 9191
72fd0718
VZ
9192 /* Reset the load counter */
9193 bnx2x_clear_load_cnt(bp);
9194
34f80b04 9195 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 9196
c64213cd 9197 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 9198 bnx2x_set_ethtool_ops(dev);
5316bc0b 9199
66371c44
MM
9200 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9201 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
9202 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
9203
9204 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9205 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
9206
9207 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
5316bc0b 9208 if (bp->flags & USING_DAC_FLAG)
66371c44 9209 dev->features |= NETIF_F_HIGHDMA;
a2fbb9ea 9210
538dd2e3
MB
9211 /* Add Loopback capability to the device */
9212 dev->hw_features |= NETIF_F_LOOPBACK;
9213
98507672 9214#ifdef BCM_DCBNL
785b9b1a
SR
9215 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9216#endif
9217
01cd4528
EG
9218 /* get_port_hwinfo() will set prtad and mmds properly */
9219 bp->mdio.prtad = MDIO_PRTAD_NONE;
9220 bp->mdio.mmds = 0;
9221 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9222 bp->mdio.dev = dev;
9223 bp->mdio.mdio_read = bnx2x_mdio_read;
9224 bp->mdio.mdio_write = bnx2x_mdio_write;
9225
a2fbb9ea
ET
9226 return 0;
9227
9228err_out_unmap:
9229 if (bp->regview) {
9230 iounmap(bp->regview);
9231 bp->regview = NULL;
9232 }
a2fbb9ea
ET
9233 if (bp->doorbells) {
9234 iounmap(bp->doorbells);
9235 bp->doorbells = NULL;
9236 }
9237
9238err_out_release:
34f80b04
EG
9239 if (atomic_read(&pdev->enable_cnt) == 1)
9240 pci_release_regions(pdev);
a2fbb9ea
ET
9241
9242err_out_disable:
9243 pci_disable_device(pdev);
9244 pci_set_drvdata(pdev, NULL);
9245
9246err_out:
9247 return rc;
9248}
9249
37f9ce62
EG
9250static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9251 int *width, int *speed)
25047950
ET
9252{
9253 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9254
37f9ce62 9255 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 9256
37f9ce62
EG
 9257 /* return value: 1 = 2.5GHz, 2 = 5GHz */
9258 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 9259}
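As a self-contained illustration of the mask-and-shift above: the field positions below are invented for the example (the real PCICFG_LINK_WIDTH/PCICFG_LINK_SPEED masks come from the chip register headers), but the decode pattern is the same.

#include <stdio.h>
#include <stdint.h>

/* Invented layout for illustration: width in bits 20-25, speed in 16-19. */
#define LINK_WIDTH_MASK		0x03f00000
#define LINK_WIDTH_SHIFT	20
#define LINK_SPEED_MASK		0x000f0000
#define LINK_SPEED_SHIFT	16

int main(void)
{
	uint32_t val = (4u << LINK_WIDTH_SHIFT) | (2u << LINK_SPEED_SHIFT);
	int width = (val & LINK_WIDTH_MASK) >> LINK_WIDTH_SHIFT;
	int speed = (val & LINK_SPEED_MASK) >> LINK_SPEED_SHIFT;

	/* speed code: 1 = 2.5GHz, 2 = 5GHz (Gen2) */
	printf("PCI-E x%d %s\n", width, speed == 2 ? "5GHz" : "2.5GHz");
	return 0;
}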
37f9ce62 9260
6891dd25 9261static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 9262{
37f9ce62 9263 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
9264 struct bnx2x_fw_file_hdr *fw_hdr;
9265 struct bnx2x_fw_file_section *sections;
94a78b79 9266 u32 offset, len, num_ops;
37f9ce62 9267 u16 *ops_offsets;
94a78b79 9268 int i;
37f9ce62 9269 const u8 *fw_ver;
94a78b79
VZ
9270
9271 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9272 return -EINVAL;
9273
9274 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9275 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9276
9277 /* Make sure none of the offsets and sizes make us read beyond
9278 * the end of the firmware data */
9279 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9280 offset = be32_to_cpu(sections[i].offset);
9281 len = be32_to_cpu(sections[i].len);
9282 if (offset + len > firmware->size) {
cdaa7cb8
VZ
9283 dev_err(&bp->pdev->dev,
9284 "Section %d length is out of bounds\n", i);
94a78b79
VZ
9285 return -EINVAL;
9286 }
9287 }
9288
9289 /* Likewise for the init_ops offsets */
9290 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9291 ops_offsets = (u16 *)(firmware->data + offset);
9292 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9293
9294 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9295 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
9296 dev_err(&bp->pdev->dev,
9297 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
9298 return -EINVAL;
9299 }
9300 }
9301
9302 /* Check FW version */
9303 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9304 fw_ver = firmware->data + offset;
9305 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9306 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9307 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9308 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
9309 dev_err(&bp->pdev->dev,
9310 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
9311 fw_ver[0], fw_ver[1], fw_ver[2],
9312 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9313 BCM_5710_FW_MINOR_VERSION,
9314 BCM_5710_FW_REVISION_VERSION,
9315 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 9316 return -EINVAL;
94a78b79
VZ
9317 }
9318
9319 return 0;
9320}
9321
ab6ad5a4 9322static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9323{
ab6ad5a4
EG
9324 const __be32 *source = (const __be32 *)_source;
9325 u32 *target = (u32 *)_target;
94a78b79 9326 u32 i;
94a78b79
VZ
9327
9328 for (i = 0; i < n/4; i++)
9329 target[i] = be32_to_cpu(source[i]);
9330}
9331
9332/*
9333 Ops array is stored in the following format:
9334 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9335 */
ab6ad5a4 9336static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 9337{
ab6ad5a4
EG
9338 const __be32 *source = (const __be32 *)_source;
9339 struct raw_op *target = (struct raw_op *)_target;
94a78b79 9340 u32 i, j, tmp;
94a78b79 9341
ab6ad5a4 9342 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
9343 tmp = be32_to_cpu(source[j]);
9344 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
9345 target[i].offset = tmp & 0xffffff;
9346 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
9347 }
9348}
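The unpacking above can be checked against a hand-built record. In this sketch ntohl() stands in for be32_to_cpu(), and the raw_op fields are re-declared locally from the format comment (8-bit op, 24-bit offset, 32-bit data), not taken from the driver headers:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */

struct raw_op_ex {
	uint8_t  op;
	uint32_t offset;
	uint32_t raw_data;
};

int main(void)
{
	/* One 8-byte record: op 0x05, offset 0x000102, data 0xdeadbeef */
	uint32_t rec[2] = { htonl(0x05000102), htonl(0xdeadbeef) };
	uint32_t tmp = ntohl(rec[0]);
	struct raw_op_ex t = {
		.op       = (tmp >> 24) & 0xff,
		.offset   = tmp & 0xffffff,
		.raw_data = ntohl(rec[1]),
	};

	printf("op %#x offset %#x data %#x\n", t.op, t.offset, t.raw_data);
	return 0;
}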
ab6ad5a4 9349
523224a3
DK
9350/**
9351 * IRO array is stored in the following format:
9352 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9353 */
9354static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9355{
9356 const __be32 *source = (const __be32 *)_source;
9357 struct iro *target = (struct iro *)_target;
9358 u32 i, j, tmp;
9359
9360 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9361 target[i].base = be32_to_cpu(source[j]);
9362 j++;
9363 tmp = be32_to_cpu(source[j]);
9364 target[i].m1 = (tmp >> 16) & 0xffff;
9365 target[i].m2 = tmp & 0xffff;
9366 j++;
9367 tmp = be32_to_cpu(source[j]);
9368 target[i].m3 = (tmp >> 16) & 0xffff;
9369 target[i].size = tmp & 0xffff;
9370 j++;
9371 }
9372}
9373
ab6ad5a4 9374static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9375{
ab6ad5a4
EG
9376 const __be16 *source = (const __be16 *)_source;
9377 u16 *target = (u16 *)_target;
94a78b79 9378 u32 i;
94a78b79
VZ
9379
9380 for (i = 0; i < n/2; i++)
9381 target[i] = be16_to_cpu(source[i]);
9382}
9383
7995c64e
JP
9384#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
9385do { \
9386 u32 len = be32_to_cpu(fw_hdr->arr.len); \
9387 bp->arr = kmalloc(len, GFP_KERNEL); \
9388 if (!bp->arr) { \
9389 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9390 goto lbl; \
9391 } \
9392 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
9393 (u8 *)bp->arr, len); \
9394} while (0)
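For readability, here is a hand expansion of one invocation used below, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n) (an editor's expansion for illustration, not compiler output):

do {
	u32 len = be32_to_cpu(fw_hdr->init_data.len);
	bp->init_data = kmalloc(len, GFP_KERNEL);
	if (!bp->init_data) {
		pr_err("Failed to allocate %d bytes for init_data\n", len);
		goto request_firmware_exit;
	}
	be32_to_cpu_n(bp->firmware->data +
		      be32_to_cpu(fw_hdr->init_data.offset),
		      (u8 *)bp->init_data, len);
} while (0)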
94a78b79 9395
6891dd25 9396int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 9397{
45229b42 9398 const char *fw_file_name;
94a78b79 9399 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 9400 int rc;
94a78b79 9401
94a78b79 9402 if (CHIP_IS_E1(bp))
45229b42 9403 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 9404 else if (CHIP_IS_E1H(bp))
45229b42 9405 fw_file_name = FW_FILE_NAME_E1H;
f2e0899f
DK
9406 else if (CHIP_IS_E2(bp))
9407 fw_file_name = FW_FILE_NAME_E2;
cdaa7cb8 9408 else {
6891dd25 9409 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
9410 return -EINVAL;
9411 }
94a78b79 9412
6891dd25 9413 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 9414
6891dd25 9415 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 9416 if (rc) {
6891dd25 9417 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
9418 goto request_firmware_exit;
9419 }
9420
9421 rc = bnx2x_check_firmware(bp);
9422 if (rc) {
6891dd25 9423 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
9424 goto request_firmware_exit;
9425 }
9426
9427 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9428
9429 /* Initialize the pointers to the init arrays */
9430 /* Blob */
9431 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9432
9433 /* Opcodes */
9434 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9435
9436 /* Offsets */
ab6ad5a4
EG
9437 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9438 be16_to_cpu_n);
94a78b79
VZ
9439
9440 /* STORMs firmware */
573f2035
EG
9441 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9442 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9443 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9444 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9445 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9446 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9447 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9448 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9449 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9450 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9451 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9452 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9453 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9454 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9455 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9456 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
9457 /* IRO */
9458 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
9459
9460 return 0;
ab6ad5a4 9461
523224a3
DK
9462iro_alloc_err:
9463 kfree(bp->init_ops_offsets);
94a78b79
VZ
9464init_offsets_alloc_err:
9465 kfree(bp->init_ops);
9466init_ops_alloc_err:
9467 kfree(bp->init_data);
9468request_firmware_exit:
9469 release_firmware(bp->firmware);
9470
9471 return rc;
9472}
9473
523224a3
DK
9474static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9475{
9476 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 9477
523224a3
DK
9478#ifdef BCM_CNIC
9479 cid_count += CNIC_CID_MAX;
9480#endif
9481 return roundup(cid_count, QM_CID_ROUND);
9482}
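The roundup() at the end pads the context count to the queue manager's allocation granularity. A quick sanity check of the arithmetic (QM_CID_ROUND is assumed to be 1024 here purely for illustration):

#include <stdio.h>

/* Kernel-style roundup(): smallest multiple of y that is >= x. */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	printf("%d\n", roundup(1088, 1024));	/* prints 2048 */
	return 0;
}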
f85582f8 9483
a2fbb9ea
ET
9484static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9485 const struct pci_device_id *ent)
9486{
a2fbb9ea
ET
9487 struct net_device *dev = NULL;
9488 struct bnx2x *bp;
37f9ce62 9489 int pcie_width, pcie_speed;
523224a3
DK
9490 int rc, cid_count;
9491
f2e0899f
DK
9492 switch (ent->driver_data) {
9493 case BCM57710:
9494 case BCM57711:
9495 case BCM57711E:
9496 cid_count = FP_SB_MAX_E1x;
9497 break;
9498
9499 case BCM57712:
9500 case BCM57712E:
9501 cid_count = FP_SB_MAX_E2;
9502 break;
a2fbb9ea 9503
f2e0899f
DK
9504 default:
9505 pr_err("Unknown board_type (%ld), aborting\n",
9506 ent->driver_data);
870634b0 9507 return -ENODEV;
f2e0899f
DK
9508 }
9509
ec6ba945 9510 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
f85582f8 9511
a2fbb9ea 9512 /* dev zeroed in init_etherdev */
523224a3 9513 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 9514 if (!dev) {
cdaa7cb8 9515 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 9516 return -ENOMEM;
34f80b04 9517 }
a2fbb9ea 9518
a2fbb9ea 9519 bp = netdev_priv(dev);
7995c64e 9520 bp->msg_enable = debug;
a2fbb9ea 9521
df4770de
EG
9522 pci_set_drvdata(pdev, dev);
9523
523224a3
DK
9524 bp->l2_cid_count = cid_count;
9525
34f80b04 9526 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
9527 if (rc < 0) {
9528 free_netdev(dev);
9529 return rc;
9530 }
9531
34f80b04 9532 rc = bnx2x_init_bp(bp);
693fc0d1
EG
9533 if (rc)
9534 goto init_one_exit;
9535
523224a3
DK
9536 /* calc qm_cid_count */
9537 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9538
ec6ba945
VZ
9539#ifdef BCM_CNIC
 9540 /* disable FCoE L2 queue for E1x */
9541 if (CHIP_IS_E1x(bp))
9542 bp->flags |= NO_FCOE_FLAG;
9543
9544#endif
9545
25985edc 9546 /* Configure interrupt mode: try to enable MSI-X/MSI if
d6214d7a
DK
9547 * needed, set bp->num_queues appropriately.
9548 */
9549 bnx2x_set_int_mode(bp);
9550
9551 /* Add all NAPI objects */
9552 bnx2x_add_all_napi(bp);
9553
b340007f
VZ
9554 rc = register_netdev(dev);
9555 if (rc) {
9556 dev_err(&pdev->dev, "Cannot register net device\n");
9557 goto init_one_exit;
9558 }
9559
ec6ba945
VZ
9560#ifdef BCM_CNIC
9561 if (!NO_FCOE(bp)) {
9562 /* Add storage MAC address */
9563 rtnl_lock();
9564 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9565 rtnl_unlock();
9566 }
9567#endif
9568
37f9ce62 9569 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
d6214d7a 9570
cdaa7cb8
VZ
9571 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9572 " IRQ %d, ", board_info[ent->driver_data].name,
9573 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
f2e0899f
DK
9574 pcie_width,
9575 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9576 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9577 "5GHz (Gen2)" : "2.5GHz",
cdaa7cb8
VZ
9578 dev->base_addr, bp->pdev->irq);
9579 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 9580
a2fbb9ea 9581 return 0;
34f80b04
EG
9582
9583init_one_exit:
9584 if (bp->regview)
9585 iounmap(bp->regview);
9586
9587 if (bp->doorbells)
9588 iounmap(bp->doorbells);
9589
9590 free_netdev(dev);
9591
9592 if (atomic_read(&pdev->enable_cnt) == 1)
9593 pci_release_regions(pdev);
9594
9595 pci_disable_device(pdev);
9596 pci_set_drvdata(pdev, NULL);
9597
9598 return rc;
a2fbb9ea
ET
9599}
9600
9601static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9602{
9603 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
9604 struct bnx2x *bp;
9605
9606 if (!dev) {
cdaa7cb8 9607 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
9608 return;
9609 }
228241eb 9610 bp = netdev_priv(dev);
a2fbb9ea 9611
ec6ba945
VZ
9612#ifdef BCM_CNIC
9613 /* Delete storage MAC address */
9614 if (!NO_FCOE(bp)) {
9615 rtnl_lock();
9616 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9617 rtnl_unlock();
9618 }
9619#endif
9620
98507672
SR
9621#ifdef BCM_DCBNL
9622 /* Delete app tlvs from dcbnl */
9623 bnx2x_dcbnl_update_applist(bp, true);
9624#endif
9625
a2fbb9ea
ET
9626 unregister_netdev(dev);
9627
d6214d7a
DK
9628 /* Delete all NAPI objects */
9629 bnx2x_del_all_napi(bp);
9630
084d6cbb
VZ
9631 /* Power on: we can't let PCI layer write to us while we are in D3 */
9632 bnx2x_set_power_state(bp, PCI_D0);
9633
d6214d7a
DK
9634 /* Disable MSI/MSI-X */
9635 bnx2x_disable_msi(bp);
f85582f8 9636
084d6cbb
VZ
9637 /* Power off */
9638 bnx2x_set_power_state(bp, PCI_D3hot);
9639
72fd0718
VZ
9640 /* Make sure RESET task is not scheduled before continuing */
9641 cancel_delayed_work_sync(&bp->reset_task);
9642
a2fbb9ea
ET
9643 if (bp->regview)
9644 iounmap(bp->regview);
9645
9646 if (bp->doorbells)
9647 iounmap(bp->doorbells);
9648
523224a3
DK
9649 bnx2x_free_mem_bp(bp);
9650
a2fbb9ea 9651 free_netdev(dev);
34f80b04
EG
9652
9653 if (atomic_read(&pdev->enable_cnt) == 1)
9654 pci_release_regions(pdev);
9655
a2fbb9ea
ET
9656 pci_disable_device(pdev);
9657 pci_set_drvdata(pdev, NULL);
9658}
9659
f8ef6e44
YG
9660static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9661{
9662 int i;
9663
9664 bp->state = BNX2X_STATE_ERROR;
9665
9666 bp->rx_mode = BNX2X_RX_MODE_NONE;
9667
9668 bnx2x_netif_stop(bp, 0);
c89af1a3 9669 netif_carrier_off(bp->dev);
f8ef6e44
YG
9670
9671 del_timer_sync(&bp->timer);
9672 bp->stats_state = STATS_STATE_DISABLED;
9673 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9674
9675 /* Release IRQs */
d6214d7a 9676 bnx2x_free_irq(bp);
f8ef6e44 9677
f8ef6e44
YG
9678 /* Free SKBs, SGEs, TPA pool and driver internals */
9679 bnx2x_free_skbs(bp);
523224a3 9680
ec6ba945 9681 for_each_rx_queue(bp, i)
f8ef6e44 9682 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 9683
f8ef6e44
YG
9684 bnx2x_free_mem(bp);
9685
9686 bp->state = BNX2X_STATE_CLOSED;
9687
f8ef6e44
YG
9688 return 0;
9689}
9690
9691static void bnx2x_eeh_recover(struct bnx2x *bp)
9692{
9693 u32 val;
9694
9695 mutex_init(&bp->port.phy_mutex);
9696
9697 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9698 bp->link_params.shmem_base = bp->common.shmem_base;
9699 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9700
9701 if (!bp->common.shmem_base ||
9702 (bp->common.shmem_base < 0xA0000) ||
9703 (bp->common.shmem_base >= 0xC0000)) {
9704 BNX2X_DEV_INFO("MCP not active\n");
9705 bp->flags |= NO_MCP_FLAG;
9706 return;
9707 }
9708
9709 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9710 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9711 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9712 BNX2X_ERR("BAD MCP validity signature\n");
9713
9714 if (!BP_NOMCP(bp)) {
f2e0899f
DK
9715 bp->fw_seq =
9716 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9717 DRV_MSG_SEQ_NUMBER_MASK);
f8ef6e44
YG
9718 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9719 }
9720}
9721
493adb1f
WX
9722/**
9723 * bnx2x_io_error_detected - called when PCI error is detected
9724 * @pdev: Pointer to PCI device
9725 * @state: The current pci connection state
9726 *
9727 * This function is called after a PCI bus error affecting
9728 * this device has been detected.
9729 */
9730static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9731 pci_channel_state_t state)
9732{
9733 struct net_device *dev = pci_get_drvdata(pdev);
9734 struct bnx2x *bp = netdev_priv(dev);
9735
9736 rtnl_lock();
9737
9738 netif_device_detach(dev);
9739
07ce50e4
DN
9740 if (state == pci_channel_io_perm_failure) {
9741 rtnl_unlock();
9742 return PCI_ERS_RESULT_DISCONNECT;
9743 }
9744
493adb1f 9745 if (netif_running(dev))
f8ef6e44 9746 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
9747
9748 pci_disable_device(pdev);
9749
9750 rtnl_unlock();
9751
9752 /* Request a slot reset */
9753 return PCI_ERS_RESULT_NEED_RESET;
9754}
9755
9756/**
9757 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9758 * @pdev: Pointer to PCI device
9759 *
9760 * Restart the card from scratch, as if from a cold-boot.
9761 */
9762static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9763{
9764 struct net_device *dev = pci_get_drvdata(pdev);
9765 struct bnx2x *bp = netdev_priv(dev);
9766
9767 rtnl_lock();
9768
9769 if (pci_enable_device(pdev)) {
9770 dev_err(&pdev->dev,
9771 "Cannot re-enable PCI device after reset\n");
9772 rtnl_unlock();
9773 return PCI_ERS_RESULT_DISCONNECT;
9774 }
9775
9776 pci_set_master(pdev);
9777 pci_restore_state(pdev);
9778
9779 if (netif_running(dev))
9780 bnx2x_set_power_state(bp, PCI_D0);
9781
9782 rtnl_unlock();
9783
9784 return PCI_ERS_RESULT_RECOVERED;
9785}
9786
9787/**
9788 * bnx2x_io_resume - called when traffic can start flowing again
9789 * @pdev: Pointer to PCI device
9790 *
9791 * This callback is called when the error recovery driver tells us that
 9792 * it's OK to resume normal operation.
9793 */
9794static void bnx2x_io_resume(struct pci_dev *pdev)
9795{
9796 struct net_device *dev = pci_get_drvdata(pdev);
9797 struct bnx2x *bp = netdev_priv(dev);
9798
72fd0718 9799 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
f2e0899f
DK
9800 printk(KERN_ERR "Handling parity error recovery. "
9801 "Try again later\n");
72fd0718
VZ
9802 return;
9803 }
9804
493adb1f
WX
9805 rtnl_lock();
9806
f8ef6e44
YG
9807 bnx2x_eeh_recover(bp);
9808
493adb1f 9809 if (netif_running(dev))
f8ef6e44 9810 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
9811
9812 netif_device_attach(dev);
9813
9814 rtnl_unlock();
9815}
9816
9817static struct pci_error_handlers bnx2x_err_handler = {
9818 .error_detected = bnx2x_io_error_detected,
356e2385
EG
9819 .slot_reset = bnx2x_io_slot_reset,
9820 .resume = bnx2x_io_resume,
493adb1f
WX
9821};
9822
a2fbb9ea 9823static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
9824 .name = DRV_MODULE_NAME,
9825 .id_table = bnx2x_pci_tbl,
9826 .probe = bnx2x_init_one,
9827 .remove = __devexit_p(bnx2x_remove_one),
9828 .suspend = bnx2x_suspend,
9829 .resume = bnx2x_resume,
9830 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
9831};
9832
9833static int __init bnx2x_init(void)
9834{
dd21ca6d
SG
9835 int ret;
9836
7995c64e 9837 pr_info("%s", version);
938cf541 9838
1cf167f2
EG
9839 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9840 if (bnx2x_wq == NULL) {
7995c64e 9841 pr_err("Cannot create workqueue\n");
1cf167f2
EG
9842 return -ENOMEM;
9843 }
9844
dd21ca6d
SG
9845 ret = pci_register_driver(&bnx2x_pci_driver);
9846 if (ret) {
7995c64e 9847 pr_err("Cannot register driver\n");
dd21ca6d
SG
9848 destroy_workqueue(bnx2x_wq);
9849 }
9850 return ret;
a2fbb9ea
ET
9851}
9852
9853static void __exit bnx2x_cleanup(void)
9854{
9855 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
9856
9857 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
9858}
9859
9860module_init(bnx2x_init);
9861module_exit(bnx2x_cleanup);
9862
993ac7b5
MC
9863#ifdef BCM_CNIC
9864
9865/* count denotes the number of new completions we have seen */
9866static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9867{
9868 struct eth_spe *spe;
9869
9870#ifdef BNX2X_STOP_ON_ERROR
9871 if (unlikely(bp->panic))
9872 return;
9873#endif
9874
9875 spin_lock_bh(&bp->spq_lock);
c2bff63f 9876 BUG_ON(bp->cnic_spq_pending < count);
993ac7b5
MC
9877 bp->cnic_spq_pending -= count;
9878
993ac7b5 9879
c2bff63f
DK
9880 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9881 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9882 & SPE_HDR_CONN_TYPE) >>
9883 SPE_HDR_CONN_TYPE_SHIFT;
9884
9885 /* Set validation for iSCSI L2 client before sending SETUP
9886 * ramrod
9887 */
9888 if (type == ETH_CONNECTION_TYPE) {
9889 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9890 hdr.conn_and_cmd_data) >>
9891 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9892
9893 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9894 bnx2x_set_ctx_validation(&bp->context.
9895 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9896 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9897 }
9898
6e30dd4e
VZ
 9899 /* There may be no more than 8 L2 and no more than 8 L5 SPEs
9900 * We also check that the number of outstanding
9901 * COMMON ramrods is not more than the EQ and SPQ can
9902 * accommodate.
c2bff63f 9903 */
6e30dd4e
VZ
9904 if (type == ETH_CONNECTION_TYPE) {
9905 if (!atomic_read(&bp->cq_spq_left))
9906 break;
9907 else
9908 atomic_dec(&bp->cq_spq_left);
9909 } else if (type == NONE_CONNECTION_TYPE) {
9910 if (!atomic_read(&bp->eq_spq_left))
c2bff63f
DK
9911 break;
9912 else
6e30dd4e 9913 atomic_dec(&bp->eq_spq_left);
ec6ba945
VZ
9914 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9915 (type == FCOE_CONNECTION_TYPE)) {
c2bff63f
DK
9916 if (bp->cnic_spq_pending >=
9917 bp->cnic_eth_dev.max_kwqe_pending)
9918 break;
9919 else
9920 bp->cnic_spq_pending++;
9921 } else {
9922 BNX2X_ERR("Unknown SPE type: %d\n", type);
9923 bnx2x_panic();
993ac7b5 9924 break;
c2bff63f 9925 }
993ac7b5
MC
9926
9927 spe = bnx2x_sp_get_next(bp);
9928 *spe = *bp->cnic_kwq_cons;
9929
993ac7b5
MC
9930 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9931 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9932
9933 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9934 bp->cnic_kwq_cons = bp->cnic_kwq;
9935 else
9936 bp->cnic_kwq_cons++;
9937 }
9938 bnx2x_sp_prod_update(bp);
9939 spin_unlock_bh(&bp->spq_lock);
9940}
9941
9942static int bnx2x_cnic_sp_queue(struct net_device *dev,
9943 struct kwqe_16 *kwqes[], u32 count)
9944{
9945 struct bnx2x *bp = netdev_priv(dev);
9946 int i;
9947
9948#ifdef BNX2X_STOP_ON_ERROR
9949 if (unlikely(bp->panic))
9950 return -EIO;
9951#endif
9952
9953 spin_lock_bh(&bp->spq_lock);
9954
9955 for (i = 0; i < count; i++) {
9956 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9957
9958 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9959 break;
9960
9961 *bp->cnic_kwq_prod = *spe;
9962
9963 bp->cnic_kwq_pending++;
9964
9965 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9966 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
9967 spe->data.update_data_addr.hi,
9968 spe->data.update_data_addr.lo,
993ac7b5
MC
9969 bp->cnic_kwq_pending);
9970
9971 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9972 bp->cnic_kwq_prod = bp->cnic_kwq;
9973 else
9974 bp->cnic_kwq_prod++;
9975 }
9976
9977 spin_unlock_bh(&bp->spq_lock);
9978
9979 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9980 bnx2x_cnic_sp_post(bp, 0);
9981
9982 return i;
9983}
9984
9985static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9986{
9987 struct cnic_ops *c_ops;
9988 int rc = 0;
9989
9990 mutex_lock(&bp->cnic_mutex);
13707f9e
ED
9991 c_ops = rcu_dereference_protected(bp->cnic_ops,
9992 lockdep_is_held(&bp->cnic_mutex));
993ac7b5
MC
9993 if (c_ops)
9994 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9995 mutex_unlock(&bp->cnic_mutex);
9996
9997 return rc;
9998}
9999
10000static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10001{
10002 struct cnic_ops *c_ops;
10003 int rc = 0;
10004
10005 rcu_read_lock();
10006 c_ops = rcu_dereference(bp->cnic_ops);
10007 if (c_ops)
10008 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10009 rcu_read_unlock();
10010
10011 return rc;
10012}
10013
10014/*
10015 * for commands that have no data
10016 */
9f6c9258 10017int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
10018{
10019 struct cnic_ctl_info ctl = {0};
10020
10021 ctl.cmd = cmd;
10022
10023 return bnx2x_cnic_ctl_send(bp, &ctl);
10024}
10025
10026static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
10027{
10028 struct cnic_ctl_info ctl;
10029
10030 /* first we tell CNIC and only then we count this as a completion */
10031 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
10032 ctl.data.comp.cid = cid;
10033
10034 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 10035 bnx2x_cnic_sp_post(bp, 0);
993ac7b5
MC
10036}
10037
10038static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
10039{
10040 struct bnx2x *bp = netdev_priv(dev);
10041 int rc = 0;
10042
10043 switch (ctl->cmd) {
10044 case DRV_CTL_CTXTBL_WR_CMD: {
10045 u32 index = ctl->data.io.offset;
10046 dma_addr_t addr = ctl->data.io.dma_addr;
10047
10048 bnx2x_ilt_wr(bp, index, addr);
10049 break;
10050 }
10051
c2bff63f
DK
10052 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
10053 int count = ctl->data.credit.credit_count;
993ac7b5
MC
10054
10055 bnx2x_cnic_sp_post(bp, count);
10056 break;
10057 }
10058
10059 /* rtnl_lock is held. */
10060 case DRV_CTL_START_L2_CMD: {
10061 u32 cli = ctl->data.ring.client_id;
10062
ec6ba945
VZ
10063 /* Clear FCoE FIP and ALL ENODE MACs addresses first */
10064 bnx2x_del_fcoe_eth_macs(bp);
10065
523224a3
DK
10066 /* Set iSCSI MAC address */
10067 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
10068
10069 mmiowb();
10070 barrier();
10071
10072 /* Start accepting on iSCSI L2 ring. Accept all multicasts
10073 * because it's the only way for UIO Client to accept
10074 * multicasts (in non-promiscuous mode only one Client per
 10075 * function will receive multicast packets, the leading one
 10076 * in our case).
10077 */
10078 bnx2x_rxq_set_mac_filters(bp, cli,
10079 BNX2X_ACCEPT_UNICAST |
10080 BNX2X_ACCEPT_BROADCAST |
10081 BNX2X_ACCEPT_ALL_MULTICAST);
10082 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10083
993ac7b5
MC
10084 break;
10085 }
10086
10087 /* rtnl_lock is held. */
10088 case DRV_CTL_STOP_L2_CMD: {
10089 u32 cli = ctl->data.ring.client_id;
10090
523224a3
DK
10091 /* Stop accepting on iSCSI L2 ring */
10092 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
10093 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10094
10095 mmiowb();
10096 barrier();
10097
10098 /* Unset iSCSI L2 MAC */
10099 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
993ac7b5
MC
10100 break;
10101 }
c2bff63f
DK
10102 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
10103 int count = ctl->data.credit.credit_count;
10104
10105 smp_mb__before_atomic_inc();
6e30dd4e 10106 atomic_add(count, &bp->cq_spq_left);
c2bff63f
DK
10107 smp_mb__after_atomic_inc();
10108 break;
10109 }
993ac7b5 10110
fab0dc89
DK
10111 case DRV_CTL_ISCSI_STOPPED_CMD: {
10112 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
10113 break;
10114 }
10115
993ac7b5
MC
10116 default:
10117 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10118 rc = -EINVAL;
10119 }
10120
10121 return rc;
10122}
10123
9f6c9258 10124void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
10125{
10126 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10127
10128 if (bp->flags & USING_MSIX_FLAG) {
10129 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10130 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10131 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10132 } else {
10133 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10134 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10135 }
f2e0899f
DK
10136 if (CHIP_IS_E2(bp))
10137 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10138 else
10139 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10140
993ac7b5 10141 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
523224a3 10142 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
993ac7b5
MC
10143 cp->irq_arr[1].status_blk = bp->def_status_blk;
10144 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 10145 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
993ac7b5
MC
10146
10147 cp->num_irq = 2;
10148}
10149
10150static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
10151 void *data)
10152{
10153 struct bnx2x *bp = netdev_priv(dev);
10154 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10155
10156 if (ops == NULL)
10157 return -EINVAL;
10158
993ac7b5
MC
10159 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
10160 if (!bp->cnic_kwq)
10161 return -ENOMEM;
10162
10163 bp->cnic_kwq_cons = bp->cnic_kwq;
10164 bp->cnic_kwq_prod = bp->cnic_kwq;
10165 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
10166
10167 bp->cnic_spq_pending = 0;
10168 bp->cnic_kwq_pending = 0;
10169
10170 bp->cnic_data = data;
10171
10172 cp->num_irq = 0;
10173 cp->drv_state = CNIC_DRV_STATE_REGD;
523224a3 10174 cp->iro_arr = bp->iro_arr;
993ac7b5 10175
993ac7b5 10176 bnx2x_setup_cnic_irq_info(bp);
c2bff63f 10177
993ac7b5
MC
10178 rcu_assign_pointer(bp->cnic_ops, ops);
10179
10180 return 0;
10181}
10182
10183static int bnx2x_unregister_cnic(struct net_device *dev)
10184{
10185 struct bnx2x *bp = netdev_priv(dev);
10186 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10187
10188 mutex_lock(&bp->cnic_mutex);
993ac7b5
MC
10189 cp->drv_state = 0;
10190 rcu_assign_pointer(bp->cnic_ops, NULL);
10191 mutex_unlock(&bp->cnic_mutex);
10192 synchronize_rcu();
10193 kfree(bp->cnic_kwq);
10194 bp->cnic_kwq = NULL;
10195
10196 return 0;
10197}
10198
10199struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10200{
10201 struct bnx2x *bp = netdev_priv(dev);
10202 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10203
2ba45142
VZ
10204 /* If both iSCSI and FCoE are disabled - return NULL in
10205 * order to indicate CNIC that it should not try to work
10206 * with this device.
10207 */
10208 if (NO_ISCSI(bp) && NO_FCOE(bp))
10209 return NULL;
10210
993ac7b5
MC
10211 cp->drv_owner = THIS_MODULE;
10212 cp->chip_id = CHIP_ID(bp);
10213 cp->pdev = bp->pdev;
10214 cp->io_base = bp->regview;
10215 cp->io_base2 = bp->doorbells;
10216 cp->max_kwqe_pending = 8;
523224a3 10217 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
c2bff63f
DK
10218 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10219 bnx2x_cid_ilt_lines(bp);
993ac7b5 10220 cp->ctx_tbl_len = CNIC_ILT_LINES;
c2bff63f 10221 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
993ac7b5
MC
10222 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10223 cp->drv_ctl = bnx2x_drv_ctl;
10224 cp->drv_register_cnic = bnx2x_register_cnic;
10225 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
ec6ba945
VZ
10226 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10227 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10228 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
c2bff63f
DK
10229 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10230
2ba45142
VZ
10231 if (NO_ISCSI_OOO(bp))
10232 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10233
10234 if (NO_ISCSI(bp))
10235 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10236
10237 if (NO_FCOE(bp))
10238 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10239
c2bff63f
DK
10240 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10241 "starting cid %d\n",
10242 cp->ctx_blk_size,
10243 cp->ctx_tbl_offset,
10244 cp->ctx_tbl_len,
10245 cp->starting_cid);
993ac7b5
MC
10246 return cp;
10247}
10248EXPORT_SYMBOL(bnx2x_cnic_probe);
10249
10250#endif /* BCM_CNIC */
94a78b79 10251