/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

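/*
 * The storm_memset_*() helpers below program per-function data structures
 * into the internal memories of the STORM processors (X/T/U/CSTORM) through
 * their BAR windows: 64-bit DMA addresses are written as two 32-bit halves
 * and whole structures are copied one dword at a time to firmware-defined
 * offsets.
 */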
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
				struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

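/*
 * Indirect register access: the target GRC address is latched through
 * PCI config space (PCICFG_GRC_ADDRESS) and the data is moved through
 * PCICFG_GRC_DATA; the address register is then parked back at
 * PCICFG_VENDOR_ID_OFFSET.
 */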
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

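/*
 * DMAE completion handling: a command may be told to post a 32-bit
 * completion value (to PCI or GRC) when it finishes.  The helpers below
 * toggle the relevant bits in an already-built opcode.
 */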
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

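/*
 * Prepare a DMAE command that completes by writing DMAE_COMP_VAL into the
 * slowpath wb_comp word, which the issuer then polls.
 */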
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}

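/*
 * Copy len32 dwords from host memory (dma_addr) to the GRC address
 * dst_addr.  Before DMAE is ready (early init), fall back to indirect
 * register writes.
 */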
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

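/*
 * A single DMAE write is limited to DMAE_LEN32_WR_MAX dwords, so longer
 * transfers are issued as a sequence of maximal-size chunks.
 */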
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

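/*
 * Scan the assert lists of all four storms and print any entries posted
 * by the firmware; returns the number of asserts found.
 */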
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

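/*
 * Dump the MCP trace buffer that sits just below the shmem region: read
 * the current mark, then print the two wrapped halves of the cyclic
 * buffer in chronological order.
 */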
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
		       bp->def_status_blk->sp_sb.index_values[i],
		       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
	       sp_sb_data.igu_sb_id,
	       sp_sb_data.igu_seg_id,
	       sp_sb_data.p_func.pf_id,
	       sp_sb_data.p_func.vnic_id,
	       sp_sb_data.p_func.vf_id,
	       sp_sb_data.p_func.vf_valid);


	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
			       fp->sb_running_index[j],
			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
			       fp->sb_index_values[j],
			       (j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
			       "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
			       "time_to_expire (0x%x) "
			       "timer_value(0x%x)\n", j,
			       hc_sm_p[j].__flags,
			       hc_sm_p[j].igu_sb_id,
			       hc_sm_p[j].igu_seg_id,
			       hc_sm_p[j].time_to_expire,
			       hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
					 "timeout (0x%x)\n", j,
			       hc_index_p[j].flags,
			       hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

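/*
 * HC (host coalescing) interrupt enable: select INTx/MSI/MSI-X signalling
 * according to the driver flags, then open the attention leading/trailing
 * edge registers (per-VN in multi-function mode).
 */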
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability; it is forbidden to disable
	 * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is left on, use the mask
		 * register to prevent the HC from sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->spq_left);
	/* push the change in fp->state and towards the memory */
	smp_wmb();

	return;
}

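/*
 * INTx/MSI interrupt handler: ack the IGU to obtain the status bits,
 * schedule NAPI for every fastpath whose bit is set, then hand the
 * remaining bits to the CNIC handler and the slowpath task.
 */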
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

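/*
 * The HW lock registers implement a test-and-set protocol: writing a
 * resource bit to DRIVER_CONTROL_x + 4 attempts the lock, and reading
 * DRIVER_CONTROL_x back shows the current owner mask.  Polled here for
 * up to five seconds.
 */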
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

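/*
 * GPIO pins can be swapped between the two ports when the NIG port-swap
 * strap is active, so each GPIO accessor first translates (gpio_num, port)
 * into the effective register shift before touching MISC_REG_GPIO*.
 */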
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

a22f0788
YR
1742int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1743{
1744 u32 sel_phy_idx = 0;
1745 if (bp->link_vars.link_up) {
1746 sel_phy_idx = EXT_PHY1;
1747 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1748 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1749 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1750 sel_phy_idx = EXT_PHY2;
1751 } else {
1752
1753 switch (bnx2x_phy_selection(&bp->link_params)) {
1754 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1755 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1756 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1757 sel_phy_idx = EXT_PHY1;
1758 break;
1759 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1760 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1761 sel_phy_idx = EXT_PHY2;
1762 break;
1763 }
1764 }
1765 /*
1766 * The selected active PHY is always the one after swapping (in case
1767 * PHY swapping is enabled), so when swapping is enabled we need to
1768 * reverse the configuration.
1769 */
1770
1771 if (bp->link_params.multi_phy_config &
1772 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1773 if (sel_phy_idx == EXT_PHY1)
1774 sel_phy_idx = EXT_PHY2;
1775 else if (sel_phy_idx == EXT_PHY2)
1776 sel_phy_idx = EXT_PHY1;
1777 }
1778 return LINK_CONFIG_IDX(sel_phy_idx);
1779}
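/*
 * Example (hypothetical configuration): with PHY swapping enabled and a
 * fibre link reported on EXT_PHY2, the code above first selects EXT_PHY2
 * and the swap block then maps it back to EXT_PHY1, so the function
 * returns LINK_CONFIG_IDX(EXT_PHY1) - the index of the configuration
 * that was applied before the swap took effect.
 */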
1780
9f6c9258 1781void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1782{
a22f0788 1783 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1784 switch (bp->link_vars.ieee_fc &
1785 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1786 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1787 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1788 ADVERTISED_Pause);
c18487ee 1789 break;
356e2385 1790
c18487ee 1791 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1792 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1793 ADVERTISED_Pause);
c18487ee 1794 break;
356e2385 1795
c18487ee 1796 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1797 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1798 break;
356e2385 1799
c18487ee 1800 default:
a22f0788 1801 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1802 ADVERTISED_Pause);
c18487ee
YR
1803 break;
1804 }
1805}
f1410647 1806
9f6c9258 1807u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1808{
19680c48
EG
1809 if (!BP_NOMCP(bp)) {
1810 u8 rc;
a22f0788
YR
1811 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1812 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1813 /* Initialize link parameters structure variables */
8c99e7b0
YR
1814 /* It is recommended to turn off RX FC for jumbo frames
1815 for better performance */
f2e0899f 1816 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1817 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1818 else
c0700f90 1819 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1820
4a37fb66 1821 bnx2x_acquire_phy_lock(bp);
b5bf9068 1822
a22f0788 1823 if (load_mode == LOAD_DIAG) {
de6eae1f 1824 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1825 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1826 }
b5bf9068 1827
19680c48 1828 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1829
4a37fb66 1830 bnx2x_release_phy_lock(bp);
a2fbb9ea 1831
3c96c68b
EG
1832 bnx2x_calc_fc_adv(bp);
1833
b5bf9068
EG
1834 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1835 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1836 bnx2x_link_report(bp);
b5bf9068 1837 }
a22f0788 1838 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1839 return rc;
1840 }
f5372251 1841 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1842 return -EINVAL;
a2fbb9ea
ET
1843}
1844
9f6c9258 1845void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1846{
19680c48 1847 if (!BP_NOMCP(bp)) {
4a37fb66 1848 bnx2x_acquire_phy_lock(bp);
54c2fb78 1849 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1850 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1851 bnx2x_release_phy_lock(bp);
a2fbb9ea 1852
19680c48
EG
1853 bnx2x_calc_fc_adv(bp);
1854 } else
f5372251 1855 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1856}
a2fbb9ea 1857
c18487ee
YR
1858static void bnx2x__link_reset(struct bnx2x *bp)
1859{
19680c48 1860 if (!BP_NOMCP(bp)) {
4a37fb66 1861 bnx2x_acquire_phy_lock(bp);
589abe3a 1862 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1863 bnx2x_release_phy_lock(bp);
19680c48 1864 } else
f5372251 1865 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1866}
a2fbb9ea 1867
a22f0788 1868u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1869{
2145a920 1870 u8 rc = 0;
a2fbb9ea 1871
2145a920
VZ
1872 if (!BP_NOMCP(bp)) {
1873 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1874 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1875 is_serdes);
2145a920
VZ
1876 bnx2x_release_phy_lock(bp);
1877 } else
1878 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1879
c18487ee
YR
1880 return rc;
1881}
a2fbb9ea 1882
8a1c38d1 1883static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1884{
8a1c38d1
EG
1885 u32 r_param = bp->link_vars.line_speed / 8;
1886 u32 fair_periodic_timeout_usec;
1887 u32 t_fair;
34f80b04 1888
8a1c38d1
EG
1889 memset(&(bp->cmng.rs_vars), 0,
1890 sizeof(struct rate_shaping_vars_per_port));
1891 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1892
8a1c38d1
EG
1893 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1894 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1895
8a1c38d1
EG
1896 /* this is the threshold below which no timer arming will occur.
1897 The 1.25 coefficient makes the threshold a little bigger
1898 than the real time, to compensate for timer inaccuracy */
1899 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1900 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1901
8a1c38d1
EG
1902 /* resolution of fairness timer */
1903 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1904 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1905 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1906
8a1c38d1
EG
1907 /* this is the threshold below which we won't arm the timer anymore */
1908 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1909
8a1c38d1
EG
1910 /* we multiply by 1e3/8 to get bytes/msec.
1911 We don't want the credits to exceed
1912 t_fair*FAIR_MEM (the algorithm resolution) */
1913 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1914 /* since each tick is 4 usec */
1915 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1916}
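/*
 * Worked example (assuming a 10Gbps link, i.e. line_speed == 10000, and
 * the constants implied by the comments above):
 *
 *	r_param             = 10000 / 8            = 1250 bytes/usec
 *	rs_periodic_timeout = 100 / 4              = 25 SDM ticks
 *	rs_threshold        = (100 * 1250 * 5) / 4 = 156250 bytes
 *	t_fair              = T_FAIR_COEF / 10000  = 1000 usec
 *
 * so at 10G a fairness period covers the bytes sent in 1ms, while the
 * same coefficient yields a 10ms period at 1G.
 */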
1917
2691d51d
EG
1918/* Calculates the sum of vn_min_rates.
1919 It's needed for further normalizing of the min_rates.
1920 Returns:
1921 sum of vn_min_rates.
1922 or
1923 0 - if all the min_rates are 0.
1924 In the latter case the fairness algorithm should be deactivated.
1925 If not all min_rates are zero then those that are zero will be set to 1.
1926 */
1927static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1928{
1929 int all_zero = 1;
2691d51d
EG
1930 int vn;
1931
1932 bp->vn_weight_sum = 0;
1933 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1934 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1935 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1936 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1937
1938 /* Skip hidden vns */
1939 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1940 continue;
1941
1942 /* If min rate is zero - set it to 1 */
1943 if (!vn_min_rate)
1944 vn_min_rate = DEF_MIN_RATE;
1945 else
1946 all_zero = 0;
1947
1948 bp->vn_weight_sum += vn_min_rate;
1949 }
1950
1951 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1952 if (all_zero) {
1953 bp->cmng.flags.cmng_enables &=
1954 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1955 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
1956 " fairness will be disabled\n");
1957 } else
1958 bp->cmng.flags.cmng_enables |=
1959 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1960}
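/*
 * Example (hypothetical mf_config values, assuming the min-BW field is a
 * percentage as the *100 scaling suggests): three visible vns with
 * min-BW fields of 10, 0 and 40 give vn_min_rates of 1000, DEF_MIN_RATE
 * and 4000, so vn_weight_sum = 5000 + DEF_MIN_RATE and fairness stays
 * enabled; only when every visible vn reports 0 is fairness disabled.
 */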
1961
f2e0899f 1962static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1963{
1964 struct rate_shaping_vars_per_vn m_rs_vn;
1965 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1966 u32 vn_cfg = bp->mf_config[vn];
1967 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1968 u16 vn_min_rate, vn_max_rate;
1969 int i;
1970
1971 /* If function is hidden - set min and max to zeroes */
1972 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1973 vn_min_rate = 0;
1974 vn_max_rate = 0;
1975
1976 } else {
faa6fcbb
DK
1977 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1978
34f80b04
EG
1979 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1980 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
faa6fcbb
DK
1981 /* If fairness is enabled (not all min rates are zero) and
1982 the current min rate is zero - set it to 1.
1983 This is a requirement of the algorithm. */
f2e0899f 1984 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04 1985 vn_min_rate = DEF_MIN_RATE;
faa6fcbb
DK
1986
1987 if (IS_MF_SI(bp))
1988 /* maxCfg is in percent of link speed */
1989 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1990 else
1991 /* maxCfg is absolute in 100Mb units */
1992 vn_max_rate = maxCfg * 100;
34f80b04 1993 }
f85582f8 1994
8a1c38d1 1995 DP(NETIF_MSG_IFUP,
b015e3d1 1996 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1997 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1998
1999 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2000 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2001
2002 /* global vn counter - maximal Mbps for this vn */
2003 m_rs_vn.vn_counter.rate = vn_max_rate;
2004
2005 /* quota - number of bytes transmitted in this period */
2006 m_rs_vn.vn_counter.quota =
2007 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2008
8a1c38d1 2009 if (bp->vn_weight_sum) {
34f80b04
EG
2010 /* credit for each period of the fairness algorithm:
2011 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2012 vn_weight_sum should not be larger than 10000, thus
2013 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2014 than zero */
34f80b04 2015 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
2016 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2017 (8 * bp->vn_weight_sum))),
ff80ee02
DK
2018 (bp->cmng.fair_vars.fair_threshold +
2019 MIN_ABOVE_THRESH));
cdaa7cb8 2020 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2021 m_fair_vn.vn_credit_delta);
2022 }
2023
34f80b04
EG
2024 /* Store it to internal memory */
2025 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2026 REG_WR(bp, BAR_XSTRORM_INTMEM +
2027 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2028 ((u32 *)(&m_rs_vn))[i]);
2029
2030 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2031 REG_WR(bp, BAR_XSTRORM_INTMEM +
2032 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2033 ((u32 *)(&m_fair_vn))[i]);
2034}
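/*
 * Quota arithmetic, illustrated (hypothetical numbers): a vn capped at
 * vn_max_rate = 2500 Mbps with a 100 usec rate-shaping period gets
 *
 *	quota = (2500 * 100) / 8 = 31250 bytes
 *
 * per period - the byte budget the vn may transmit before the shaper
 * timer must re-arm.
 */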
f85582f8 2035
523224a3
DK
2036static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2037{
2038 if (CHIP_REV_IS_SLOW(bp))
2039 return CMNG_FNS_NONE;
fb3bff17 2040 if (IS_MF(bp))
523224a3
DK
2041 return CMNG_FNS_MINMAX;
2042
2043 return CMNG_FNS_NONE;
2044}
2045
2046static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2047{
0793f83f 2048 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
523224a3
DK
2049
2050 if (BP_NOMCP(bp))
2051 return; /* what should be the default value in this case */
2052
0793f83f
DK
2053 /* For 2 port configuration the absolute function number formula
2054 * is:
2055 * abs_func = 2 * vn + BP_PORT + BP_PATH
2056 *
2057 * and there are 4 functions per port
2058 *
2059 * For 4 port configuration it is
2060 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2061 *
2062 * and there are 2 functions per port
2063 */
523224a3 2064 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
0793f83f
DK
2065 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2066
2067 if (func >= E1H_FUNC_MAX)
2068 break;
2069
f2e0899f 2070 bp->mf_config[vn] =
523224a3
DK
2071 MF_CFG_RD(bp, func_mf_config[func].config);
2072 }
2073}
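/*
 * Mapping example for the formulas above (assuming E1H_FUNC_MAX == 8):
 * on a 2-port chip (n == 1), port 0 / path 0 reads functions 0, 2, 4, 6
 * for vn 0..3; on a 4-port chip (n == 2) the same port/path reads
 * functions 0 and 4 and then stops once the computed function number
 * reaches E1H_FUNC_MAX.
 */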
2074
2075static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2076{
2077
2078 if (cmng_type == CMNG_FNS_MINMAX) {
2079 int vn;
2080
2081 /* clear cmng_enables */
2082 bp->cmng.flags.cmng_enables = 0;
2083
2084 /* read mf conf from shmem */
2085 if (read_cfg)
2086 bnx2x_read_mf_cfg(bp);
2087
2088 /* Init rate shaping and fairness contexts */
2089 bnx2x_init_port_minmax(bp);
2090
2091 /* vn_weight_sum and enable fairness if not 0 */
2092 bnx2x_calc_vn_weight_sum(bp);
2093
2094 /* calculate and set min-max rate for each vn */
2095 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2096 bnx2x_init_vn_minmax(bp, vn);
2097
2098 /* always enable rate shaping and fairness */
2099 bp->cmng.flags.cmng_enables |=
2100 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2101 if (!bp->vn_weight_sum)
2102 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2103 " fairness will be disabled\n");
2104 return;
2105 }
2106
2107 /* rate shaping and fairness are disabled */
2108 DP(NETIF_MSG_IFUP,
2109 "rate shaping and fairness are disabled\n");
2110}
34f80b04 2111
523224a3
DK
2112static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2113{
2114 int port = BP_PORT(bp);
2115 int func;
2116 int vn;
2117
2118 /* Set the attention towards other drivers on the same port */
2119 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2120 if (vn == BP_E1HVN(bp))
2121 continue;
2122
2123 func = ((vn << 1) | port);
2124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2125 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2126 }
2127}
8a1c38d1 2128
c18487ee
YR
2129/* This function is called upon link interrupt */
2130static void bnx2x_link_attn(struct bnx2x *bp)
2131{
d9e8b185 2132 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2133 /* Make sure that we are synced with the current statistics */
2134 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2135
c18487ee 2136 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2137
bb2a0f7a
YG
2138 if (bp->link_vars.link_up) {
2139
1c06328c 2140 /* dropless flow control */
f2e0899f 2141 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2142 int port = BP_PORT(bp);
2143 u32 pause_enabled = 0;
2144
2145 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2146 pause_enabled = 1;
2147
2148 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2149 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2150 pause_enabled);
2151 }
2152
bb2a0f7a
YG
2153 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2154 struct host_port_stats *pstats;
2155
2156 pstats = bnx2x_sp(bp, port_stats);
2157 /* reset old bmac stats */
2158 memset(&(pstats->mac_stx[0]), 0,
2159 sizeof(struct mac_stx));
2160 }
f34d28ea 2161 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2162 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2163 }
2164
d9e8b185
VZ
2165 /* indicate link status only if link status actually changed */
2166 if (prev_link_status != bp->link_vars.link_status)
2167 bnx2x_link_report(bp);
34f80b04 2168
f2e0899f
DK
2169 if (IS_MF(bp))
2170 bnx2x_link_sync_notify(bp);
34f80b04 2171
f2e0899f
DK
2172 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2173 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2174
f2e0899f
DK
2175 if (cmng_fns != CMNG_FNS_NONE) {
2176 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2177 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2178 } else
2179 /* rate shaping and fairness are disabled */
2180 DP(NETIF_MSG_IFUP,
2181 "single function mode without fairness\n");
34f80b04 2182 }
c18487ee 2183}
a2fbb9ea 2184
9f6c9258 2185void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2186{
f34d28ea 2187 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2188 return;
a2fbb9ea 2189
c18487ee 2190 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2191
bb2a0f7a
YG
2192 if (bp->link_vars.link_up)
2193 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2194 else
2195 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2196
f2e0899f
DK
2197 /* the link status update could be the result of a DCC event,
2198 hence re-read the shmem mf configuration */
2199 bnx2x_read_mf_cfg(bp);
2691d51d 2200
c18487ee
YR
2201 /* indicate link status */
2202 bnx2x_link_report(bp);
a2fbb9ea 2203}
a2fbb9ea 2204
34f80b04
EG
2205static void bnx2x_pmf_update(struct bnx2x *bp)
2206{
2207 int port = BP_PORT(bp);
2208 u32 val;
2209
2210 bp->port.pmf = 1;
2211 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2212
2213 /* enable nig attention */
2214 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2215 if (bp->common.int_block == INT_BLOCK_HC) {
2216 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2217 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2218 } else if (CHIP_IS_E2(bp)) {
2219 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2220 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2221 }
bb2a0f7a
YG
2222
2223 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2224}
2225
c18487ee 2226/* end of Link */
a2fbb9ea
ET
2227
2228/* slow path */
2229
2230/*
2231 * General service functions
2232 */
2233
2691d51d 2234/* send the MCP a request, block until there is a reply */
a22f0788 2235u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2236{
f2e0899f 2237 int mb_idx = BP_FW_MB_IDX(bp);
2691d51d
EG
2238 u32 seq = ++bp->fw_seq;
2239 u32 rc = 0;
2240 u32 cnt = 1;
2241 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2242
c4ff7cbf 2243 mutex_lock(&bp->fw_mb_mutex);
f2e0899f
DK
2244 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2245 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2246
2691d51d
EG
2247 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2248
2249 do {
2250 /* let the FW do its magic ... */
2251 msleep(delay);
2252
f2e0899f 2253 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2254
c4ff7cbf
EG
2255 /* Give the FW up to 5 seconds (500*10ms) */
2256 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2257
2258 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2259 cnt*delay, rc, seq);
2260
2261 /* is this a reply to our command? */
2262 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2263 rc &= FW_MSG_CODE_MASK;
2264 else {
2265 /* FW BUG! */
2266 BNX2X_ERR("FW failed to respond!\n");
2267 bnx2x_fw_dump(bp);
2268 rc = 0;
2269 }
c4ff7cbf 2270 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2271
2272 return rc;
2273}
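/*
 * Usage sketch (illustrative): callers treat a zero return as "no valid
 * reply" and match the masked reply code otherwise, e.g.
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 *	if (!rc)
 *		BNX2X_ERR("MCP did not respond\n");
 *
 * The sequence number in the low bits of fw_mb_header is what ties a
 * reply to the request written to drv_mb_header.
 */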
2274
ec6ba945
VZ
2275static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2276{
2277#ifdef BCM_CNIC
2278 if (IS_FCOE_FP(fp) && IS_MF(bp))
2279 return false;
2280#endif
2281 return true;
2282}
2283
523224a3 2284/* must be called under rtnl_lock */
8d96286a 2285static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2286{
523224a3 2287 u32 mask = (1 << cl_id);
2691d51d 2288
523224a3
DK
2289 /* initial setting is BNX2X_ACCEPT_NONE */
2290 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2291 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2292 u8 unmatched_unicast = 0;
2691d51d 2293
0793f83f
DK
2294 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2295 unmatched_unicast = 1;
2296
523224a3
DK
2297 if (filters & BNX2X_PROMISCUOUS_MODE) {
2298 /* promiscuous - accept all, drop none */
2299 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2300 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
0793f83f
DK
2301 if (IS_MF_SI(bp)) {
2302 /*
2303 * In SI mode, promiscuous mode accepts
2304 * only unmatched packets
2305 */
2306 unmatched_unicast = 1;
2307 accp_all_ucast = 0;
2308 }
523224a3
DK
2309 }
2310 if (filters & BNX2X_ACCEPT_UNICAST) {
2311 /* accept matched ucast */
2312 drop_all_ucast = 0;
2313 }
d9c8f498 2314 if (filters & BNX2X_ACCEPT_MULTICAST)
523224a3
DK
2315 /* accept matched mcast */
2316 drop_all_mcast = 0;
d9c8f498 2317
523224a3
DK
2318 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2319 /* accept all ucast */
2320 drop_all_ucast = 0;
2321 accp_all_ucast = 1;
2322 }
2323 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2324 /* accept all mcast */
2325 drop_all_mcast = 0;
2326 accp_all_mcast = 1;
2327 }
2328 if (filters & BNX2X_ACCEPT_BROADCAST) {
2329 /* accept (all) bcast */
2330 drop_all_bcast = 0;
2331 accp_all_bcast = 1;
2332 }
2691d51d 2333
523224a3
DK
2334 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2335 bp->mac_filters.ucast_drop_all | mask :
2336 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2337
523224a3
DK
2338 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2339 bp->mac_filters.mcast_drop_all | mask :
2340 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2341
523224a3
DK
2342 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2343 bp->mac_filters.bcast_drop_all | mask :
2344 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2345
523224a3
DK
2346 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2347 bp->mac_filters.ucast_accept_all | mask :
2348 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2349
523224a3
DK
2350 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2351 bp->mac_filters.mcast_accept_all | mask :
2352 bp->mac_filters.mcast_accept_all & ~mask;
2353
2354 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2355 bp->mac_filters.bcast_accept_all | mask :
2356 bp->mac_filters.bcast_accept_all & ~mask;
2357
2358 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2359 bp->mac_filters.unmatched_unicast | mask :
2360 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2361}
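/*
 * The repeated pattern above is a per-client bit update; a minimal
 * sketch of the idiom (hypothetical helper, not part of the driver):
 *
 *	static u32 set_mask_bit(u32 field, u32 mask, bool set)
 *	{
 *		return set ? (field | mask) : (field & ~mask);
 *	}
 *
 * Each filter decision toggles exactly the cl_id bit of one
 * drop_all/accept_all field, leaving other clients' bits untouched.
 */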
2362
8d96286a 2363static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2364{
030f3356
DK
2365 struct tstorm_eth_function_common_config tcfg = {0};
2366 u16 rss_flgs;
2691d51d 2367
030f3356
DK
2368 /* tpa */
2369 if (p->func_flgs & FUNC_FLG_TPA)
2370 tcfg.config_flags |=
2371 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2372
030f3356
DK
2373 /* set rss flags */
2374 rss_flgs = (p->rss->mode <<
2375 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2376
2377 if (p->rss->cap & RSS_IPV4_CAP)
2378 rss_flgs |= RSS_IPV4_CAP_MASK;
2379 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2380 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2381 if (p->rss->cap & RSS_IPV6_CAP)
2382 rss_flgs |= RSS_IPV6_CAP_MASK;
2383 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2384 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2385
2386 tcfg.config_flags |= rss_flgs;
2387 tcfg.rss_result_mask = p->rss->result_mask;
2388
2389 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2390
523224a3
DK
2391 /* Enable the function in the FW */
2392 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2393 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2394
523224a3
DK
2395 /* statistics */
2396 if (p->func_flgs & FUNC_FLG_STATS) {
2397 struct stats_indication_flags stats_flags = {0};
2398 stats_flags.collect_eth = 1;
2691d51d 2399
523224a3
DK
2400 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2401 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2402
523224a3
DK
2403 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2404 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2405
523224a3
DK
2406 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2407 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2408
523224a3
DK
2409 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2410 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2411 }
2412
523224a3
DK
2413 /* spq */
2414 if (p->func_flgs & FUNC_FLG_SPQ) {
2415 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2416 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2417 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2418 }
2691d51d
EG
2419}
2420
523224a3
DK
2421static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2422 struct bnx2x_fastpath *fp)
28912902 2423{
523224a3 2424 u16 flags = 0;
28912902 2425
523224a3
DK
2426 /* calculate queue flags */
2427 flags |= QUEUE_FLG_CACHE_ALIGN;
2428 flags |= QUEUE_FLG_HC;
0793f83f 2429 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
28912902 2430
523224a3
DK
2431 flags |= QUEUE_FLG_VLAN;
2432 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
523224a3
DK
2433
2434 if (!fp->disable_tpa)
2435 flags |= QUEUE_FLG_TPA;
2436
ec6ba945
VZ
2437 flags = stat_counter_valid(bp, fp) ?
2438 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
523224a3
DK
2439
2440 return flags;
2441}
2442
2443static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2444 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2445 struct bnx2x_rxq_init_params *rxq_init)
2446{
2447 u16 max_sge = 0;
2448 u16 sge_sz = 0;
2449 u16 tpa_agg_size = 0;
2450
2451 /* calculate queue flags */
2452 u16 flags = bnx2x_get_cl_flags(bp, fp);
2453
2454 if (!fp->disable_tpa) {
2455 pause->sge_th_hi = 250;
2456 pause->sge_th_lo = 150;
2457 tpa_agg_size = min_t(u32,
2458 (min_t(u32, 8, MAX_SKB_FRAGS) *
2459 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2460 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2461 SGE_PAGE_SHIFT;
2462 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2463 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2464 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2465 0xffff);
2466 }
2467
2468 /* pause - not for e1 */
2469 if (!CHIP_IS_E1(bp)) {
2470 pause->bd_th_hi = 350;
2471 pause->bd_th_lo = 250;
2472 pause->rcq_th_hi = 350;
2473 pause->rcq_th_lo = 250;
2474 pause->sge_th_hi = 0;
2475 pause->sge_th_lo = 0;
2476 pause->pri_map = 1;
2477 }
2478
2479 /* rxq setup */
2480 rxq_init->flags = flags;
2481 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2482 rxq_init->dscr_map = fp->rx_desc_mapping;
2483 rxq_init->sge_map = fp->rx_sge_mapping;
2484 rxq_init->rcq_map = fp->rx_comp_mapping;
2485 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2486 rxq_init->mtu = bp->dev->mtu;
2487 rxq_init->buf_sz = bp->rx_buf_size;
2488 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2489 rxq_init->cl_id = fp->cl_id;
2490 rxq_init->spcl_id = fp->cl_id;
2491 rxq_init->stat_id = fp->cl_id;
2492 rxq_init->tpa_agg_sz = tpa_agg_size;
2493 rxq_init->sge_buf_sz = sge_sz;
2494 rxq_init->max_sges_pkt = max_sge;
2495 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2496 rxq_init->fw_sb_id = fp->fw_sb_id;
2497
ec6ba945
VZ
2498 if (IS_FCOE_FP(fp))
2499 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2500 else
2501 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
523224a3
DK
2502
2503 rxq_init->cid = HW_CID(bp, fp->cid);
2504
2505 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2506}
2507
2508static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2509 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2510{
2511 u16 flags = bnx2x_get_cl_flags(bp, fp);
2512
2513 txq_init->flags = flags;
2514 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2515 txq_init->dscr_map = fp->tx_desc_mapping;
2516 txq_init->stat_id = fp->cl_id;
2517 txq_init->cid = HW_CID(bp, fp->cid);
2518 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2519 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2520 txq_init->fw_sb_id = fp->fw_sb_id;
ec6ba945
VZ
2521
2522 if (IS_FCOE_FP(fp)) {
2523 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2524 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2525 }
2526
523224a3
DK
2527 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2528}
2529
8d96286a 2530static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
2531{
2532 struct bnx2x_func_init_params func_init = {0};
2533 struct bnx2x_rss_params rss = {0};
2534 struct event_ring_data eq_data = { {0} };
2535 u16 flags;
2536
2537 /* pf specific setups */
2538 if (!CHIP_IS_E1(bp))
fb3bff17 2539 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2540
f2e0899f
DK
2541 if (CHIP_IS_E2(bp)) {
2542 /* reset IGU PF statistics: MSIX + ATTN */
2543 /* PF */
2544 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2545 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2546 (CHIP_MODE_IS_4_PORT(bp) ?
2547 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2548 /* ATTN */
2549 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2550 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2551 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2552 (CHIP_MODE_IS_4_PORT(bp) ?
2553 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2554 }
2555
523224a3
DK
2556 /* function setup flags */
2557 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2558
f2e0899f
DK
2559 if (CHIP_IS_E1x(bp))
2560 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2561 else
2562 flags |= FUNC_FLG_TPA;
523224a3 2563
030f3356
DK
2564 /* function setup */
2565
523224a3
DK
2566 /**
2567 * Although RSS is meaningless when there is a single HW queue, we
2568 * still need it enabled in order to have HW Rx hash generated.
523224a3 2569 */
030f3356
DK
2570 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2571 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2572 rss.mode = bp->multi_mode;
2573 rss.result_mask = MULTI_MASK;
2574 func_init.rss = &rss;
523224a3
DK
2575
2576 func_init.func_flgs = flags;
2577 func_init.pf_id = BP_FUNC(bp);
2578 func_init.func_id = BP_FUNC(bp);
2579 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2580 func_init.spq_map = bp->spq_mapping;
2581 func_init.spq_prod = bp->spq_prod_idx;
2582
2583 bnx2x_func_init(bp, &func_init);
2584
2585 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2586
2587 /*
2588 Congestion management values depend on the link rate.
2589 There is no active link, so the initial link rate is set to 10 Gbps.
2590 When the link comes up, the congestion management values are
2591 re-calculated according to the actual link rate.
2592 */
2593 bp->link_vars.line_speed = SPEED_10000;
2594 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2595
2596 /* Only the PMF sets the HW */
2597 if (bp->port.pmf)
2598 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2599
2600 /* no rx until link is up */
2601 bp->rx_mode = BNX2X_RX_MODE_NONE;
2602 bnx2x_set_storm_rx_mode(bp);
2603
2604 /* init Event Queue */
2605 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2606 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2607 eq_data.producer = bp->eq_prod;
2608 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2609 eq_data.sb_id = DEF_SB_ID;
2610 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2611}
2612
2613
2614static void bnx2x_e1h_disable(struct bnx2x *bp)
2615{
2616 int port = BP_PORT(bp);
2617
2618 netif_tx_disable(bp->dev);
2619
2620 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2621
2622 netif_carrier_off(bp->dev);
2623}
2624
2625static void bnx2x_e1h_enable(struct bnx2x *bp)
2626{
2627 int port = BP_PORT(bp);
2628
2629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2630
2631 /* Tx queues should only be re-enabled */
2632 netif_tx_wake_all_queues(bp->dev);
2633
2634 /*
2635 * Should not call netif_carrier_on since it will be called from the
2636 * link state check if the link is up
2637 */
2638}
2639
0793f83f
DK
2640/* called due to MCP event (on pmf):
2641 * reread new bandwidth configuration
2642 * configure FW
2643 * notify other functions about the change
2644 */
2645static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2646{
2647 if (bp->link_vars.link_up) {
2648 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2649 bnx2x_link_sync_notify(bp);
2650 }
2651 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2652}
2653
2654static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2655{
2656 bnx2x_config_mf_bw(bp);
2657 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2658}
2659
523224a3
DK
2660static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2661{
2662 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2663
2664 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2665
2666 /*
2667 * This is the only place besides the function initialization
2668 * where the bp->flags can change so it is done without any
2669 * locks
2670 */
f2e0899f 2671 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2672 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2673 bp->flags |= MF_FUNC_DIS;
2674
2675 bnx2x_e1h_disable(bp);
2676 } else {
2677 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2678 bp->flags &= ~MF_FUNC_DIS;
2679
2680 bnx2x_e1h_enable(bp);
2681 }
2682 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2683 }
2684 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 2685 bnx2x_config_mf_bw(bp);
523224a3
DK
2686 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2687 }
2688
2689 /* Report results to MCP */
2690 if (dcc_event)
2691 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2692 else
2693 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2694}
2695
2696/* must be called under the spq lock */
2697static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2698{
2699 struct eth_spe *next_spe = bp->spq_prod_bd;
2700
2701 if (bp->spq_prod_bd == bp->spq_last_bd) {
2702 bp->spq_prod_bd = bp->spq;
2703 bp->spq_prod_idx = 0;
2704 DP(NETIF_MSG_TIMER, "end of spq\n");
2705 } else {
2706 bp->spq_prod_bd++;
2707 bp->spq_prod_idx++;
2708 }
2709 return next_spe;
2710}
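/*
 * Ring-wrap example: the element returned is always the current producer
 * slot; when spq_prod_bd reaches spq_last_bd the producer is rewound to
 * the start of the spq and spq_prod_idx reset to 0 for the next post, so
 * the SPQ behaves as a circular buffer whose occupancy is guarded by
 * spq_left.
 */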
2711
2712/* must be called under the spq lock */
28912902
MC
2713static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2714{
2715 int func = BP_FUNC(bp);
2716
2717 /* Make sure that BD data is updated before writing the producer */
2718 wmb();
2719
523224a3 2720 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2721 bp->spq_prod_idx);
28912902
MC
2722 mmiowb();
2723}
2724
a2fbb9ea 2725/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2726int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2727 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2728{
28912902 2729 struct eth_spe *spe;
523224a3 2730 u16 type;
a2fbb9ea 2731
a2fbb9ea
ET
2732#ifdef BNX2X_STOP_ON_ERROR
2733 if (unlikely(bp->panic))
2734 return -EIO;
2735#endif
2736
34f80b04 2737 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2738
8fe23fbd 2739 if (!atomic_read(&bp->spq_left)) {
a2fbb9ea 2740 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2741 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2742 bnx2x_panic();
2743 return -EBUSY;
2744 }
f1410647 2745
28912902
MC
2746 spe = bnx2x_sp_get_next(bp);
2747
a2fbb9ea 2748 /* CID needs port number to be encoded int it */
28912902 2749 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2750 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2751 HW_CID(bp, cid));
523224a3 2752
a2fbb9ea 2753 if (common)
523224a3
DK
2754 /* Common ramrods:
2755 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2756 * TRAFFIC_STOP, TRAFFIC_START
2757 */
2758 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2759 & SPE_HDR_CONN_TYPE;
2760 else
2761 /* ETH ramrods: SETUP, HALT */
2762 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2763 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2764
523224a3
DK
2765 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2766 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2767
523224a3
DK
2768 spe->hdr.type = cpu_to_le16(type);
2769
2770 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2771 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2772
2773 /* stats ramrod has its own slot on the spq */
2774 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2775 /* It's ok if the actual decrement is issued towards the memory
2776 * somewhere between the spin_lock and spin_unlock. Thus no
2777 * more explicit memory barrier is needed.
2778 */
8fe23fbd 2779 atomic_dec(&bp->spq_left);
a2fbb9ea 2780
cdaa7cb8 2781 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3
DK
2782 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2783 "type(0x%x) left %x\n",
cdaa7cb8
VZ
2784 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2785 (u32)(U64_LO(bp->spq_mapping) +
2786 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
8fe23fbd 2787 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
cdaa7cb8 2788
28912902 2789 bnx2x_sp_prod_update(bp);
34f80b04 2790 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2791 return 0;
2792}
2793
2794/* acquire split MCP access lock register */
4a37fb66 2795static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2796{
72fd0718 2797 u32 j, val;
34f80b04 2798 int rc = 0;
a2fbb9ea
ET
2799
2800 might_sleep();
72fd0718 2801 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2802 val = (1UL << 31);
2803 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2804 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2805 if (val & (1L << 31))
2806 break;
2807
2808 msleep(5);
2809 }
a2fbb9ea 2810 if (!(val & (1L << 31))) {
19680c48 2811 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2812 rc = -EBUSY;
2813 }
2814
2815 return rc;
2816}
2817
4a37fb66
YG
2818/* release split MCP access lock register */
2819static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2820{
72fd0718 2821 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2822}
2823
523224a3
DK
2824#define BNX2X_DEF_SB_ATT_IDX 0x0001
2825#define BNX2X_DEF_SB_IDX 0x0002
2826
a2fbb9ea
ET
2827static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2828{
523224a3 2829 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2830 u16 rc = 0;
2831
2832 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2833 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2834 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2835 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2836 }
523224a3
DK
2837
2838 if (bp->def_idx != def_sb->sp_sb.running_index) {
2839 bp->def_idx = def_sb->sp_sb.running_index;
2840 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2841 }
523224a3
DK
2842
2843 /* Do not reorder: index reads should complete before handling */
2844 barrier();
a2fbb9ea
ET
2845 return rc;
2846}
2847
2848/*
2849 * slow path service functions
2850 */
2851
2852static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2853{
34f80b04 2854 int port = BP_PORT(bp);
a2fbb9ea
ET
2855 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2856 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2857 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2858 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2859 u32 aeu_mask;
87942b46 2860 u32 nig_mask = 0;
f2e0899f 2861 u32 reg_addr;
a2fbb9ea 2862
a2fbb9ea
ET
2863 if (bp->attn_state & asserted)
2864 BNX2X_ERR("IGU ERROR\n");
2865
3fcaf2e5
EG
2866 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2867 aeu_mask = REG_RD(bp, aeu_addr);
2868
a2fbb9ea 2869 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2870 aeu_mask, asserted);
72fd0718 2871 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2872 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2873
3fcaf2e5
EG
2874 REG_WR(bp, aeu_addr, aeu_mask);
2875 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2876
3fcaf2e5 2877 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2878 bp->attn_state |= asserted;
3fcaf2e5 2879 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2880
2881 if (asserted & ATTN_HARD_WIRED_MASK) {
2882 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2883
a5e9a7cf
EG
2884 bnx2x_acquire_phy_lock(bp);
2885
877e9aa4 2886 /* save nig interrupt mask */
87942b46 2887 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2888 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2889
c18487ee 2890 bnx2x_link_attn(bp);
a2fbb9ea
ET
2891
2892 /* handle unicore attn? */
2893 }
2894 if (asserted & ATTN_SW_TIMER_4_FUNC)
2895 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2896
2897 if (asserted & GPIO_2_FUNC)
2898 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2899
2900 if (asserted & GPIO_3_FUNC)
2901 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2902
2903 if (asserted & GPIO_4_FUNC)
2904 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2905
2906 if (port == 0) {
2907 if (asserted & ATTN_GENERAL_ATTN_1) {
2908 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2909 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2910 }
2911 if (asserted & ATTN_GENERAL_ATTN_2) {
2912 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2913 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2914 }
2915 if (asserted & ATTN_GENERAL_ATTN_3) {
2916 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2917 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2918 }
2919 } else {
2920 if (asserted & ATTN_GENERAL_ATTN_4) {
2921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2923 }
2924 if (asserted & ATTN_GENERAL_ATTN_5) {
2925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2927 }
2928 if (asserted & ATTN_GENERAL_ATTN_6) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2931 }
2932 }
2933
2934 } /* if hardwired */
2935
f2e0899f
DK
2936 if (bp->common.int_block == INT_BLOCK_HC)
2937 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2938 COMMAND_REG_ATTN_BITS_SET);
2939 else
2940 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2941
2942 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2943 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2944 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2945
2946 /* now set back the mask */
a5e9a7cf 2947 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2948 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2949 bnx2x_release_phy_lock(bp);
2950 }
a2fbb9ea
ET
2951}
2952
fd4ef40d
EG
2953static inline void bnx2x_fan_failure(struct bnx2x *bp)
2954{
2955 int port = BP_PORT(bp);
b7737c9b 2956 u32 ext_phy_config;
fd4ef40d 2957 /* mark the failure */
b7737c9b
YR
2958 ext_phy_config =
2959 SHMEM_RD(bp,
2960 dev_info.port_hw_config[port].external_phy_config);
2961
2962 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2963 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2964 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2965 ext_phy_config);
fd4ef40d
EG
2966
2967 /* log the failure */
cdaa7cb8
VZ
2968 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2969 " the driver to shutdown the card to prevent permanent"
2970 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2971}
ab6ad5a4 2972
877e9aa4 2973static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2974{
34f80b04 2975 int port = BP_PORT(bp);
877e9aa4 2976 int reg_offset;
d90d96ba 2977 u32 val;
877e9aa4 2978
34f80b04
EG
2979 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2980 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2981
34f80b04 2982 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2983
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2986 REG_WR(bp, reg_offset, val);
2987
2988 BNX2X_ERR("SPIO5 hw attention\n");
2989
fd4ef40d 2990 /* Fan failure attention */
d90d96ba 2991 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2992 bnx2x_fan_failure(bp);
877e9aa4 2993 }
34f80b04 2994
589abe3a
EG
2995 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2996 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2997 bnx2x_acquire_phy_lock(bp);
2998 bnx2x_handle_module_detect_int(&bp->link_params);
2999 bnx2x_release_phy_lock(bp);
3000 }
3001
34f80b04
EG
3002 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3003
3004 val = REG_RD(bp, reg_offset);
3005 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3006 REG_WR(bp, reg_offset, val);
3007
3008 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3009 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3010 bnx2x_panic();
3011 }
877e9aa4
ET
3012}
3013
3014static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3015{
3016 u32 val;
3017
0626b899 3018 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3019
3020 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3021 BNX2X_ERR("DB hw attention 0x%x\n", val);
3022 /* DORQ discard attention */
3023 if (val & 0x2)
3024 BNX2X_ERR("FATAL error from DORQ\n");
3025 }
34f80b04
EG
3026
3027 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3028
3029 int port = BP_PORT(bp);
3030 int reg_offset;
3031
3032 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3033 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3034
3035 val = REG_RD(bp, reg_offset);
3036 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3037 REG_WR(bp, reg_offset, val);
3038
3039 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3040 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3041 bnx2x_panic();
3042 }
877e9aa4
ET
3043}
3044
3045static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3046{
3047 u32 val;
3048
3049 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3050
3051 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3052 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3053 /* CFC error attention */
3054 if (val & 0x2)
3055 BNX2X_ERR("FATAL error from CFC\n");
3056 }
3057
3058 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3059
3060 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3061 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3062 /* RQ_USDMDP_FIFO_OVERFLOW */
3063 if (val & 0x18000)
3064 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
3065 if (CHIP_IS_E2(bp)) {
3066 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3067 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3068 }
877e9aa4 3069 }
34f80b04
EG
3070
3071 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3072
3073 int port = BP_PORT(bp);
3074 int reg_offset;
3075
3076 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3077 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3078
3079 val = REG_RD(bp, reg_offset);
3080 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3081 REG_WR(bp, reg_offset, val);
3082
3083 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3084 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3085 bnx2x_panic();
3086 }
877e9aa4
ET
3087}
3088
3089static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3090{
34f80b04
EG
3091 u32 val;
3092
877e9aa4
ET
3093 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3094
34f80b04
EG
3095 if (attn & BNX2X_PMF_LINK_ASSERT) {
3096 int func = BP_FUNC(bp);
3097
3098 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3099 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3100 func_mf_config[BP_ABS_FUNC(bp)].config);
3101 val = SHMEM_RD(bp,
3102 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3103 if (val & DRV_STATUS_DCC_EVENT_MASK)
3104 bnx2x_dcc_event(bp,
3105 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
3106
3107 if (val & DRV_STATUS_SET_MF_BW)
3108 bnx2x_set_mf_bw(bp);
3109
34f80b04 3110 bnx2x__link_status_update(bp);
2691d51d 3111 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3112 bnx2x_pmf_update(bp);
3113
e4901dde 3114 if (bp->port.pmf &&
785b9b1a
SR
3115 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3116 bp->dcbx_enabled > 0)
e4901dde
VZ
3117 /* start dcbx state machine */
3118 bnx2x_dcbx_set_params(bp,
3119 BNX2X_DCBX_STATE_NEG_RECEIVED);
34f80b04 3120 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3121
3122 BNX2X_ERR("MC assert!\n");
3123 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3125 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3126 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3127 bnx2x_panic();
3128
3129 } else if (attn & BNX2X_MCP_ASSERT) {
3130
3131 BNX2X_ERR("MCP assert!\n");
3132 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3133 bnx2x_fw_dump(bp);
877e9aa4
ET
3134
3135 } else
3136 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3137 }
3138
3139 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3140 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3141 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3142 val = CHIP_IS_E1(bp) ? 0 :
3143 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3144 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3145 }
3146 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3147 val = CHIP_IS_E1(bp) ? 0 :
3148 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3149 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3150 }
877e9aa4 3151 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3152 }
3153}
3154
72fd0718
VZ
3155#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3156#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3157#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3158#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3159#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
f85582f8 3160
72fd0718
VZ
3161/*
3162 * should be run under rtnl lock
3163 */
3164static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3165{
3166 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3167 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3168 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3169 barrier();
3170 mmiowb();
3171}
3172
3173/*
3174 * should be run under rtnl lock
3175 */
3176static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3177{
3178 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3179 val |= (1 << 16);
3180 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3181 barrier();
3182 mmiowb();
3183}
3184
3185/*
3186 * should be run under rtnl lock
3187 */
9f6c9258 3188bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3189{
3190 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3191 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3192 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3193}
3194
3195/*
3196 * should be run under rtnl lock
3197 */
9f6c9258 3198inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3199{
3200 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3201
3202 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3203
3204 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3205 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3206 barrier();
3207 mmiowb();
3208}
3209
3210/*
3211 * should be run under rtnl lock
3212 */
9f6c9258 3213u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3214{
3215 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3216
3217 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3218
3219 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3220 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3221 barrier();
3222 mmiowb();
3223
3224 return val1;
3225}
3226
3227/*
3228 * should be run under rtnl lock
3229 */
3230static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3231{
3232 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3233}
3234
3235static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3236{
3237 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3238 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3239}
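/*
 * Layout of BNX2X_MISC_GEN_REG as used by the helpers above (per the
 * masks defined before bnx2x_set_reset_done()):
 *
 *	[15:0]	load counter - one count per loaded driver instance
 *	[16]	reset-in-progress flag (RESET_DONE_FLAG_SHIFT)
 *
 * e.g. a value of 0x00010002 would mean two instances loaded and a
 * global reset currently in progress.
 */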
3240
3241static inline void _print_next_block(int idx, const char *blk)
3242{
3243 if (idx)
3244 pr_cont(", ");
3245 pr_cont("%s", blk);
3246}
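/*
 * Output example: driven by the parity walkers below, a two-block
 * failure prints one comma-separated line, e.g. for a device named eth0:
 *
 *	eth0: Parity errors detected in blocks: BRB, QM
 *
 * since _print_next_block() only emits ", " for idx > 0.
 */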
3247
3248static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3249{
3250 int i = 0;
3251 u32 cur_bit = 0;
3252 for (i = 0; sig; i++) {
3253 cur_bit = ((u32)0x1 << i);
3254 if (sig & cur_bit) {
3255 switch (cur_bit) {
3256 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3257 _print_next_block(par_num++, "BRB");
3258 break;
3259 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3260 _print_next_block(par_num++, "PARSER");
3261 break;
3262 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3263 _print_next_block(par_num++, "TSDM");
3264 break;
3265 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3266 _print_next_block(par_num++, "SEARCHER");
3267 break;
3268 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3269 _print_next_block(par_num++, "TSEMI");
3270 break;
3271 }
3272
3273 /* Clear the bit */
3274 sig &= ~cur_bit;
3275 }
3276 }
3277
3278 return par_num;
3279}
3280
3281static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3282{
3283 int i = 0;
3284 u32 cur_bit = 0;
3285 for (i = 0; sig; i++) {
3286 cur_bit = ((u32)0x1 << i);
3287 if (sig & cur_bit) {
3288 switch (cur_bit) {
3289 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3290 _print_next_block(par_num++, "PBCLIENT");
3291 break;
3292 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3293 _print_next_block(par_num++, "QM");
3294 break;
3295 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3296 _print_next_block(par_num++, "XSDM");
3297 break;
3298 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3299 _print_next_block(par_num++, "XSEMI");
3300 break;
3301 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3302 _print_next_block(par_num++, "DOORBELLQ");
3303 break;
3304 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3305 _print_next_block(par_num++, "VAUX PCI CORE");
3306 break;
3307 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3308 _print_next_block(par_num++, "DEBUG");
3309 break;
3310 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3311 _print_next_block(par_num++, "USDM");
3312 break;
3313 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3314 _print_next_block(par_num++, "USEMI");
3315 break;
3316 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3317 _print_next_block(par_num++, "UPB");
3318 break;
3319 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3320 _print_next_block(par_num++, "CSDM");
3321 break;
3322 }
3323
3324 /* Clear the bit */
3325 sig &= ~cur_bit;
3326 }
3327 }
3328
3329 return par_num;
3330}
3331
3332static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3333{
3334 int i = 0;
3335 u32 cur_bit = 0;
3336 for (i = 0; sig; i++) {
3337 cur_bit = ((u32)0x1 << i);
3338 if (sig & cur_bit) {
3339 switch (cur_bit) {
3340 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3341 _print_next_block(par_num++, "CSEMI");
3342 break;
3343 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3344 _print_next_block(par_num++, "PXP");
3345 break;
3346 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3347 _print_next_block(par_num++,
3348 "PXPPCICLOCKCLIENT");
3349 break;
3350 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3351 _print_next_block(par_num++, "CFC");
3352 break;
3353 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3354 _print_next_block(par_num++, "CDU");
3355 break;
3356 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3357 _print_next_block(par_num++, "IGU");
3358 break;
3359 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3360 _print_next_block(par_num++, "MISC");
3361 break;
3362 }
3363
3364 /* Clear the bit */
3365 sig &= ~cur_bit;
3366 }
3367 }
3368
3369 return par_num;
3370}
3371
3372static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3373{
3374 int i = 0;
3375 u32 cur_bit = 0;
3376 for (i = 0; sig; i++) {
3377 cur_bit = ((u32)0x1 << i);
3378 if (sig & cur_bit) {
3379 switch (cur_bit) {
3380 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3381 _print_next_block(par_num++, "MCP ROM");
3382 break;
3383 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3384 _print_next_block(par_num++, "MCP UMP RX");
3385 break;
3386 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3387 _print_next_block(par_num++, "MCP UMP TX");
3388 break;
3389 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3390 _print_next_block(par_num++, "MCP SCPAD");
3391 break;
3392 }
3393
3394 /* Clear the bit */
3395 sig &= ~cur_bit;
3396 }
3397 }
3398
3399 return par_num;
3400}
3401
3402static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3403 u32 sig2, u32 sig3)
3404{
3405 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3406 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3407 int par_num = 0;
3408 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3409 "[0]:0x%08x [1]:0x%08x "
3410 "[2]:0x%08x [3]:0x%08x\n",
3411 sig0 & HW_PRTY_ASSERT_SET_0,
3412 sig1 & HW_PRTY_ASSERT_SET_1,
3413 sig2 & HW_PRTY_ASSERT_SET_2,
3414 sig3 & HW_PRTY_ASSERT_SET_3);
3415 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3416 bp->dev->name);
3417 par_num = bnx2x_print_blocks_with_parity0(
3418 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3419 par_num = bnx2x_print_blocks_with_parity1(
3420 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3421 par_num = bnx2x_print_blocks_with_parity2(
3422 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3423 par_num = bnx2x_print_blocks_with_parity3(
3424 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3425 printk("\n");
3426 return true;
3427 } else
3428 return false;
3429}
3430
bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
				 attn.sig[3]);
}

static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (CHIP_IS_E2(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

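/*
 * The driver mirrors the latched attention lines in bp->attn_state.  A bit
 * counts as newly asserted when it is set in attn_bits but in neither
 * attn_ack nor the mirror, and as newly deasserted in the opposite case;
 * e.g. attn_bits = 0x1, attn_ack = 0x4 and attn_state = 0x4 yield
 * asserted = 0x1 and deasserted = 0x4.
 */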
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
				    attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
				   attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}

#ifdef BCM_CNIC
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
#endif

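/*
 * A worked example for the consumer arithmetic below: the HW counts EQ
 * entries 1..255, 257, ... while the SW consumer runs 0..254, 256, ...,
 * because the last descriptor of every page is a next-page link rather
 * than a real event.  Whenever hw_cons lands on such a link element it is
 * bumped by one so that the "sw_cons != hw_cons" loop still terminates.
 */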
static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next page we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u  bp->spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;

		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
			if (cid == BNX2X_FCOE_ETH_CID)
				bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
			else
#endif
				bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;

		case EVENT_RING_OPCODE_STOP_TRAFFIC:
			DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
			goto next_spqe;
		case EVENT_RING_OPCODE_START_TRAFFIC:
			DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
			goto next_spqe;
		}

		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event; log an error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {
#ifdef BCM_CNIC
		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

		if ((!NO_FCOE(bp)) &&
		    (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
			napi_schedule(&bnx2x_fcoe(bp, napi));
#endif
		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

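/*
 * The periodic timer below also drives the driver<->MCP heartbeat: the
 * driver advances drv_pulse in shared memory and the management FW echoes
 * it back in mcp_pulse.  A delta of 0 (MCP already answered) or 1 (answer
 * still pending) is healthy; e.g. drv_pulse = 0x12 with mcp_pulse = 0x0f
 * means someone missed a beat and is reported below.
 */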
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
	u32 i;
	if (!(len%4) && !(addr%4))
		for (i = 0; i < len; i += 4)
			REG_WR(bp, addr + i, fill);
	else
		for (i = 0; i < len; i++)
			REG_WR8(bp, addr + i, fill);

}

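/*
 * bnx2x_fill() above takes the dword path only when both address and length
 * are 4-byte aligned: len = 64 at an aligned address costs 16 REG_WR()s,
 * while len = 6 falls back to six single-byte REG_WR8()s.
 */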
/* helper: writes FP SP data to FW - data_size in dwords */
static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
				       int fw_sb_id,
				       u32 *sb_data_p,
				       u32 data_size)
{
	int index;
	for (index = 0; index < data_size; index++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			sizeof(u32)*index,
			*(sb_data_p + index));
}

static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}

/* helper: writes SP SB data to FW */
static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
		struct hc_sp_status_block_data *sp_sb_data)
{
	int func = BP_FUNC(bp);
	int i;
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32),
			*((u32 *)sp_sb_data + i));
}

static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
			CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
			CSTORM_SP_SYNC_BLOCK_SIZE);

}

static inline
void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
				    int igu_sb_id, int igu_seg_id)
{
	hc_sm->igu_sb_id = igu_sb_id;
	hc_sm->igu_seg_id = igu_seg_id;
	hc_sm->timer_value = 0xFF;
	hc_sm->time_to_expire = 0xFFFFFFFF;
}

static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm  *hc_sm_p;
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		hc_index_p = sb_data_e2.index_data;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		hc_index_p = sb_data_e1x.index_data;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
					   u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
				       false, tx_usec);
}

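/*
 * Coalescing timeouts are programmed in BNX2X_BTR ticks, hence the plain
 * ticks = usec / BNX2X_BTR conversion above; a status block index is
 * disabled either explicitly or implicitly by requesting 0 usec.
 */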
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
	sp_sb_data.igu_seg_id		= igu_seg_id;
	sp_sb_data.p_func.pf_id		= func;
	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
	sp_sb_data.p_func.vf_id		= 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

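/*
 * The event queue below is a chain of pages whose last element is a
 * next-page pointer instead of a real event; since page i links to page
 * (i % NUM_EQ_PAGES), the final page wraps back to the first, forming a
 * circular ring with NUM_EQ_DESC usable descriptors.
 */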
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % (bp->num_queues -
				NONE_ETH_CONTEXT_USE)));
}

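/*
 * The indirection table above spreads RSS hash results over the ethernet
 * clients round-robin: entry i maps to cl_id + (i % number of ethernet
 * queues), so with 4 queues entries 0, 4, 8, ... all select queue 0.
 */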
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	int port = BP_PORT(bp);
	u16 cl_id;
	u32 def_q_filters = 0;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		break;

	case BNX2X_RX_MODE_NORMAL:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST |
						  BNX2X_ACCEPT_MULTICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
		/*
		 * Prevent duplication of multicast packets by configuring FCoE
		 * L2 Client to receive only matched unicast frames.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_PROMISC:
		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
		/*
		 * Prevent packet duplication by configuring DROP_ALL for the
		 * FCoE L2 Client.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	cl_id = BP_L_ID(bp);
	bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
		       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
		"unmatched_ucast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all,
		bp->mac_filters.unmatched_unicast
	);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not,
		 * in switch-independent we'll write 0 to here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	if (IS_MF_SI(bp))
		/*
		 * In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to NIG LLH
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
	bnx2x_dcb_init_intmem_pfc(bp);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals the FW (per path) client id */
	fp->cl_qzone_id  = fp->cl_id +
			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
			   "cl_id %d  fw_sb %d  igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}

void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
		   " decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);
	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

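/*
 * bnx2x_gunzip() below parses the gzip stream by hand: after the fixed
 * 10-byte header it optionally skips the NUL-terminated original-file-name
 * field (FNAME, bit 3 of the flags byte at offset 3) and inflates the rest
 * as a raw deflate stream (hence the -MAX_WBITS window parameter).
 */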
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

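/*
 * Each debug frame above is pushed as two 3-dword writes: the first carries
 * the dummy Ethernet addresses with the SOP flag (0x20), the second a
 * non-IP protocol word with the EOP flag (0x10).  The NIG statistics are
 * expected to grow by 0x10 octets per injected frame, which is what the
 * self-test below polls for.
 */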
/* some of the internal memories
 * are not directly readable from the driver;
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
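
/*
 * A note on the devctl decoding above: in the PCIe Device Control register
 * the payload-size field sits in bits 7:5 and the read-request-size field
 * in bits 14:12, both encoding size = 128 << value; e.g. devctl = 0x2810
 * gives w_order 0 (128B payload) and r_order 2 (512B reads) unless the
 * mrrs module parameter overrides the read order.
 */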

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
{
	u32 offset = 0;

	if (CHIP_IS_E1(bp))
		return;
	if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
		return;

	switch (BP_ABS_FUNC(bp)) {
	case 0:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
		break;
	case 1:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
		break;
	case 2:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
		break;
	case 3:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
		break;
	case 4:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
		break;
	case 5:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
		break;
	case 6:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
		break;
	case 7:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
		break;
	default:
		return;
	}

	REG_WR(bp, offset, pretend_func_num);
	REG_RD(bp, offset);
	DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
}

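/*
 * Typical pretend usage, as in the common init below: temporarily claim
 * another absolute function id, touch that function's registers, and always
 * restore our own identity afterwards:
 *
 *	bnx2x_pretend_func(bp, fid);
 *	... access per-function registers as 'fid' ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 */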
static void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

523224a3 4964static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4965{
a2fbb9ea 4966 u32 val, i;
a2fbb9ea 4967
f2e0899f 4968 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4969
81f75bbf 4970 bnx2x_reset_common(bp);
34f80b04
EG
4971 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4972 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4973
94a78b79 4974 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4975 if (!CHIP_IS_E1(bp))
fb3bff17 4976 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4977
f2e0899f
DK
4978 if (CHIP_IS_E2(bp)) {
4979 u8 fid;
4980
4981 /**
4982 * 4-port mode or 2-port mode we need to turn of master-enable
4983 * for everyone, after that, turn it back on for self.
4984 * so, we disregard multi-function or not, and always disable
4985 * for all functions on the given path, this means 0,2,4,6 for
4986 * path 0 and 1,3,5,7 for path 1
4987 */
4988 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4989 if (fid == BP_ABS_FUNC(bp)) {
4990 REG_WR(bp,
4991 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4992 1);
4993 continue;
4994 }
4995
4996 bnx2x_pretend_func(bp, fid);
4997 /* clear pf enable */
4998 bnx2x_pf_disable(bp);
4999 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5000 }
5001 }
a2fbb9ea 5002
94a78b79 5003 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5004 if (CHIP_IS_E1(bp)) {
5005 /* enable HW interrupt from PXP on USDM overflow
5006 bit 16 on INT_MASK_0 */
5007 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5008 }
a2fbb9ea 5009
94a78b79 5010 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5011 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5012
5013#ifdef __BIG_ENDIAN
34f80b04
EG
5014 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5015 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5016 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5017 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5018 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5019 /* make sure this value is 0 */
5020 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5021
5022/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5023 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5024 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5025 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5026 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5027#endif
5028
523224a3
DK
5029 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5030
34f80b04
EG
5031 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5032 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5033
34f80b04
EG
5034 /* let the HW do it's magic ... */
5035 msleep(100);
5036 /* finish PXP init */
5037 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5038 if (val != 1) {
5039 BNX2X_ERR("PXP2 CFG failed\n");
5040 return -EBUSY;
5041 }
5042 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5043 if (val != 1) {
5044 BNX2X_ERR("PXP2 RD_INIT failed\n");
5045 return -EBUSY;
5046 }
a2fbb9ea 5047
f2e0899f
DK
5048 /* Timers bug workaround E2 only. We need to set the entire ILT to
5049 * have entries with value "0" and valid bit on.
5050 * This needs to be done by the first PF that is loaded in a path
5051 * (i.e. common phase)
5052 */
5053 if (CHIP_IS_E2(bp)) {
5054 struct ilt_client_info ilt_cli;
5055 struct bnx2x_ilt ilt;
5056 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5057 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5058
b595076a 5059 /* initialize dummy TM client */
f2e0899f
DK
5060 ilt_cli.start = 0;
5061 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5062 ilt_cli.client_num = ILT_CLIENT_TM;
5063
5064 /* Step 1: set zeroes to all ilt page entries with valid bit on
5065 * Step 2: set the timers first/last ilt entry to point
5066 * to the entire range to prevent ILT range error for 3rd/4th
5067 * vnic (this code assumes existance of the vnic)
5068 *
5069 * both steps performed by call to bnx2x_ilt_client_init_op()
5070 * with dummy TM client
5071 *
5072 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5073 * and his brother are split registers
5074 */
5075 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5076 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5077 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5078
5079 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5080 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5081 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5082 }
5083
5084
34f80b04
EG
5085 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5086 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5087
f2e0899f
DK
5088 if (CHIP_IS_E2(bp)) {
5089 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5090 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5091 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5092
5093 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5094
5095 /* let the HW do it's magic ... */
5096 do {
5097 msleep(200);
5098 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5099 } while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
	}

	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		int has_ovlan = IS_MF_SD(bp);
		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp)) {
		int has_ovlan = IS_MF_SD(bp);
		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
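	/*
	 * The loop above seeds every searcher RSS key register in the
	 * SRC_REG_KEYRSS0_0..SRC_REG_KEYRSS1_9 range (one 32-bit word per
	 * 4-byte step) with random values while the block is held in reset.
	 */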

	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
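	/*
	 * The low 12 bits of CDU_GLOBAL_PARAMS carry the context size (1024,
	 * matching the sizeof(union cdu_context) check above). The purpose of
	 * the upper fields (4 << 24, 0 << 12) is not documented here; any
	 * further decoding would be an assumption about the CDU block.
	 */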

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
	}
	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	if (CHIP_PARITY_ENABLED(bp))
		bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		/* In E2 2-PORT mode, same ext phy is used for the two paths */
		if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
		    CHIP_IS_E1x(bp)) {
			u32 shmem_base[2], shmem2_base[2];
			shmem_base[0] = bp->common.shmem_base;
			shmem2_base[0] = bp->common.shmem2_base;
			if (CHIP_IS_E2(bp)) {
				shmem_base[1] =
					SHMEM2_RD(bp, other_shmem_base_addr);
				shmem2_base[1] =
					SHMEM2_RD(bp, other_shmem2_base_addr);
			}
			bnx2x_acquire_phy_lock(bp);
			bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
					      bp->common.chip_id);
			bnx2x_release_phy_lock(bp);
		}
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: the common phase disables the pf_master
	 * bit in pglue; we need to enable it here, before any DMAE
	 * accesses are attempted. Therefore we manually add the
	 * enable-master to the port phase (it also happens in the
	 * function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56;	/* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}
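	/*
	 * Worked example for the MTU-based threshold above: with mtu == 9000,
	 * low = 96 + 9000/64 + 1 = 237, i.e. the ceiling of
	 * (24*1024 + 9000*4)/256, and high = 237 + 56 = 293. The divisor of
	 * 256 suggests the thresholds are in 256-byte BRB blocks.
	 */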

	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
					  BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}

static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		   set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers bug workaround: function init part.
		 * Need to wait 20 msec after initializing ILT to make sure
		 * there are no requests in one of the PXP internal queues
		 * with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - must be done here because WB DMAE writes
		 * are performed before this register is re-initialized as
		 * part of the regular function init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
					  "block during "
					  "function init (0x%x)!\n", val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}
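	/*
	 * The DMAE read/write-back pass above rewrites each row of the HC
	 * main memory with the data that was just read, which presumably
	 * refreshes the row's parity; the final read of the PRTY_STS_CLR
	 * register then clears the latched parity status (the _CLR status
	 * registers used by this driver clear on read).
	 */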

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		bp->fw_drv_pulse_wr_seq =
		   (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
		    DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
#ifdef BCM_CNIC
		/* FCoE client uses default status block */
		if (IS_FCOE_IDX(i)) {
			union host_hc_status_block *sb =
				&bnx2x_fp(bp, i, status_blk);
			memset(sb, 0, sizeof(union host_hc_status_block));
			bnx2x_fp(bp, i, status_blk_mapping) = 0;
		} else {
#endif
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
		}
#endif
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
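	/*
	 * Both helper macros bail out through alloc_mem_err on failure, so a
	 * partially completed allocation sequence is unwound by the single
	 * bnx2x_free_mem() call at the end of this function.
	 */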

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
#ifdef BCM_CNIC
		if (!IS_FCOE_IDX(i)) {
#endif
			if (CHIP_IS_E2(bp))
				BNX2X_PCI_ALLOC(sb->e2_sb,
				    &bnx2x_fp(bp, i, status_blk_mapping),
				    sizeof(struct host_hc_status_block_e2));
			else
				BNX2X_PCI_ALLOC(sb->e1x_sb,
				    &bnx2x_fp(bp, i, status_blk_mapping),
				    sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
		}
#endif
		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

/*
 * Init service functions
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags);

int bnx2x_func_start(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
				 WAIT_RAMROD_COMMON);
}

static int bnx2x_func_stop(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
				 0, &(bp->state), WAIT_RAMROD_COMMON);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param is_bcast is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
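	/*
	 * Note: on a little-endian host, swab16(*(u16 *)&mac[0]) evaluates to
	 * (mac[0] << 8) | mac[1], so a MAC beginning 00:1b:... yields
	 * msb_mac_addr == 0x001b; the entry thus holds the address bytes in
	 * wire order (stated here as an observation, not from a datasheet).
	 */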
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
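	/* (with the msleep(1) below, that is roughly a 5 second timeout) */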
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
	if (CHIP_IS_E1H(bp))
		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
	else if (CHIP_MODE_IS_4_PORT(bp))
		return BP_FUNC(bp) * 32 + rel_offset;
	else
		return BP_VN(bp) * 32 + rel_offset;
}
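/*
 * Example of the mapping above, assuming E1H_FUNC_MAX == 8 (eight PCI
 * functions): function 3 asking for relative offset 1 gets CAM line
 * 8*1 + 3 = 11, i.e. lines are grouped by offset with one slot per function.
 * On the other chips the grouping is inverted: 32 consecutive lines per
 * function (or VN), indexed by the relative offset.
 */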
6293
0793f83f
DK
6294/**
6295 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6296 * relevant. In addition, current implementation is tuned for a
6297 * single ETH MAC.
6298 *
6299 * When multiple unicast ETH MACs PF configuration in switch
6300 * independent mode is required (NetQ, multiple netdev MACs,
6301 * etc.), consider better utilisation of 16 per function MAC
6302 * entries in the LLH memory.
6303 */
6304enum {
6305 LLH_CAM_ISCSI_ETH_LINE = 0,
6306 LLH_CAM_ETH_LINE,
6307 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6308};
6309
6310static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6311 int set,
6312 unsigned char *dev_addr,
6313 int index)
6314{
6315 u32 wb_data[2];
6316 u32 mem_offset, ena_offset, mem_index;
6317 /**
6318 * indexes mapping:
6319 * 0..7 - goes to MEM
6320 * 8..15 - goes to MEM2
6321 */
6322
6323 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6324 return;
6325
6326 /* calculate memory start offset according to the mapping
6327 * and index in the memory */
6328 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6329 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6330 NIG_REG_LLH0_FUNC_MEM;
6331 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6332 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6333 mem_index = index;
6334 } else {
6335 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6336 NIG_REG_P0_LLH_FUNC_MEM2;
6337 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6338 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6339 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6340 }
6341
6342 if (set) {
6343 /* LLH_FUNC_MEM is a u64 WB register */
6344 mem_offset += 8*mem_index;
6345
6346 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6347 (dev_addr[4] << 8) | dev_addr[5]);
6348 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
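		/*
		 * e.g. for dev_addr 00:11:22:33:44:55 this packs
		 * wb_data[0] = 0x22334455 and wb_data[1] = 0x00000011,
		 * i.e. the last four MAC bytes in the low word and the
		 * first two in the high word of the 64-bit entry.
		 */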

		REG_WR_DMAE(bp, mem_offset, wb_data, 2);
	}

	/* enable/disable the entry */
	REG_WR(bp, ena_offset + 4*mem_index, set);

}

void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
			 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));

	/* networking MAC */
	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
			       (1 << bp->fp->cl_id), cam_offset, 0);

	bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);

	if (CHIP_IS_E1(bp)) {
		/* broadcast MAC */
		static const u8 bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};
		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
	}
}

static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}

static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	for (i = 0; i < config_cmd->hdr.length; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
				ramrod_flags);

}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);

	bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);

	return 0;
}

/**
 * Set FCoE L2 MAC(s) at the next entries in the CAM after the
 * ETH MAC(s). This function will wait until the ramrod
 * completion returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't return.
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
	/**
	 * CAM allocation for E1H
	 * eth unicasts: by func number
	 * iscsi: by func number
	 * fip unicast: by func number
	 * fip multicast: by func number
	 */
	bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
		cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);

	return 0;
}

int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));

	/**
	 * CAM allocation for E1H
	 * eth unicasts: by func number
	 * iscsi: by func number
	 * fip unicast: by func number
	 * fip multicast: by func number
	 */
	bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
		bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);

	return 0;
}
#endif
6531
523224a3
DK
6532static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6533 struct bnx2x_client_init_params *params,
6534 u8 activate,
6535 struct client_init_ramrod_data *data)
6536{
6537 /* Clear the buffer */
6538 memset(data, 0, sizeof(*data));
6539
6540 /* general */
6541 data->general.client_id = params->rxq_params.cl_id;
6542 data->general.statistics_counter_id = params->rxq_params.stat_id;
6543 data->general.statistics_en_flg =
6544 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
ec6ba945
VZ
6545 data->general.is_fcoe_flg =
6546 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
523224a3
DK
6547 data->general.activate_flg = activate;
6548 data->general.sp_client_id = params->rxq_params.spcl_id;
6549
6550 /* Rx data */
6551 data->rx.tpa_en_flg =
6552 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6553 data->rx.vmqueue_mode_en_flg = 0;
6554 data->rx.cache_line_alignment_log_size =
6555 params->rxq_params.cache_line_log;
6556 data->rx.enable_dynamic_hc =
6557 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6558 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6559 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6560 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6561
6562 /* We don't set drop flags */
6563 data->rx.drop_ip_cs_err_flg = 0;
6564 data->rx.drop_tcp_cs_err_flg = 0;
6565 data->rx.drop_ttl0_flg = 0;
6566 data->rx.drop_udp_cs_err_flg = 0;
6567
6568 data->rx.inner_vlan_removal_enable_flg =
6569 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6570 data->rx.outer_vlan_removal_enable_flg =
6571 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6572 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6573 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6574 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6575 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6576 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6577 data->rx.bd_page_base.lo =
6578 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6579 data->rx.bd_page_base.hi =
6580 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6581 data->rx.sge_page_base.lo =
6582 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6583 data->rx.sge_page_base.hi =
6584 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6585 data->rx.cqe_page_base.lo =
6586 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6587 data->rx.cqe_page_base.hi =
6588 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6589 data->rx.is_leading_rss =
6590 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6591 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6592
6593 /* Tx data */
6594 data->tx.enforce_security_flg = 0; /* VF specific */
6595 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6596 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6597 data->tx.mtu = 0; /* VF specific */
6598 data->tx.tx_bd_page_base.lo =
6599 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6600 data->tx.tx_bd_page_base.hi =
6601 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6602
6603 /* flow control data */
6604 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6605 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6606 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6607 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6608 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6609 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6610 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6611
6612 data->fc.safc_group_num = params->txq_params.cos;
6613 data->fc.safc_group_en_flg =
6614 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
ec6ba945
VZ
6615 data->fc.traffic_type =
6616 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6617 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
523224a3
DK
6618}
6619
6620static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6621{
6622 /* ustorm cxt validation */
6623 cxt->ustorm_ag_context.cdu_usage =
6624 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6625 ETH_CONNECTION_TYPE);
6626 /* xcontext validation */
6627 cxt->xstorm_ag_context.cdu_reserved =
6628 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6629 ETH_CONNECTION_TYPE);
6630}
6631
8d96286a 6632static int bnx2x_setup_fw_client(struct bnx2x *bp,
6633 struct bnx2x_client_init_params *params,
6634 u8 activate,
6635 struct client_init_ramrod_data *data,
6636 dma_addr_t data_mapping)
523224a3
DK
6637{
6638 u16 hc_usec;
6639 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6640 int ramrod_flags = 0, rc;
6641
6642 /* HC and context validation values */
6643 hc_usec = params->txq_params.hc_rate ?
6644 1000000 / params->txq_params.hc_rate : 0;
6645 bnx2x_update_coalesce_sb_index(bp,
6646 params->txq_params.fw_sb_id,
6647 params->txq_params.sb_cq_index,
6648 !(params->txq_params.flags & QUEUE_FLG_HC),
6649 hc_usec);
6650
6651 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6652
6653 hc_usec = params->rxq_params.hc_rate ?
6654 1000000 / params->rxq_params.hc_rate : 0;
6655 bnx2x_update_coalesce_sb_index(bp,
6656 params->rxq_params.fw_sb_id,
6657 params->rxq_params.sb_cq_index,
6658 !(params->rxq_params.flags & QUEUE_FLG_HC),
6659 hc_usec);
6660
6661 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6662 params->rxq_params.cid);
6663
6664 /* zero stats */
6665 if (params->txq_params.flags & QUEUE_FLG_STATS)
6666 storm_memset_xstats_zero(bp, BP_PORT(bp),
6667 params->txq_params.stat_id);
6668
6669 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6670 storm_memset_ustats_zero(bp, BP_PORT(bp),
6671 params->rxq_params.stat_id);
6672 storm_memset_tstats_zero(bp, BP_PORT(bp),
6673 params->rxq_params.stat_id);
6674 }
6675
6676 /* Fill the ramrod data */
6677 bnx2x_fill_cl_init_data(bp, params, activate, data);
6678
6679 /* SETUP ramrod.
6680 *
6681 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6682 * barrier except from mmiowb() is needed to impose a
6683 * proper ordering of memory operations.
6684 */
6685 mmiowb();
a2fbb9ea 6686
a2fbb9ea 6687
523224a3
DK
6688 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6689 U64_HI(data_mapping), U64_LO(data_mapping), 0);
a2fbb9ea 6690
34f80b04 6691 /* Wait for completion */
523224a3
DK
6692 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6693 params->ramrod_params.index,
6694 params->ramrod_params.pstate,
6695 ramrod_flags);
34f80b04 6696 return rc;
a2fbb9ea
ET
6697}

/**
 * Configure interrupt mode according to the current configuration.
 * In case of MSI-X it will also try to enable MSI-X.
 *
 * @param bp
 *
 * @return int
 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
				   "Multi requested but failed to "
				   "enable MSI-X (%d), "
				   "set number of queues to %d\n",
				   bp->num_queues,
				   1 + NONE_ETH_CONTEXT_USE);
			bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;

			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}

/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}

void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
				 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}

int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	/* reset IGU state; skip this for the FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

#ifdef BCM_CNIC
	if (IS_FCOE_FP(fp))
		params.ramrod_params.flags |= CLIENT_IS_FCOE;

#endif

	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
				   bnx2x_sp(bp, client_init_data),
				   bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}

static int bnx2x_stop_fw_client(struct bnx2x *bp,
				struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
		      p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
		      p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}
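
/*
 * Illustrative sketch (compiled out): bnx2x_stop_fw_client() above drives a
 * fixed three-step teardown, each step being a ramrod post followed by a
 * wait for the matching fastpath state. The hypothetical table below just
 * restates that sequence; the real code keeps it inline.
 */
#if 0
static const struct {
	int cmd;		/* ramrod command posted */
	u32 wait_state;		/* fastpath state that signals completion */
} example_stop_sequence[] = {
	{ RAMROD_CMD_ID_ETH_HALT,	BNX2X_FP_STATE_HALTED },
	{ RAMROD_CMD_ID_ETH_TERMINATE,	BNX2X_FP_STATE_TERMINATED },
	{ RAMROD_CMD_ID_COMMON_CFC_DEL,	BNX2X_FP_STATE_CLOSED },
};
#endif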

static int bnx2x_stop_client(struct bnx2x *bp, int index)
{
	struct bnx2x_client_ramrod_params client_stop = {0};
	struct bnx2x_fastpath *fp = &bp->fp[index];

	client_stop.index = index;
	client_stop.cid = fp->cid;
	client_stop.cl_id = fp->cl_id;
	client_stop.pstate = &(fp->state);
	client_stop.poll = 0;

	return bnx2x_stop_fw_client(bp, &client_stop);
}


static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			(CHIP_IS_E2(bp) ?
			 offsetof(struct hc_status_block_data_e2, common) :
			 offsetof(struct hc_status_block_data_e1x, common));
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);


	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ILT range for these timers.
	 */
	if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() was called before reset_func() */
	if (CHIP_IS_E2(bp))
		bnx2x_pf_disable(bp);

	bp->dmae_ready = 0;
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty, %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_ABS_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

#ifdef BCM_CNIC
static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
{
	if (bp->flags & FCOE_MACS_SET) {
		if (!IS_MF_SD(bp))
			bnx2x_set_fip_eth_mac_addr(bp, 0);

		bnx2x_set_all_enode_macs(bp, 0);

		bp->flags &= ~FCOE_MACS_SET;
	}
}
#endif

void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return; /* void function: cannot return -EBUSY */
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* invalidate mc list,
		 * wait and poll (interrupts are off)
		 */
		bnx2x_invlidate_e1_mc_list(bp);
		bnx2x_set_eth_mac(bp, 0);

	} else {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

#ifdef BCM_CNIC
	bnx2x_del_fcoe_eth_macs(bp);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_queue(bp, i)

		if (bnx2x_stop_client(bp, i))
#ifdef BNX2X_STOP_ON_ERROR
			return;
#else
			goto unload_error;
#endif

	rc = bnx2x_func_stop(bp);
	if (rc) {
		BNX2X_ERR("Function stop failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#else
		goto unload_error;
#endif
	}
#ifndef BNX2X_STOP_ON_ERROR
unload_error:
#endif
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code, 0);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
				     "%d, %d, %d\n", BP_PATH(bp),
		   load_count[BP_PATH(bp)][0],
		   load_count[BP_PATH(bp)][1],
		   load_count[BP_PATH(bp)][2]);
		load_count[BP_PATH(bp)][0]--;
		load_count[BP_PATH(bp)][1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
				     "%d, %d, %d\n", BP_PATH(bp),
		   load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
		   load_count[BP_PATH(bp)][2]);
		if (load_count[BP_PATH(bp)][0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[BP_PATH(bp)][1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 1);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

}
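
/*
 * Illustrative sketch (compiled out): the WoL path above splits the 6-byte
 * station address across two 32-bit EMAC_REG_EMAC_MAC_MATCH words, high two
 * bytes first. A hypothetical stand-alone version of that packing:
 */
#if 0
static void example_pack_wol_mac(const u8 *mac_addr, u32 *hi, u32 *lo)
{
	/* e.g. 00:10:18:ab:cd:ef -> hi = 0x00000010, lo = 0x18abcdef */
	*hi = (mac_addr[0] << 8) | mac_addr[1];
	*lo = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];
}
#endif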

void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				  MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}

/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param pdev Device handle.
 * @param magic_val Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
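
/*
 * Illustrative sketch (compiled out): bnx2x_clp_reset_prep()/_done() above
 * implement a save-set/restore pattern on a single bit. The hypothetical
 * helper below shows the same bit arithmetic on a plain word.
 */
#if 0
static void example_clp_magic(u32 *word, u32 *saved)
{
	*saved = *word & SHARED_MF_CLP_MAGIC;	/* remember the old bit */
	*word |= SHARED_MF_CLP_MAGIC;		/* force it on */

	/* ... the MCP reset happens here ... */

	*word = (*word & ~SHARED_MF_CLP_MAGIC) | *saved; /* put it back */
}
#endif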

/**
 * Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val Old value of the 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}

static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: it's best to check the validity map of the last port.
		 * Currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}

static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
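
/*
 * Illustrative sketch (compiled out): the reset above asserts every block
 * except those in the "not reset" masks by writing reset_mask & ~keep_mask
 * to the CLEAR register. A hypothetical demonstration of the arithmetic:
 */
#if 0
static u32 example_reset_bits(u32 reset_mask, u32 keep_mask)
{
	/* e.g. reset_mask 0xffff, keep_mask 0x0101 -> 0xfefe asserted */
	return reset_mask & ~keep_mask;
}
#endif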

static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
				 " are still outstanding read requests"
				 " after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
				 " port_is_idle_0=0x%08x,"
				 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}

static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}

/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
						  HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all functions
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								      PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released leadership for another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/*
 * Init service functions
 */

static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
	return base + (BP_ABS_FUNC(bp)) * stride;
}
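
/*
 * Illustrative sketch (compiled out): the pretend registers form a fixed-
 * stride array indexed by absolute function number, so the lookup above is
 * just base + func * stride. The values below are hypothetical and only
 * demonstrate the address arithmetic.
 */
#if 0
static u32 example_pretend_reg(u32 base, u32 stride, int abs_func)
{
	/* e.g. base 0x120000, stride 0x10, func 3 -> 0x120030 (made up) */
	return base + abs_func * stride;
}
#endif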

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (CHIP_IS_E1(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our pf_num */
			int orig_pf_num = bp->pf_num;
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->pf_num = 0;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
						 DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->pf_num = 1;
				bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->pf_num = orig_pf_num;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else /* E2 */
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
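
/*
 * Illustrative sketch (compiled out): the chip id assembled at the top of
 * bnx2x_get_common_hwinfo() packs four register fields into one 32-bit
 * word (num:16-31, rev:12-15, metal:4-11, bond_id:0-3). Hypothetical
 * stand-alone form of that packing:
 */
#if 0
static u32 example_pack_chip_id(u32 num, u32 rev, u32 metal, u32 bond_id)
{
	return ((num & 0xffff) << 16) |
	       ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) |
	       (bond_id & 0xf);
}
#endif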

#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int vn = BP_E1HVN(bp);
	int igu_sb_id;
	u32 val;
	u8 fid;

	bp->igu_base_sb = 0xff;
	bp->igu_sb_cnt = 0;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));

		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				bp->igu_sb_cnt++;
			}
		}
	}
	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
			       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	if (bp->igu_sb_cnt == 0)
		BNX2X_ERR("CAM configuration error\n");
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config. "
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
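
/*
 * Illustrative sketch (compiled out): each NVRAM speed-capability bit above
 * gates the corresponding ethtool SUPPORTED_* flag; a capability bit that
 * is clear strips that speed from the aggregated mask. Hypothetical form of
 * one such filter step:
 */
#if 0
static u32 example_filter_supported(u32 supported, u32 cap_mask,
				    u32 cap_bit, u32 supported_bit)
{
	/* e.g. a cap mask without the 10G bit drops SUPPORTED_10000baseT_Full */
	if (!(cap_mask & cap_bit))
		supported &= ~supported_bit;
	return supported;
}
#endif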

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
				SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
				bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl"
			       " 0x%x  advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}

static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
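
/*
 * Illustrative usage sketch (compiled out): bnx2x_set_mac_buf() above
 * rebuilds the 6-byte station address from shmem's {upper 16 bits, lower
 * 32 bits} pair, in big-endian order so mac_buf[0] is the most significant
 * byte. The values below are hypothetical.
 */
#if 0
static void example_set_mac_buf(void)
{
	u8 mac_buf[ETH_ALEN];

	/* mac_hi 0x0010 + mac_lo 0x18abcdef -> 00:10:18:ab:cd:ef */
	bnx2x_set_mac_buf(mac_buf, 0x18abcdef, 0x0010);
}
#endif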

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  "
		       "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	/*
	 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s).
	 * In MF mode, it is set to cover self-test cases
	 */
	if (IS_MF(bp))
		bp->port.need_hw_lock = 1;
	else
		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
							bp->common.shmem_base,
							bp->common.shmem2_base);
}

static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);

	if (BP_NOMCP(bp)) {
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
	} else if (IS_MF(bp)) {
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		/* iSCSI NPAR MAC */
		if (IS_MF_SI(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						iscsi_mac_addr_lower);
				bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
			}
		}
#endif
	} else {
		/* in SF read MACs from port configuration */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       iscsi_mac_lower);
		bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
	}

	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* Inform the upper layers about FCoE MAC */
	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_SD(bp))
			memcpy(bp->fip_mac, bp->dev->dev_addr,
			       sizeof(bp->fip_mac));
		else
			memcpy(bp->fip_mac, bp->iscsi_mac,
			       sizeof(bp->fip_mac));
	}
#endif
}
8466
8467static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8468{
0793f83f
DK
8469 int /*abs*/func = BP_ABS_FUNC(bp);
8470 int vn, port;
8471 u32 val = 0;
34f80b04 8472 int rc = 0;
a2fbb9ea 8473
34f80b04 8474 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8475
f2e0899f
DK
8476 if (CHIP_IS_E1x(bp)) {
8477 bp->common.int_block = INT_BLOCK_HC;
8478
8479 bp->igu_dsb_id = DEF_SB_IGU_ID;
8480 bp->igu_base_sb = 0;
ec6ba945
VZ
8481 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8482 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
8483 } else {
8484 bp->common.int_block = INT_BLOCK_IGU;
8485 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8486 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8487 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8488 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8489 } else
8490 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8491
f2e0899f
DK
8492 bnx2x_get_igu_cam_info(bp);
8493
8494 }
8495 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8496 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8497
8498 /*
8499 * Initialize MF configuration
8500 */
523224a3 8501
fb3bff17
DK
8502 bp->mf_ov = 0;
8503 bp->mf_mode = 0;
f2e0899f 8504 vn = BP_E1HVN(bp);
0793f83f
DK
8505 port = BP_PORT(bp);
8506
f2e0899f 8507 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8508 DP(NETIF_MSG_PROBE,
8509 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8510 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8511 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8512 if (SHMEM2_HAS(bp, mf_cfg_addr))
8513 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8514 else
8515 bp->common.mf_cfg_base = bp->common.shmem_base +
8516 offsetof(struct shmem_region, func_mb) +
8517 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8518 /*
8519 * get mf configuration:
8520 * 1. existence of MF configuration
8521 * 2. MAC address must be legal (check only upper bytes)
8522 * for Switch-Independent mode;
8523 * OVLAN must be legal for Switch-Dependent mode
8524 * 3. SF_MODE configures specific MF mode
8525 */
8526 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8527 /* get mf configuration */
8528 val = SHMEM_RD(bp,
8529 dev_info.shared_feature_config.config);
8530 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8531
8532 switch (val) {
8533 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8534 val = MF_CFG_RD(bp, func_mf_config[func].
8535 mac_upper);
8536 /* check for a legal MAC (upper bytes) */
8537 if (val != 0xffff) {
8538 bp->mf_mode = MULTI_FUNCTION_SI;
8539 bp->mf_config[vn] = MF_CFG_RD(bp,
8540 func_mf_config[func].config);
8541 } else
8542 DP(NETIF_MSG_PROBE, "illegal MAC "
8543 "address for SI\n");
8544 break;
8545 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8546 /* get OV configuration */
8547 val = MF_CFG_RD(bp,
8548 func_mf_config[FUNC_0].e1hov_tag);
8549 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8550
8551 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8552 bp->mf_mode = MULTI_FUNCTION_SD;
8553 bp->mf_config[vn] = MF_CFG_RD(bp,
8554 func_mf_config[func].config);
8555 } else
8556 DP(NETIF_MSG_PROBE, "illegal OV for "
8557 "SD\n");
8558 break;
8559 default:
8560 /* Unknown configuration: reset mf_config */
8561 bp->mf_config[vn] = 0;
8562 DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
8563 val);
8564 }
8565 }
a2fbb9ea 8566
2691d51d 8567 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8568 IS_MF(bp) ? "multi" : "single");
2691d51d 8569
8570 switch (bp->mf_mode) {
8571 case MULTI_FUNCTION_SD:
8572 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8573 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 8574 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8575 bp->mf_ov = val;
8576 BNX2X_DEV_INFO("MF OV for func %d is %d"
8577 " (0x%04x)\n", func,
8578 bp->mf_ov, bp->mf_ov);
2691d51d 8579 } else {
8580 BNX2X_ERR("No valid MF OV for func %d,"
8581 " aborting\n", func);
8582 rc = -EPERM;
8583 }
8584 break;
8585 case MULTI_FUNCTION_SI:
8586 BNX2X_DEV_INFO("func %d is in MF "
8587 "switch-independent mode\n", func);
8588 break;
8589 default:
8590 if (vn) {
8591 BNX2X_ERR("VN %d in single function mode,"
8592 " aborting\n", vn);
8593 rc = -EPERM;
8594 }
0793f83f 8595 break;
34f80b04 8596 }
0793f83f 8597
34f80b04 8598 }
a2fbb9ea 8599
8600 /* adjust igu_sb_cnt to MF for E1x */
8601 if (CHIP_IS_E1x(bp) && IS_MF(bp))
8602 bp->igu_sb_cnt /= E1HVN_MAX;
8603
8604 /*
8605 * adjust E2 sb count: to be removed when the FW supports
8606 * more than 16 L2 clients
8607 */
8608#define MAX_L2_CLIENTS 16
8609 if (CHIP_IS_E2(bp))
8610 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8611 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8612
8613 if (!BP_NOMCP(bp)) {
8614 bnx2x_get_port_hwinfo(bp);
8615
8616 bp->fw_seq =
8617 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8618 DRV_MSG_SEQ_NUMBER_MASK);
8619 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8620 }
8621
8622 /* Get MAC addresses */
8623 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 8624
8625 return rc;
8626}
8627
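/* Scans the PCI VPD read-only section for a Dell manufacturer ID and,
 * when present, copies the VENDOR0 keyword (at most 31 bytes) into
 * bp->fw_ver; on any parse failure bp->fw_ver is simply left zeroed.
 */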
8628static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8629{
8630 int cnt, i, block_end, rodi;
8631 char vpd_data[BNX2X_VPD_LEN+1];
8632 char str_id_reg[VENDOR_ID_LEN+1];
8633 char str_id_cap[VENDOR_ID_LEN+1];
8634 u8 len;
8635
8636 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8637 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8638
8639 if (cnt < BNX2X_VPD_LEN)
8640 goto out_not_found;
8641
8642 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8643 PCI_VPD_LRDT_RO_DATA);
8644 if (i < 0)
8645 goto out_not_found;
8646
8647
8648 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8649 pci_vpd_lrdt_size(&vpd_data[i]);
8650
8651 i += PCI_VPD_LRDT_TAG_SIZE;
8652
8653 if (block_end > BNX2X_VPD_LEN)
8654 goto out_not_found;
8655
8656 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8657 PCI_VPD_RO_KEYWORD_MFR_ID);
8658 if (rodi < 0)
8659 goto out_not_found;
8660
8661 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8662
8663 if (len != VENDOR_ID_LEN)
8664 goto out_not_found;
8665
8666 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8667
8668 /* vendor specific info */
8669 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8670 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8671 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8672 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8673
8674 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8675 PCI_VPD_RO_KEYWORD_VENDOR0);
8676 if (rodi >= 0) {
8677 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8678
8679 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8680
8681 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8682 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8683 bp->fw_ver[len] = ' ';
8684 }
8685 }
8686 return;
8687 }
8688out_not_found:
8689 return;
8690}
8691
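/* One-time per-device software init: locks and deferred work, hardware
 * info and bp memory, then driver defaults (TPA, coalescing ticks in
 * BNX2X_BTR granularity, the periodic timer) and the DCBX state.
 */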
8692static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8693{
f2e0899f 8694 int func;
87942b46 8695 int timer_interval;
8696 int rc;
8697
8698 /* Disable interrupt handling until HW is initialized */
8699 atomic_set(&bp->intr_sem, 1);
e1510706 8700 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8701
34f80b04 8702 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8703 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 8704 spin_lock_init(&bp->stats_lock);
8705#ifdef BCM_CNIC
8706 mutex_init(&bp->cnic_mutex);
8707#endif
a2fbb9ea 8708
1cf167f2 8709 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8710 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8711
8712 rc = bnx2x_get_hwinfo(bp);
8713
8714 if (!rc)
8715 rc = bnx2x_alloc_mem_bp(bp);
8716
34f24c7f 8717 bnx2x_read_fwinfo(bp);
8718
8719 func = BP_FUNC(bp);
8720
8721 /* need to reset chip if undi was active */
8722 if (!BP_NOMCP(bp))
8723 bnx2x_undi_unload(bp);
8724
8725 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8726 dev_err(&bp->pdev->dev, "FPGA detected\n");
8727
8728 if (BP_NOMCP(bp) && (func == 0))
8729 dev_err(&bp->pdev->dev, "MCP disabled, "
8730 "must load devices in order!\n");
34f80b04 8731
555f6c78 8732 bp->multi_mode = multi_mode;
5d7cd496 8733 bp->int_mode = int_mode;
555f6c78 8734
8735 bp->dev->features |= NETIF_F_GRO;
8736
8737 /* Set TPA flags */
8738 if (disable_tpa) {
8739 bp->flags &= ~TPA_ENABLE_FLAG;
8740 bp->dev->features &= ~NETIF_F_LRO;
8741 } else {
8742 bp->flags |= TPA_ENABLE_FLAG;
8743 bp->dev->features |= NETIF_F_LRO;
8744 }
5d7cd496 8745 bp->disable_tpa = disable_tpa;
7a9b2557 8746
8747 if (CHIP_IS_E1(bp))
8748 bp->dropless_fc = 0;
8749 else
8750 bp->dropless_fc = dropless_fc;
8751
8d5726c4 8752 bp->mrrs = mrrs;
7a9b2557 8753
34f80b04 8754 bp->tx_ring_size = MAX_TX_AVAIL;
8755
8756 bp->rx_csum = 1;
34f80b04 8757
7d323bfd 8758 /* make sure that the numbers are in the right granularity */
8759 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8760 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 8761
8762 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8763 bp->current_interval = (poll ? poll : timer_interval);
8764
8765 init_timer(&bp->timer);
8766 bp->timer.expires = jiffies + bp->current_interval;
8767 bp->timer.data = (unsigned long) bp;
8768 bp->timer.function = bnx2x_timer;
8769
785b9b1a 8770 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
8771 bnx2x_dcbx_init_params(bp);
8772
34f80b04 8773 return rc;
8774}
8775
a2fbb9ea 8776
8777/****************************************************************************
8778* General service functions
8779****************************************************************************/
a2fbb9ea 8780
bb2a0f7a 8781/* called with rtnl_lock */
8782static int bnx2x_open(struct net_device *dev)
8783{
8784 struct bnx2x *bp = netdev_priv(dev);
8785
8786 netif_carrier_off(dev);
8787
8788 bnx2x_set_power_state(bp, PCI_D0);
8789
8790 if (!bnx2x_reset_is_done(bp)) {
8791 do {
8792 /* Reset MCP mailbox sequence if there is an ongoing
8793 * recovery
8794 */
8795 bp->fw_seq = 0;
8796
8797 /* If it's the first function to load and reset done
8798 * is still not cleared, it may mean a recovery is still
8799 * pending. We don't check the attention state here
8800 * because it may have already been cleared by a
8801 * "common" reset, but we shall proceed with "process kill" anyway.
8802 */
8803 if ((bnx2x_get_load_cnt(bp) == 0) &&
8804 bnx2x_trylock_hw_lock(bp,
8805 HW_LOCK_RESOURCE_RESERVED_08) &&
8806 (!bnx2x_leader_reset(bp))) {
8807 DP(NETIF_MSG_HW, "Recovered in open\n");
8808 break;
8809 }
8810
8811 bnx2x_set_power_state(bp, PCI_D3hot);
8812
8813 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
8814 " completed yet. Try again later. If u still see this"
8815 " message after a few retries then power cycle is"
8816 " required.\n", bp->dev->name);
8817
8818 return -EAGAIN;
8819 } while (0);
8820 }
8821
8822 bp->recovery_state = BNX2X_RECOVERY_DONE;
8823
bb2a0f7a 8824 return bnx2x_nic_load(bp, LOAD_OPEN);
8825}
8826
bb2a0f7a 8827/* called with rtnl_lock */
8828static int bnx2x_close(struct net_device *dev)
8829{
8830 struct bnx2x *bp = netdev_priv(dev);
8831
8832 /* Unload the driver, release IRQs */
bb2a0f7a 8833 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8834 bnx2x_set_power_state(bp, PCI_D3hot);
8835
8836 return 0;
8837}
8838
f5372251 8839/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 8840void bnx2x_set_rx_mode(struct net_device *dev)
8841{
8842 struct bnx2x *bp = netdev_priv(dev);
8843 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8844 int port = BP_PORT(bp);
8845
8846 if (bp->state != BNX2X_STATE_OPEN) {
8847 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8848 return;
8849 }
8850
8851 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8852
8853 if (dev->flags & IFF_PROMISC)
8854 rx_mode = BNX2X_RX_MODE_PROMISC;
34f80b04 8855 else if ((dev->flags & IFF_ALLMULTI) ||
8856 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8857 CHIP_IS_E1(bp)))
34f80b04 8858 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8859 else { /* some multicasts */
8860 if (CHIP_IS_E1(bp)) {
8861 /*
8862 * set mc list, do not wait as wait implies sleep
8863 * and set_rx_mode can be invoked from non-sleepable
8864 * context
8865 */
8866 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8867 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8868 BNX2X_MAX_MULTICAST*(1 + port));
e665bfda 8869
523224a3 8870 bnx2x_set_e1_mc_list(bp, offset);
8871 } else { /* E1H */
8872 /* Accept one or more multicasts */
22bedad3 8873 struct netdev_hw_addr *ha;
8874 u32 mc_filter[MC_HASH_SIZE];
8875 u32 crc, bit, regidx;
8876 int i;
8877
8878 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8879
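/* Approximate multicast filtering: hash each MAC with CRC32c and
 * use the top CRC byte to set one of 256 bits spread over the
 * MC_HASH_SIZE registers (high 3 bits select the register, low
 * 5 bits the bit within it).
 */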
22bedad3 8880 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 8881 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
523224a3 8882 bnx2x_mc_addr(ha));
34f80b04 8883
8884 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8885 ETH_ALEN);
8886 bit = (crc >> 24) & 0xff;
8887 regidx = bit >> 5;
8888 bit &= 0x1f;
8889 mc_filter[regidx] |= (1 << bit);
8890 }
8891
8892 for (i = 0; i < MC_HASH_SIZE; i++)
8893 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8894 mc_filter[i]);
8895 }
8896 }
8897
8898 bp->rx_mode = rx_mode;
8899 bnx2x_set_storm_rx_mode(bp);
8900}
8901
c18487ee 8902/* called with rtnl_lock */
8903static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8904 int devad, u16 addr)
a2fbb9ea 8905{
8906 struct bnx2x *bp = netdev_priv(netdev);
8907 u16 value;
8908 int rc;
a2fbb9ea 8909
8910 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8911 prtad, devad, addr);
a2fbb9ea 8912
8913 /* The HW expects different devad if CL22 is used */
8914 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 8915
01cd4528 8916 bnx2x_acquire_phy_lock(bp);
e10bc84d 8917 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8918 bnx2x_release_phy_lock(bp);
8919 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 8920
8921 if (!rc)
8922 rc = value;
8923 return rc;
8924}
a2fbb9ea 8925
8926/* called with rtnl_lock */
8927static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8928 u16 addr, u16 value)
8929{
8930 struct bnx2x *bp = netdev_priv(netdev);
8931 int rc;
8932
8933 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8934 " value 0x%x\n", prtad, devad, addr, value);
8935
8936 /* The HW expects different devad if CL22 is used */
8937 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 8938
01cd4528 8939 bnx2x_acquire_phy_lock(bp);
e10bc84d 8940 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8941 bnx2x_release_phy_lock(bp);
8942 return rc;
8943}
c18487ee 8944
8945/* called with rtnl_lock */
8946static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8947{
8948 struct bnx2x *bp = netdev_priv(dev);
8949 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 8950
8951 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8952 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 8953
8954 if (!netif_running(dev))
8955 return -EAGAIN;
8956
8957 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8958}
8959
257ddbda 8960#ifdef CONFIG_NET_POLL_CONTROLLER
8961static void poll_bnx2x(struct net_device *dev)
8962{
8963 struct bnx2x *bp = netdev_priv(dev);
8964
8965 disable_irq(bp->pdev->irq);
8966 bnx2x_interrupt(bp->pdev->irq, dev);
8967 enable_irq(bp->pdev->irq);
8968}
8969#endif
8970
8971static const struct net_device_ops bnx2x_netdev_ops = {
8972 .ndo_open = bnx2x_open,
8973 .ndo_stop = bnx2x_close,
8974 .ndo_start_xmit = bnx2x_start_xmit,
8307fa3e 8975 .ndo_select_queue = bnx2x_select_queue,
356e2385 8976 .ndo_set_multicast_list = bnx2x_set_rx_mode,
8977 .ndo_set_mac_address = bnx2x_change_mac_addr,
8978 .ndo_validate_addr = eth_validate_addr,
8979 .ndo_do_ioctl = bnx2x_ioctl,
8980 .ndo_change_mtu = bnx2x_change_mtu,
8981 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 8982#ifdef CONFIG_NET_POLL_CONTROLLER
8983 .ndo_poll_controller = poll_bnx2x,
8984#endif
8985};
8986
8987static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8988 struct net_device *dev)
8989{
8990 struct bnx2x *bp;
8991 int rc;
8992
8993 SET_NETDEV_DEV(dev, &pdev->dev);
8994 bp = netdev_priv(dev);
8995
8996 bp->dev = dev;
8997 bp->pdev = pdev;
a2fbb9ea 8998 bp->flags = 0;
f2e0899f 8999 bp->pf_num = PCI_FUNC(pdev->devfn);
9000
9001 rc = pci_enable_device(pdev);
9002 if (rc) {
9003 dev_err(&bp->pdev->dev,
9004 "Cannot enable PCI device, aborting\n");
9005 goto err_out;
9006 }
9007
9008 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9009 dev_err(&bp->pdev->dev,
9010 "Cannot find PCI device base address, aborting\n");
9011 rc = -ENODEV;
9012 goto err_out_disable;
9013 }
9014
9015 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9016 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9017 " base address, aborting\n");
9018 rc = -ENODEV;
9019 goto err_out_disable;
9020 }
9021
9022 if (atomic_read(&pdev->enable_cnt) == 1) {
9023 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9024 if (rc) {
9025 dev_err(&bp->pdev->dev,
9026 "Cannot obtain PCI resources, aborting\n");
9027 goto err_out_disable;
9028 }
a2fbb9ea 9029
9030 pci_set_master(pdev);
9031 pci_save_state(pdev);
9032 }
9033
9034 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9035 if (bp->pm_cap == 0) {
9036 dev_err(&bp->pdev->dev,
9037 "Cannot find power management capability, aborting\n");
9038 rc = -EIO;
9039 goto err_out_release;
9040 }
9041
9042 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9043 if (bp->pcie_cap == 0) {
9044 dev_err(&bp->pdev->dev,
9045 "Cannot find PCI Express capability, aborting\n");
9046 rc = -EIO;
9047 goto err_out_release;
9048 }
9049
1a983142 9050 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 9051 bp->flags |= USING_DAC_FLAG;
1a983142 9052 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
9053 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9054 " failed, aborting\n");
9055 rc = -EIO;
9056 goto err_out_release;
9057 }
9058
1a983142 9059 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9060 dev_err(&bp->pdev->dev,
9061 "System does not support DMA, aborting\n");
9062 rc = -EIO;
9063 goto err_out_release;
9064 }
9065
9066 dev->mem_start = pci_resource_start(pdev, 0);
9067 dev->base_addr = dev->mem_start;
9068 dev->mem_end = pci_resource_end(pdev, 0);
9069
9070 dev->irq = pdev->irq;
9071
275f165f 9072 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 9073 if (!bp->regview) {
9074 dev_err(&bp->pdev->dev,
9075 "Cannot map register space, aborting\n");
9076 rc = -ENOMEM;
9077 goto err_out_release;
9078 }
9079
34f80b04 9080 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 9081 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 9082 pci_resource_len(pdev, 2)));
a2fbb9ea 9083 if (!bp->doorbells) {
9084 dev_err(&bp->pdev->dev,
9085 "Cannot map doorbell space, aborting\n");
9086 rc = -ENOMEM;
9087 goto err_out_unmap;
9088 }
9089
9090 bnx2x_set_power_state(bp, PCI_D0);
9091
9092 /* clean indirect addresses */
9093 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9094 PCICFG_VENDOR_ID_OFFSET);
9095 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9096 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9097 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9098 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 9099
9100 /* Reset the load counter */
9101 bnx2x_clear_load_cnt(bp);
9102
34f80b04 9103 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 9104
c64213cd 9105 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 9106 bnx2x_set_ethtool_ops(dev);
34f80b04 9107 dev->features |= NETIF_F_SG;
79032644 9108 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9109 if (bp->flags & USING_DAC_FLAG)
9110 dev->features |= NETIF_F_HIGHDMA;
9111 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9112 dev->features |= NETIF_F_TSO6;
34f80b04 9113 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9114
9115 dev->vlan_features |= NETIF_F_SG;
79032644 9116 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9117 if (bp->flags & USING_DAC_FLAG)
9118 dev->vlan_features |= NETIF_F_HIGHDMA;
9119 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9120 dev->vlan_features |= NETIF_F_TSO6;
a2fbb9ea 9121
9122#ifdef BCM_DCB
9123 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9124#endif
9125
9126 /* get_port_hwinfo() will set prtad and mmds properly */
9127 bp->mdio.prtad = MDIO_PRTAD_NONE;
9128 bp->mdio.mmds = 0;
9129 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9130 bp->mdio.dev = dev;
9131 bp->mdio.mdio_read = bnx2x_mdio_read;
9132 bp->mdio.mdio_write = bnx2x_mdio_write;
9133
9134 return 0;
9135
9136err_out_unmap:
9137 if (bp->regview) {
9138 iounmap(bp->regview);
9139 bp->regview = NULL;
9140 }
9141 if (bp->doorbells) {
9142 iounmap(bp->doorbells);
9143 bp->doorbells = NULL;
9144 }
9145
9146err_out_release:
9147 if (atomic_read(&pdev->enable_cnt) == 1)
9148 pci_release_regions(pdev);
9149
9150err_out_disable:
9151 pci_disable_device(pdev);
9152 pci_set_drvdata(pdev, NULL);
9153
9154err_out:
9155 return rc;
9156}
9157
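/* The helper below reads the negotiated PCIe link width and speed out
 * of the PCICFG link control register.
 */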
9158static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9159 int *width, int *speed)
9160{
9161 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9162
37f9ce62 9163 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 9164
9165 /* return value of 1=2.5GHz 2=5GHz */
9166 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 9167}
37f9ce62 9168
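/* Validate a loaded firmware image: every section must lie inside the
 * file, every init_ops offset must be in range, and the embedded
 * version must match the version this driver was built against.
 */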
6891dd25 9169static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 9170{
37f9ce62 9171 const struct firmware *firmware = bp->firmware;
9172 struct bnx2x_fw_file_hdr *fw_hdr;
9173 struct bnx2x_fw_file_section *sections;
94a78b79 9174 u32 offset, len, num_ops;
37f9ce62 9175 u16 *ops_offsets;
94a78b79 9176 int i;
37f9ce62 9177 const u8 *fw_ver;
9178
9179 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9180 return -EINVAL;
9181
9182 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9183 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9184
9185 /* Make sure none of the offsets and sizes make us read beyond
9186 * the end of the firmware data */
9187 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9188 offset = be32_to_cpu(sections[i].offset);
9189 len = be32_to_cpu(sections[i].len);
9190 if (offset + len > firmware->size) {
9191 dev_err(&bp->pdev->dev,
9192 "Section %d length is out of bounds\n", i);
9193 return -EINVAL;
9194 }
9195 }
9196
9197 /* Likewise for the init_ops offsets */
9198 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9199 ops_offsets = (u16 *)(firmware->data + offset);
9200 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9201
9202 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9203 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
9204 dev_err(&bp->pdev->dev,
9205 "Section offset %d is out of bounds\n", i);
9206 return -EINVAL;
9207 }
9208 }
9209
9210 /* Check FW version */
9211 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9212 fw_ver = firmware->data + offset;
9213 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9214 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9215 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9216 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
9217 dev_err(&bp->pdev->dev,
9218 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
9219 fw_ver[0], fw_ver[1], fw_ver[2],
9220 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9221 BCM_5710_FW_MINOR_VERSION,
9222 BCM_5710_FW_REVISION_VERSION,
9223 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 9224 return -EINVAL;
9225 }
9226
9227 return 0;
9228}
9229
ab6ad5a4 9230static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9231{
9232 const __be32 *source = (const __be32 *)_source;
9233 u32 *target = (u32 *)_target;
94a78b79 9234 u32 i;
9235
9236 for (i = 0; i < n/4; i++)
9237 target[i] = be32_to_cpu(source[i]);
9238}
9239
9240/*
9241 Ops array is stored in the following format:
9242 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9243 */
ab6ad5a4 9244static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 9245{
9246 const __be32 *source = (const __be32 *)_source;
9247 struct raw_op *target = (struct raw_op *)_target;
94a78b79 9248 u32 i, j, tmp;
94a78b79 9249
ab6ad5a4 9250 for (i = 0, j = 0; i < n/8; i++, j += 2) {
9251 tmp = be32_to_cpu(source[j]);
9252 target[i].op = (tmp >> 24) & 0xff;
9253 target[i].offset = tmp & 0xffffff;
9254 target[i].raw_data = be32_to_cpu(source[j + 1]);
9255 }
9256}
ab6ad5a4 9257
9258/**
9259 * IRO array is stored in the following format:
9260 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9261 */
9262static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9263{
9264 const __be32 *source = (const __be32 *)_source;
9265 struct iro *target = (struct iro *)_target;
9266 u32 i, j, tmp;
9267
9268 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9269 target[i].base = be32_to_cpu(source[j]);
9270 j++;
9271 tmp = be32_to_cpu(source[j]);
9272 target[i].m1 = (tmp >> 16) & 0xffff;
9273 target[i].m2 = tmp & 0xffff;
9274 j++;
9275 tmp = be32_to_cpu(source[j]);
9276 target[i].m3 = (tmp >> 16) & 0xffff;
9277 target[i].size = tmp & 0xffff;
9278 j++;
9279 }
9280}
9281
ab6ad5a4 9282static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9283{
9284 const __be16 *source = (const __be16 *)_source;
9285 u16 *target = (u16 *)_target;
94a78b79 9286 u32 i;
9287
9288 for (i = 0; i < n/2; i++)
9289 target[i] = be16_to_cpu(source[i]);
9290}
9291
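/* Helper for bnx2x_init_firmware(): allocates bp->arr using the length
 * from the firmware file header and fills it by converting the
 * corresponding big-endian section with func, jumping to lbl on
 * allocation failure.
 */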
9292#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
9293do { \
9294 u32 len = be32_to_cpu(fw_hdr->arr.len); \
9295 bp->arr = kmalloc(len, GFP_KERNEL); \
9296 if (!bp->arr) { \
9297 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9298 goto lbl; \
9299 } \
9300 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
9301 (u8 *)bp->arr, len); \
9302} while (0)
94a78b79 9303
6891dd25 9304int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 9305{
45229b42 9306 const char *fw_file_name;
94a78b79 9307 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 9308 int rc;
94a78b79 9309
94a78b79 9310 if (CHIP_IS_E1(bp))
45229b42 9311 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 9312 else if (CHIP_IS_E1H(bp))
45229b42 9313 fw_file_name = FW_FILE_NAME_E1H;
9314 else if (CHIP_IS_E2(bp))
9315 fw_file_name = FW_FILE_NAME_E2;
cdaa7cb8 9316 else {
6891dd25 9317 BNX2X_ERR("Unsupported chip revision\n");
9318 return -EINVAL;
9319 }
94a78b79 9320
6891dd25 9321 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 9322
6891dd25 9323 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 9324 if (rc) {
6891dd25 9325 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
9326 goto request_firmware_exit;
9327 }
9328
9329 rc = bnx2x_check_firmware(bp);
9330 if (rc) {
6891dd25 9331 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
9332 goto request_firmware_exit;
9333 }
9334
9335 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9336
9337 /* Initialize the pointers to the init arrays */
9338 /* Blob */
9339 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9340
9341 /* Opcodes */
9342 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9343
9344 /* Offsets */
9345 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9346 be16_to_cpu_n);
9347
9348 /* STORMs firmware */
9349 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9350 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9351 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9352 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9353 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9354 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9355 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9356 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9357 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9358 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9359 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9360 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9361 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9362 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9363 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9364 be32_to_cpu(fw_hdr->csem_pram_data.offset);
9365 /* IRO */
9366 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
9367
9368 return 0;
ab6ad5a4 9369
9370iro_alloc_err:
9371 kfree(bp->init_ops_offsets);
9372init_offsets_alloc_err:
9373 kfree(bp->init_ops);
9374init_ops_alloc_err:
9375 kfree(bp->init_data);
9376request_firmware_exit:
9377 release_firmware(bp->firmware);
9378
9379 return rc;
9380}
9381
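/* Total QM connection IDs: the L2 fastpath CIDs (plus the CNIC CIDs
 * when BCM_CNIC is compiled in) rounded up to QM_CID_ROUND.
 */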
9382static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9383{
9384 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 9385
9386#ifdef BCM_CNIC
9387 cid_count += CNIC_CID_MAX;
9388#endif
9389 return roundup(cid_count, QM_CID_ROUND);
9390}
f85582f8 9391
9392static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9393 const struct pci_device_id *ent)
9394{
9395 struct net_device *dev = NULL;
9396 struct bnx2x *bp;
37f9ce62 9397 int pcie_width, pcie_speed;
9398 int rc, cid_count;
9399
9400 switch (ent->driver_data) {
9401 case BCM57710:
9402 case BCM57711:
9403 case BCM57711E:
9404 cid_count = FP_SB_MAX_E1x;
9405 break;
9406
9407 case BCM57712:
9408 case BCM57712E:
9409 cid_count = FP_SB_MAX_E2;
9410 break;
a2fbb9ea 9411
9412 default:
9413 pr_err("Unknown board_type (%ld), aborting\n",
9414 ent->driver_data);
870634b0 9415 return -ENODEV;
9416 }
9417
ec6ba945 9418 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
f85582f8 9419
a2fbb9ea 9420 /* dev zeroed in init_etherdev */
523224a3 9421 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 9422 if (!dev) {
cdaa7cb8 9423 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 9424 return -ENOMEM;
34f80b04 9425 }
a2fbb9ea 9426
a2fbb9ea 9427 bp = netdev_priv(dev);
7995c64e 9428 bp->msg_enable = debug;
a2fbb9ea 9429
9430 pci_set_drvdata(pdev, dev);
9431
9432 bp->l2_cid_count = cid_count;
9433
34f80b04 9434 rc = bnx2x_init_dev(pdev, dev);
9435 if (rc < 0) {
9436 free_netdev(dev);
9437 return rc;
9438 }
9439
34f80b04 9440 rc = bnx2x_init_bp(bp);
9441 if (rc)
9442 goto init_one_exit;
9443
9444 /* calc qm_cid_count */
9445 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9446
9447#ifdef BCM_CNIC
9448 /* disable FCoE L2 queue for E1x */
9449 if (CHIP_IS_E1x(bp))
9450 bp->flags |= NO_FCOE_FLAG;
9451
9452#endif
9453
9454 /* Configure interrupt mode: try to enable MSI-X/MSI if
9455 * needed, set bp->num_queues appropriately.
9456 */
9457 bnx2x_set_int_mode(bp);
9458
9459 /* Add all NAPI objects */
9460 bnx2x_add_all_napi(bp);
9461
9462 rc = register_netdev(dev);
9463 if (rc) {
9464 dev_err(&pdev->dev, "Cannot register net device\n");
9465 goto init_one_exit;
9466 }
9467
9468#ifdef BCM_CNIC
9469 if (!NO_FCOE(bp)) {
9470 /* Add storage MAC address */
9471 rtnl_lock();
9472 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9473 rtnl_unlock();
9474 }
9475#endif
9476
37f9ce62 9477 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
d6214d7a 9478
9479 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9480 " IRQ %d, ", board_info[ent->driver_data].name,
9481 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9482 pcie_width,
9483 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9484 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9485 "5GHz (Gen2)" : "2.5GHz",
9486 dev->base_addr, bp->pdev->irq);
9487 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 9488
a2fbb9ea 9489 return 0;
9490
9491init_one_exit:
9492 if (bp->regview)
9493 iounmap(bp->regview);
9494
9495 if (bp->doorbells)
9496 iounmap(bp->doorbells);
9497
9498 free_netdev(dev);
9499
9500 if (atomic_read(&pdev->enable_cnt) == 1)
9501 pci_release_regions(pdev);
9502
9503 pci_disable_device(pdev);
9504 pci_set_drvdata(pdev, NULL);
9505
9506 return rc;
9507}
9508
9509static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9510{
9511 struct net_device *dev = pci_get_drvdata(pdev);
9512 struct bnx2x *bp;
9513
9514 if (!dev) {
cdaa7cb8 9515 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9516 return;
9517 }
228241eb 9518 bp = netdev_priv(dev);
a2fbb9ea 9519
9520#ifdef BCM_CNIC
9521 /* Delete storage MAC address */
9522 if (!NO_FCOE(bp)) {
9523 rtnl_lock();
9524 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9525 rtnl_unlock();
9526 }
9527#endif
9528
9529 unregister_netdev(dev);
9530
9531 /* Delete all NAPI objects */
9532 bnx2x_del_all_napi(bp);
9533
9534 /* Power on: we can't let PCI layer write to us while we are in D3 */
9535 bnx2x_set_power_state(bp, PCI_D0);
9536
9537 /* Disable MSI/MSI-X */
9538 bnx2x_disable_msi(bp);
f85582f8 9539
9540 /* Power off */
9541 bnx2x_set_power_state(bp, PCI_D3hot);
9542
9543 /* Make sure RESET task is not scheduled before continuing */
9544 cancel_delayed_work_sync(&bp->reset_task);
9545
9546 if (bp->regview)
9547 iounmap(bp->regview);
9548
9549 if (bp->doorbells)
9550 iounmap(bp->doorbells);
9551
9552 bnx2x_free_mem_bp(bp);
9553
a2fbb9ea 9554 free_netdev(dev);
9555
9556 if (atomic_read(&pdev->enable_cnt) == 1)
9557 pci_release_regions(pdev);
9558
9559 pci_disable_device(pdev);
9560 pci_set_drvdata(pdev, NULL);
9561}
9562
9563static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9564{
9565 int i;
9566
9567 bp->state = BNX2X_STATE_ERROR;
9568
9569 bp->rx_mode = BNX2X_RX_MODE_NONE;
9570
9571 bnx2x_netif_stop(bp, 0);
c89af1a3 9572 netif_carrier_off(bp->dev);
9573
9574 del_timer_sync(&bp->timer);
9575 bp->stats_state = STATS_STATE_DISABLED;
9576 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9577
9578 /* Release IRQs */
d6214d7a 9579 bnx2x_free_irq(bp);
f8ef6e44 9580
9581 /* Free SKBs, SGEs, TPA pool and driver internals */
9582 bnx2x_free_skbs(bp);
523224a3 9583
ec6ba945 9584 for_each_rx_queue(bp, i)
f8ef6e44 9585 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 9586
9587 bnx2x_free_mem(bp);
9588
9589 bp->state = BNX2X_STATE_CLOSED;
9590
9591 return 0;
9592}
9593
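/* Minimal re-initialization after an EEH reset: re-read the shmem
 * base, fall back to NO_MCP mode if the MCP looks inactive, validate
 * the MCP signature and resync the firmware mailbox sequence.
 */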
9594static void bnx2x_eeh_recover(struct bnx2x *bp)
9595{
9596 u32 val;
9597
9598 mutex_init(&bp->port.phy_mutex);
9599
9600 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9601 bp->link_params.shmem_base = bp->common.shmem_base;
9602 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9603
9604 if (!bp->common.shmem_base ||
9605 (bp->common.shmem_base < 0xA0000) ||
9606 (bp->common.shmem_base >= 0xC0000)) {
9607 BNX2X_DEV_INFO("MCP not active\n");
9608 bp->flags |= NO_MCP_FLAG;
9609 return;
9610 }
9611
9612 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9613 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9614 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9615 BNX2X_ERR("BAD MCP validity signature\n");
9616
9617 if (!BP_NOMCP(bp)) {
9618 bp->fw_seq =
9619 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9620 DRV_MSG_SEQ_NUMBER_MASK);
9621 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9622 }
9623}
9624
9625/**
9626 * bnx2x_io_error_detected - called when PCI error is detected
9627 * @pdev: Pointer to PCI device
9628 * @state: The current pci connection state
9629 *
9630 * This function is called after a PCI bus error affecting
9631 * this device has been detected.
9632 */
9633static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9634 pci_channel_state_t state)
9635{
9636 struct net_device *dev = pci_get_drvdata(pdev);
9637 struct bnx2x *bp = netdev_priv(dev);
9638
9639 rtnl_lock();
9640
9641 netif_device_detach(dev);
9642
9643 if (state == pci_channel_io_perm_failure) {
9644 rtnl_unlock();
9645 return PCI_ERS_RESULT_DISCONNECT;
9646 }
9647
493adb1f 9648 if (netif_running(dev))
f8ef6e44 9649 bnx2x_eeh_nic_unload(bp);
9650
9651 pci_disable_device(pdev);
9652
9653 rtnl_unlock();
9654
9655 /* Request a slot reset */
9656 return PCI_ERS_RESULT_NEED_RESET;
9657}
9658
9659/**
9660 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9661 * @pdev: Pointer to PCI device
9662 *
9663 * Restart the card from scratch, as if from a cold-boot.
9664 */
9665static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9666{
9667 struct net_device *dev = pci_get_drvdata(pdev);
9668 struct bnx2x *bp = netdev_priv(dev);
9669
9670 rtnl_lock();
9671
9672 if (pci_enable_device(pdev)) {
9673 dev_err(&pdev->dev,
9674 "Cannot re-enable PCI device after reset\n");
9675 rtnl_unlock();
9676 return PCI_ERS_RESULT_DISCONNECT;
9677 }
9678
9679 pci_set_master(pdev);
9680 pci_restore_state(pdev);
9681
9682 if (netif_running(dev))
9683 bnx2x_set_power_state(bp, PCI_D0);
9684
9685 rtnl_unlock();
9686
9687 return PCI_ERS_RESULT_RECOVERED;
9688}
9689
9690/**
9691 * bnx2x_io_resume - called when traffic can start flowing again
9692 * @pdev: Pointer to PCI device
9693 *
9694 * This callback is called when the error recovery driver tells us that
9695 * it's OK to resume normal operation.
9696 */
9697static void bnx2x_io_resume(struct pci_dev *pdev)
9698{
9699 struct net_device *dev = pci_get_drvdata(pdev);
9700 struct bnx2x *bp = netdev_priv(dev);
9701
72fd0718 9702 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9703 printk(KERN_ERR "Handling parity error recovery. "
9704 "Try again later\n");
9705 return;
9706 }
9707
9708 rtnl_lock();
9709
9710 bnx2x_eeh_recover(bp);
9711
493adb1f 9712 if (netif_running(dev))
f8ef6e44 9713 bnx2x_nic_load(bp, LOAD_NORMAL);
9714
9715 netif_device_attach(dev);
9716
9717 rtnl_unlock();
9718}
9719
9720static struct pci_error_handlers bnx2x_err_handler = {
9721 .error_detected = bnx2x_io_error_detected,
9722 .slot_reset = bnx2x_io_slot_reset,
9723 .resume = bnx2x_io_resume,
9724};
9725
a2fbb9ea 9726static struct pci_driver bnx2x_pci_driver = {
9727 .name = DRV_MODULE_NAME,
9728 .id_table = bnx2x_pci_tbl,
9729 .probe = bnx2x_init_one,
9730 .remove = __devexit_p(bnx2x_remove_one),
9731 .suspend = bnx2x_suspend,
9732 .resume = bnx2x_resume,
9733 .err_handler = &bnx2x_err_handler,
9734};
9735
9736static int __init bnx2x_init(void)
9737{
9738 int ret;
9739
7995c64e 9740 pr_info("%s", version);
938cf541 9741
9742 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9743 if (bnx2x_wq == NULL) {
7995c64e 9744 pr_err("Cannot create workqueue\n");
9745 return -ENOMEM;
9746 }
9747
9748 ret = pci_register_driver(&bnx2x_pci_driver);
9749 if (ret) {
7995c64e 9750 pr_err("Cannot register driver\n");
9751 destroy_workqueue(bnx2x_wq);
9752 }
9753 return ret;
9754}
9755
9756static void __exit bnx2x_cleanup(void)
9757{
9758 pci_unregister_driver(&bnx2x_pci_driver);
9759
9760 destroy_workqueue(bnx2x_wq);
9761}
9762
9763module_init(bnx2x_init);
9764module_exit(bnx2x_cleanup);
9765
9766#ifdef BCM_CNIC
9767
9768/* count denotes the number of new completions we have seen */
9769static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9770{
9771 struct eth_spe *spe;
9772
9773#ifdef BNX2X_STOP_ON_ERROR
9774 if (unlikely(bp->panic))
9775 return;
9776#endif
9777
9778 spin_lock_bh(&bp->spq_lock);
c2bff63f 9779 BUG_ON(bp->cnic_spq_pending < count);
9780 bp->cnic_spq_pending -= count;
9781
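/* Drain the CNIC kwqe queue into the SPQ while credits allow:
 * L2/COMMON SPEs consume spq_left, whereas iSCSI/FCoE SPEs are
 * bounded by max_kwqe_pending.
 */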
993ac7b5 9782
9783 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9784 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9785 & SPE_HDR_CONN_TYPE) >>
9786 SPE_HDR_CONN_TYPE_SHIFT;
9787
9788 /* Set validation for iSCSI L2 client before sending SETUP
9789 * ramrod
9790 */
9791 if (type == ETH_CONNECTION_TYPE) {
9792 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9793 hdr.conn_and_cmd_data) >>
9794 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9795
9796 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9797 bnx2x_set_ctx_validation(&bp->context.
9798 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9799 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9800 }
9801
9802 /* There may be no more than 8 L2 and COMMON SPEs and no more
9803 * than 8 L5 SPEs in flight.
9804 */
9805 if ((type == NONE_CONNECTION_TYPE) ||
9806 (type == ETH_CONNECTION_TYPE)) {
9807 if (!atomic_read(&bp->spq_left))
9808 break;
9809 else
9810 atomic_dec(&bp->spq_left);
9811 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9812 (type == FCOE_CONNECTION_TYPE)) {
9813 if (bp->cnic_spq_pending >=
9814 bp->cnic_eth_dev.max_kwqe_pending)
9815 break;
9816 else
9817 bp->cnic_spq_pending++;
9818 } else {
9819 BNX2X_ERR("Unknown SPE type: %d\n", type);
9820 bnx2x_panic();
993ac7b5 9821 break;
c2bff63f 9822 }
9823
9824 spe = bnx2x_sp_get_next(bp);
9825 *spe = *bp->cnic_kwq_cons;
9826
9827 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9828 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9829
9830 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9831 bp->cnic_kwq_cons = bp->cnic_kwq;
9832 else
9833 bp->cnic_kwq_cons++;
9834 }
9835 bnx2x_sp_prod_update(bp);
9836 spin_unlock_bh(&bp->spq_lock);
9837}
9838
9839static int bnx2x_cnic_sp_queue(struct net_device *dev,
9840 struct kwqe_16 *kwqes[], u32 count)
9841{
9842 struct bnx2x *bp = netdev_priv(dev);
9843 int i;
9844
9845#ifdef BNX2X_STOP_ON_ERROR
9846 if (unlikely(bp->panic))
9847 return -EIO;
9848#endif
9849
9850 spin_lock_bh(&bp->spq_lock);
9851
9852 for (i = 0; i < count; i++) {
9853 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9854
9855 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9856 break;
9857
9858 *bp->cnic_kwq_prod = *spe;
9859
9860 bp->cnic_kwq_pending++;
9861
9862 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9863 spe->hdr.conn_and_cmd_data, spe->hdr.type,
9864 spe->data.update_data_addr.hi,
9865 spe->data.update_data_addr.lo,
9866 bp->cnic_kwq_pending);
9867
9868 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9869 bp->cnic_kwq_prod = bp->cnic_kwq;
9870 else
9871 bp->cnic_kwq_prod++;
9872 }
9873
9874 spin_unlock_bh(&bp->spq_lock);
9875
9876 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9877 bnx2x_cnic_sp_post(bp, 0);
9878
9879 return i;
9880}
9881
9882static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9883{
9884 struct cnic_ops *c_ops;
9885 int rc = 0;
9886
9887 mutex_lock(&bp->cnic_mutex);
9888 c_ops = bp->cnic_ops;
9889 if (c_ops)
9890 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9891 mutex_unlock(&bp->cnic_mutex);
9892
9893 return rc;
9894}
9895
9896static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9897{
9898 struct cnic_ops *c_ops;
9899 int rc = 0;
9900
9901 rcu_read_lock();
9902 c_ops = rcu_dereference(bp->cnic_ops);
9903 if (c_ops)
9904 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9905 rcu_read_unlock();
9906
9907 return rc;
9908}
9909
9910/*
9911 * for commands that have no data
9912 */
9f6c9258 9913int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9914{
9915 struct cnic_ctl_info ctl = {0};
9916
9917 ctl.cmd = cmd;
9918
9919 return bnx2x_cnic_ctl_send(bp, &ctl);
9920}
9921
9922static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9923{
9924 struct cnic_ctl_info ctl;
9925
9926 /* first we tell CNIC and only then we count this as a completion */
9927 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9928 ctl.data.comp.cid = cid;
9929
9930 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 9931 bnx2x_cnic_sp_post(bp, 0);
9932}
9933
9934static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9935{
9936 struct bnx2x *bp = netdev_priv(dev);
9937 int rc = 0;
9938
9939 switch (ctl->cmd) {
9940 case DRV_CTL_CTXTBL_WR_CMD: {
9941 u32 index = ctl->data.io.offset;
9942 dma_addr_t addr = ctl->data.io.dma_addr;
9943
9944 bnx2x_ilt_wr(bp, index, addr);
9945 break;
9946 }
9947
9948 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9949 int count = ctl->data.credit.credit_count;
9950
9951 bnx2x_cnic_sp_post(bp, count);
9952 break;
9953 }
9954
9955 /* rtnl_lock is held. */
9956 case DRV_CTL_START_L2_CMD: {
9957 u32 cli = ctl->data.ring.client_id;
9958
9959 /* Clear FCoE FIP and ALL ENODE MACs addresses first */
9960 bnx2x_del_fcoe_eth_macs(bp);
9961
9962 /* Set iSCSI MAC address */
9963 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9964
9965 mmiowb();
9966 barrier();
9967
9968 /* Start accepting on iSCSI L2 ring. Accept all multicasts
9969 * because it's the only way for UIO Client to accept
9970 * multicasts (in non-promiscuous mode only one Client per
9971 * function, the leading one in our case, will receive
9972 * multicast packets).
9973 */
9974 bnx2x_rxq_set_mac_filters(bp, cli,
9975 BNX2X_ACCEPT_UNICAST |
9976 BNX2X_ACCEPT_BROADCAST |
9977 BNX2X_ACCEPT_ALL_MULTICAST);
9978 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9979
9980 break;
9981 }
9982
9983 /* rtnl_lock is held. */
9984 case DRV_CTL_STOP_L2_CMD: {
9985 u32 cli = ctl->data.ring.client_id;
9986
9987 /* Stop accepting on iSCSI L2 ring */
9988 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9989 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9990
9991 mmiowb();
9992 barrier();
9993
9994 /* Unset iSCSI L2 MAC */
9995 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9996 break;
9997 }
9998 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9999 int count = ctl->data.credit.credit_count;
10000
10001 smp_mb__before_atomic_inc();
10002 atomic_add(count, &bp->spq_left);
10003 smp_mb__after_atomic_inc();
10004 break;
10005 }
10006
10007 default:
10008 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10009 rc = -EINVAL;
10010 }
10011
10012 return rc;
10013}
10014
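/* Describe the interrupt resources CNIC should use: the CNIC slowpath
 * status block (MSI-X vector 1 when MSI-X is enabled) plus the default
 * status block.
 */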
9f6c9258 10015void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
10016{
10017 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10018
10019 if (bp->flags & USING_MSIX_FLAG) {
10020 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10021 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10022 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10023 } else {
10024 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10025 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10026 }
10027 if (CHIP_IS_E2(bp))
10028 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10029 else
10030 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10031
993ac7b5 10032 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
523224a3 10033 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
10034 cp->irq_arr[1].status_blk = bp->def_status_blk;
10035 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 10036 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
10037
10038 cp->num_irq = 2;
10039}
10040
10041static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
10042 void *data)
10043{
10044 struct bnx2x *bp = netdev_priv(dev);
10045 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10046
10047 if (ops == NULL)
10048 return -EINVAL;
10049
10050 if (atomic_read(&bp->intr_sem) != 0)
10051 return -EBUSY;
10052
10053 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
10054 if (!bp->cnic_kwq)
10055 return -ENOMEM;
10056
10057 bp->cnic_kwq_cons = bp->cnic_kwq;
10058 bp->cnic_kwq_prod = bp->cnic_kwq;
10059 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
10060
10061 bp->cnic_spq_pending = 0;
10062 bp->cnic_kwq_pending = 0;
10063
10064 bp->cnic_data = data;
10065
10066 cp->num_irq = 0;
10067 cp->drv_state = CNIC_DRV_STATE_REGD;
523224a3 10068 cp->iro_arr = bp->iro_arr;
993ac7b5 10069
993ac7b5 10070 bnx2x_setup_cnic_irq_info(bp);
c2bff63f 10071
10072 rcu_assign_pointer(bp->cnic_ops, ops);
10073
10074 return 0;
10075}
10076
10077static int bnx2x_unregister_cnic(struct net_device *dev)
10078{
10079 struct bnx2x *bp = netdev_priv(dev);
10080 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10081
10082 mutex_lock(&bp->cnic_mutex);
10083 cp->drv_state = 0;
10084 rcu_assign_pointer(bp->cnic_ops, NULL);
10085 mutex_unlock(&bp->cnic_mutex);
10086 synchronize_rcu();
10087 kfree(bp->cnic_kwq);
10088 bp->cnic_kwq = NULL;
10089
10090 return 0;
10091}
10092
10093struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10094{
10095 struct bnx2x *bp = netdev_priv(dev);
10096 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10097
10098 cp->drv_owner = THIS_MODULE;
10099 cp->chip_id = CHIP_ID(bp);
10100 cp->pdev = bp->pdev;
10101 cp->io_base = bp->regview;
10102 cp->io_base2 = bp->doorbells;
10103 cp->max_kwqe_pending = 8;
523224a3 10104 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
10105 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10106 bnx2x_cid_ilt_lines(bp);
993ac7b5 10107 cp->ctx_tbl_len = CNIC_ILT_LINES;
c2bff63f 10108 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
10109 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10110 cp->drv_ctl = bnx2x_drv_ctl;
10111 cp->drv_register_cnic = bnx2x_register_cnic;
10112 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
10113 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10114 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10115 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10116 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10117
10118 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10119 "starting cid %d\n",
10120 cp->ctx_blk_size,
10121 cp->ctx_tbl_offset,
10122 cp->ctx_tbl_len,
10123 cp->starting_cid);
10124 return cp;
10125}
10126EXPORT_SYMBOL(bnx2x_cnic_probe);
10127
10128#endif /* BCM_CNIC */
94a78b79 10129