/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/
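
/*
 * The storm_memset_* helpers below program per-function configuration,
 * statistics and event-ring parameters into the X/T/U/C-STORM internal
 * memories through GRC register writes.
 */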
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);

	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

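/* Pretty-print a DMAE command's source, destination and completion
 * parameters according to its opcode (for debug messages only).
 */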
static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

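/* Build a DMAE command opcode from the source/destination types, the
 * current port/vn and endianity, optionally adding completion reporting.
 */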
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

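/* DMA len32 dwords from host memory at dma_addr to the GRC address
 * dst_addr; falls back to indirect register writes while DMAE is not
 * ready.
 */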
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

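/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer; falls back to indirect register reads while DMAE is not ready.
 */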
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

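/* Write a buffer that may exceed the per-command DMAE length limit by
 * splitting it into chunks of at most DMAE_LEN32_WR_MAX dwords.
 */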
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

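/* Scan the X/T/C/U-STORM assert lists and print every valid firmware
 * assert found; returns the number of asserts.
 */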
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

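/* Print the MCP firmware trace buffer found via shmem (the "fw dump") */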
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

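/* Dump driver and firmware state (status blocks, ring indices and, when
 * BNX2X_STOP_ON_ERROR is set, ring contents) for post-mortem analysis.
 */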
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x)  igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

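/* Enable interrupts in the HC block according to the active interrupt
 * mode (MSI-X, MSI or INTx) and set up leading/trailing edge attention.
 */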
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability; it is forbidden to disable
	 * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent the HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

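/* Disable interrupts and wait until all in-flight ISRs and the slow-path
 * task have finished.
 */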
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock on the given resource was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

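/* Handle a slow-path ramrod completion CQE: advance the fastpath state
 * machine and release the slow-path queue credit.
 */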
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in fp->state and towards the memory */
	smp_wmb();

	return;
}

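/* Legacy INTx/MSI interrupt handler: ack the status, schedule NAPI for
 * every fastpath with pending work, forward CNIC events and kick the
 * slow-path task.
 */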
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

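/* Acquire a hardware resource lock, retrying every 5ms for up to
 * 5 seconds.
 */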
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

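/* Drive an SPIO pin low/high or float it, under the SPIO hardware lock */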
c18487ee 1696static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1697{
c18487ee
YR
1698 u32 spio_mask = (1 << spio_num);
1699 u32 spio_reg;
a2fbb9ea 1700
c18487ee
YR
1701 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1702 (spio_num > MISC_REGISTERS_SPIO_7)) {
1703 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1704 return -EINVAL;
a2fbb9ea
ET
1705 }
1706
4a37fb66 1707 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1708 /* read SPIO and mask except the float bits */
1709 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1710
c18487ee 1711 switch (mode) {
6378c025 1712 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1713 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1714 /* clear FLOAT and set CLR */
1715 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1716 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1717 break;
a2fbb9ea 1718
6378c025 1719 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1720 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1721 /* clear FLOAT and set SET */
1722 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1723 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1724 break;
a2fbb9ea 1725
c18487ee
YR
1726 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1727 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1728 /* set FLOAT */
1729 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1730 break;
a2fbb9ea 1731
c18487ee
YR
1732 default:
1733 break;
a2fbb9ea
ET
1734 }
1735
c18487ee 1736 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1737 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1738
a2fbb9ea
ET
1739 return 0;
1740}
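/*
 * Editor's note -- illustrative sketch, not part of the driver: the GPIO
 * and SPIO setters above share one read-modify-write idiom on positional
 * bit-fields: clear the pin's FLOAT bit to start driving it, then raise
 * either the CLR or the SET bit.  The field positions below are assumed
 * stand-ins for illustration, not the real MISC register layout.
 */
#if 0	/* standalone demo; compile outside the kernel */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SET_POS	8	/* assumed positions, illustration only */
#define DEMO_CLR_POS	16
#define DEMO_FLOAT_POS	24

/* drive pin 'num' low: stop floating it, then latch CLR */
static uint32_t demo_drive_low(uint32_t reg, int num)
{
	uint32_t mask = 1u << num;

	reg &= ~(mask << DEMO_FLOAT_POS);
	reg |= (mask << DEMO_CLR_POS);
	return reg;
}

int main(void)
{
	/* pin 4 starts floating: expect 0x00100000 (CLR latched) */
	printf("0x%08x\n", demo_drive_low(0x10000000, 4));
	return 0;
}
#endif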
1741
a22f0788
YR
1742int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1743{
1744 u32 sel_phy_idx = 0;
1745 if (bp->link_vars.link_up) {
1746 sel_phy_idx = EXT_PHY1;
1747 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1748 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1749 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1750 sel_phy_idx = EXT_PHY2;
1751 } else {
1752
1753 switch (bnx2x_phy_selection(&bp->link_params)) {
1754 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1755 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1756 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1757 sel_phy_idx = EXT_PHY1;
1758 break;
1759 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1760 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1761 sel_phy_idx = EXT_PHY2;
1762 break;
1763 }
1764 }
1765 /*
1766 * The selected active PHY is always the one after swapping (in case
1767 * PHY swapping is enabled), so when swapping is enabled we need to
1768 * reverse the configuration.
1769 */
1770
1771 if (bp->link_params.multi_phy_config &
1772 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1773 if (sel_phy_idx == EXT_PHY1)
1774 sel_phy_idx = EXT_PHY2;
1775 else if (sel_phy_idx == EXT_PHY2)
1776 sel_phy_idx = EXT_PHY1;
1777 }
1778 return LINK_CONFIG_IDX(sel_phy_idx);
1779}
1780
9f6c9258 1781void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1782{
a22f0788 1783 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1784 switch (bp->link_vars.ieee_fc &
1785 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1786 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1787 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1788 ADVERTISED_Pause);
c18487ee 1789 break;
356e2385 1790
c18487ee 1791 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1792 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1793 ADVERTISED_Pause);
c18487ee 1794 break;
356e2385 1795
c18487ee 1796 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1797 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1798 break;
356e2385 1799
c18487ee 1800 default:
a22f0788 1801 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1802 ADVERTISED_Pause);
c18487ee
YR
1803 break;
1804 }
1805}
f1410647 1806
9f6c9258 1807u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1808{
19680c48
EG
1809 if (!BP_NOMCP(bp)) {
1810 u8 rc;
a22f0788
YR
1811 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1812 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1813 /* Initialize link parameters structure variables */
8c99e7b0
YR
1814 /* It is recommended to turn off RX FC for jumbo frames
1815 for better performance */
f2e0899f 1816 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1817 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1818 else
c0700f90 1819 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1820
4a37fb66 1821 bnx2x_acquire_phy_lock(bp);
b5bf9068 1822
a22f0788 1823 if (load_mode == LOAD_DIAG) {
de6eae1f 1824 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1825 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1826 }
b5bf9068 1827
19680c48 1828 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1829
4a37fb66 1830 bnx2x_release_phy_lock(bp);
a2fbb9ea 1831
3c96c68b
EG
1832 bnx2x_calc_fc_adv(bp);
1833
b5bf9068
EG
1834 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1835 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1836 bnx2x_link_report(bp);
b5bf9068 1837 }
a22f0788 1838 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1839 return rc;
1840 }
f5372251 1841 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1842 return -EINVAL;
a2fbb9ea
ET
1843}
1844
9f6c9258 1845void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1846{
19680c48 1847 if (!BP_NOMCP(bp)) {
4a37fb66 1848 bnx2x_acquire_phy_lock(bp);
54c2fb78 1849 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1850 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1851 bnx2x_release_phy_lock(bp);
a2fbb9ea 1852
19680c48
EG
1853 bnx2x_calc_fc_adv(bp);
1854 } else
f5372251 1855 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1856}
a2fbb9ea 1857
c18487ee
YR
1858static void bnx2x__link_reset(struct bnx2x *bp)
1859{
19680c48 1860 if (!BP_NOMCP(bp)) {
4a37fb66 1861 bnx2x_acquire_phy_lock(bp);
589abe3a 1862 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1863 bnx2x_release_phy_lock(bp);
19680c48 1864 } else
f5372251 1865 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1866}
a2fbb9ea 1867
a22f0788 1868u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1869{
2145a920 1870 u8 rc = 0;
a2fbb9ea 1871
2145a920
VZ
1872 if (!BP_NOMCP(bp)) {
1873 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1874 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1875 is_serdes);
2145a920
VZ
1876 bnx2x_release_phy_lock(bp);
1877 } else
1878 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1879
c18487ee
YR
1880 return rc;
1881}
a2fbb9ea 1882
8a1c38d1 1883static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1884{
8a1c38d1
EG
1885 u32 r_param = bp->link_vars.line_speed / 8;
1886 u32 fair_periodic_timeout_usec;
1887 u32 t_fair;
34f80b04 1888
8a1c38d1
EG
1889 memset(&(bp->cmng.rs_vars), 0,
1890 sizeof(struct rate_shaping_vars_per_port));
1891 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1892
8a1c38d1
EG
1893 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1894 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1895
8a1c38d1
EG
1896 /* this is the threshold below which no timer arming will occur;
1897    the 1.25 coefficient makes the threshold a little bigger than
1898    the real time, to compensate for timer inaccuracy */
1899 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1900 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1901
8a1c38d1
EG
1902 /* resolution of fairness timer */
1903 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1904 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1905 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1906
8a1c38d1
EG
1907 /* this is the threshold below which we won't arm the timer anymore */
1908 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1909
8a1c38d1
EG
1910 /* we multiply by 1e3/8 to get bytes/msec.
1911    We don't want the credit to exceed
1912    t_fair*FAIR_MEM (the algorithm resolution) */
1913 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1914 /* since each tick is 4 usec */
1915 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1916}
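/*
 * Worked example (editor's note, assuming line_speed is in Mbit/s and
 * RS_PERIODIC_TIMEOUT_USEC = 100 as the tick comment above implies): on a
 * 10G link r_param = 10000 / 8 = 1250 bytes/usec, the rate-shaping
 * threshold becomes 100 * 1250 * 5 / 4 = 156250 bytes (the period padded
 * by the 1.25 coefficient), and t_fair = 1000 usec, matching the
 * "for 10G it is 1000usec" comment.
 */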
1917
2691d51d
EG
1918/* Calculates the sum of vn_min_rates.
1919 It's needed for further normalizing of the min_rates.
1920 Returns:
1921 sum of vn_min_rates.
1922 or
1923 0 - if all the min_rates are 0.
1924 In the latter case the fairness algorithm should be deactivated.
1925 If not all min_rates are zero then those that are zeroes will be set to 1.
1926 */
1927static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1928{
1929 int all_zero = 1;
2691d51d
EG
1930 int vn;
1931
1932 bp->vn_weight_sum = 0;
1933 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1934 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1935 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1936 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1937
1938 /* Skip hidden vns */
1939 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1940 continue;
1941
1942 /* If min rate is zero - set it to 1 */
1943 if (!vn_min_rate)
1944 vn_min_rate = DEF_MIN_RATE;
1945 else
1946 all_zero = 0;
1947
1948 bp->vn_weight_sum += vn_min_rate;
1949 }
1950
1951 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1952 if (all_zero) {
1953 bp->cmng.flags.cmng_enables &=
1954 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1955 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1956 " - fairness will be disabled\n");
1957 } else
1958 bp->cmng.flags.cmng_enables |=
1959 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1960}
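/*
 * Editor's sketch (standalone, hypothetical inputs): min-BW percentages
 * {0, 25, 0, 40} become {1, 2500, 1, 4000} after the *100 scaling and the
 * DEF_MIN_RATE substitution (DEF_MIN_RATE is assumed to be 1 here purely
 * for illustration), giving vn_weight_sum = 6502 and fairness enabled.
 */
#if 0	/* standalone demo; compile outside the kernel */
#include <stdio.h>

#define DEMO_DEF_MIN_RATE 1	/* assumed stand-in for DEF_MIN_RATE */

int main(void)
{
	int cfg[4] = { 0, 25, 0, 40 };	/* hypothetical min-BW % per vn */
	int vn, sum = 0, all_zero = 1;

	for (vn = 0; vn < 4; vn++) {
		int rate = cfg[vn] * 100;	/* same scaling as above */

		if (!rate)
			rate = DEMO_DEF_MIN_RATE;
		else
			all_zero = 0;
		sum += rate;
	}
	printf("vn_weight_sum=%d fairness=%s\n",
	       sum, all_zero ? "off" : "on");
	return 0;
}
#endif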
1961
f2e0899f 1962static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1963{
1964 struct rate_shaping_vars_per_vn m_rs_vn;
1965 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1966 u32 vn_cfg = bp->mf_config[vn];
1967 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1968 u16 vn_min_rate, vn_max_rate;
1969 int i;
1970
1971 /* If function is hidden - set min and max to zeroes */
1972 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1973 vn_min_rate = 0;
1974 vn_max_rate = 0;
1975
1976 } else {
1977 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1978 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1 1979 /* If min rate is zero - set it to 1 */
f2e0899f 1980 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
1981 vn_min_rate = DEF_MIN_RATE;
1982 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1983 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1984 }
f85582f8 1985
8a1c38d1 1986 DP(NETIF_MSG_IFUP,
b015e3d1 1987 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1988 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1989
1990 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1991 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1992
1993 /* global vn counter - maximal Mbps for this vn */
1994 m_rs_vn.vn_counter.rate = vn_max_rate;
1995
1996 /* quota - number of bytes transmitted in this period */
1997 m_rs_vn.vn_counter.quota =
1998 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1999
8a1c38d1 2000 if (bp->vn_weight_sum) {
34f80b04
EG
2001 /* credit for each period of the fairness algorithm:
2002 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2003 vn_weight_sum should not be larger than 10000, thus
2004 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2005 than zero */
34f80b04 2006 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
2007 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2008 (8 * bp->vn_weight_sum))),
2009 (bp->cmng.fair_vars.fair_threshold * 2));
2010 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2011 m_fair_vn.vn_credit_delta);
2012 }
2013
34f80b04
EG
2014 /* Store it to internal memory */
2015 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2016 REG_WR(bp, BAR_XSTRORM_INTMEM +
2017 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2018 ((u32 *)(&m_rs_vn))[i]);
2019
2020 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2021 REG_WR(bp, BAR_XSTRORM_INTMEM +
2022 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2023 ((u32 *)(&m_fair_vn))[i]);
2024}
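/*
 * Worked example (editor's note, again assuming
 * RS_PERIODIC_TIMEOUT_USEC = 100): a vn capped at vn_max_rate = 2500
 * Mbit/s gets quota = 2500 * 100 / 8 = 31250 bytes per period, exactly
 * the number of bytes 2.5 Gbit/s moves in 100 usec.
 */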
f85582f8 2025
523224a3
DK
2026static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2027{
2028 if (CHIP_REV_IS_SLOW(bp))
2029 return CMNG_FNS_NONE;
fb3bff17 2030 if (IS_MF(bp))
523224a3
DK
2031 return CMNG_FNS_MINMAX;
2032
2033 return CMNG_FNS_NONE;
2034}
2035
2036static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2037{
0793f83f 2038 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
523224a3
DK
2039
2040 if (BP_NOMCP(bp))
2041 return; /* what should the default value be in this case? */
2042
0793f83f
DK
2043 /* For 2 port configuration the absolute function number formula
2044 * is:
2045 * abs_func = 2 * vn + BP_PORT + BP_PATH
2046 *
2047 * and there are 4 functions per port
2048 *
2049 * For 4 port configuration it is
2050 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2051 *
2052 * and there are 2 functions per port
2053 */
523224a3 2054 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
0793f83f
DK
2055 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2056
2057 if (func >= E1H_FUNC_MAX)
2058 break;
2059
f2e0899f 2060 bp->mf_config[vn] =
523224a3
DK
2061 MF_CFG_RD(bp, func_mf_config[func].config);
2062 }
2063}
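/*
 * Worked example (editor's note): for vn = 1 on port 1, path 0 the loop
 * above computes abs_func = 1 * (2*1 + 1) + 0 = 3 in 2-port mode (n = 1)
 * and abs_func = 2 * (2*1 + 1) + 0 = 6 in 4-port mode (n = 2), matching
 * the two formulas in the comment block.
 */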
2064
2065static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2066{
2067
2068 if (cmng_type == CMNG_FNS_MINMAX) {
2069 int vn;
2070
2071 /* clear cmng_enables */
2072 bp->cmng.flags.cmng_enables = 0;
2073
2074 /* read mf conf from shmem */
2075 if (read_cfg)
2076 bnx2x_read_mf_cfg(bp);
2077
2078 /* Init rate shaping and fairness contexts */
2079 bnx2x_init_port_minmax(bp);
2080
2081 /* vn_weight_sum and enable fairness if not 0 */
2082 bnx2x_calc_vn_weight_sum(bp);
2083
2084 /* calculate and set min-max rate for each vn */
2085 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2086 bnx2x_init_vn_minmax(bp, vn);
2087
2088 /* always enable rate shaping and fairness */
2089 bp->cmng.flags.cmng_enables |=
2090 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2091 if (!bp->vn_weight_sum)
2092 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2093 " - fairness will be disabled\n");
2094 return;
2095 }
2096
2097 /* rate shaping and fairness are disabled */
2098 DP(NETIF_MSG_IFUP,
2099 "rate shaping and fairness are disabled\n");
2100}
34f80b04 2101
523224a3
DK
2102static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2103{
2104 int port = BP_PORT(bp);
2105 int func;
2106 int vn;
2107
2108 /* Set the attention towards other drivers on the same port */
2109 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2110 if (vn == BP_E1HVN(bp))
2111 continue;
2112
2113 func = ((vn << 1) | port);
2114 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2115 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2116 }
2117}
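/*
 * Worked example (editor's note): on port 1, notifying vn 2 writes the
 * general attention register slot LINK_SYNC_ATTENTION_BIT_FUNC_0 +
 * ((2 << 1) | 1), i.e. the slot of absolute function 5.
 */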
8a1c38d1 2118
c18487ee
YR
2119/* This function is called upon link interrupt */
2120static void bnx2x_link_attn(struct bnx2x *bp)
2121{
d9e8b185 2122 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2123 /* Make sure that we are synced with the current statistics */
2124 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2125
c18487ee 2126 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2127
bb2a0f7a
YG
2128 if (bp->link_vars.link_up) {
2129
1c06328c 2130 /* dropless flow control */
f2e0899f 2131 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2132 int port = BP_PORT(bp);
2133 u32 pause_enabled = 0;
2134
2135 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2136 pause_enabled = 1;
2137
2138 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2139 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2140 pause_enabled);
2141 }
2142
bb2a0f7a
YG
2143 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2144 struct host_port_stats *pstats;
2145
2146 pstats = bnx2x_sp(bp, port_stats);
2147 /* reset old bmac stats */
2148 memset(&(pstats->mac_stx[0]), 0,
2149 sizeof(struct mac_stx));
2150 }
f34d28ea 2151 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2152 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2153 }
2154
d9e8b185
VZ
2155 /* indicate link status only if link status actually changed */
2156 if (prev_link_status != bp->link_vars.link_status)
2157 bnx2x_link_report(bp);
34f80b04 2158
f2e0899f
DK
2159 if (IS_MF(bp))
2160 bnx2x_link_sync_notify(bp);
34f80b04 2161
f2e0899f
DK
2162 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2163 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2164
f2e0899f
DK
2165 if (cmng_fns != CMNG_FNS_NONE) {
2166 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2167 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2168 } else
2169 /* rate shaping and fairness are disabled */
2170 DP(NETIF_MSG_IFUP,
2171 "single function mode without fairness\n");
34f80b04 2172 }
c18487ee 2173}
a2fbb9ea 2174
9f6c9258 2175void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2176{
f34d28ea 2177 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2178 return;
a2fbb9ea 2179
c18487ee 2180 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2181
bb2a0f7a
YG
2182 if (bp->link_vars.link_up)
2183 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2184 else
2185 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2186
f2e0899f
DK
2187 /* the link status update could be the result of a DCC event,
2188    hence re-read the shmem mf configuration */
2189 bnx2x_read_mf_cfg(bp);
2691d51d 2190
c18487ee
YR
2191 /* indicate link status */
2192 bnx2x_link_report(bp);
a2fbb9ea 2193}
a2fbb9ea 2194
34f80b04
EG
2195static void bnx2x_pmf_update(struct bnx2x *bp)
2196{
2197 int port = BP_PORT(bp);
2198 u32 val;
2199
2200 bp->port.pmf = 1;
2201 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2202
2203 /* enable nig attention */
2204 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2205 if (bp->common.int_block == INT_BLOCK_HC) {
2206 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2207 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2208 } else if (CHIP_IS_E2(bp)) {
2209 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2210 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2211 }
bb2a0f7a
YG
2212
2213 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2214}
2215
c18487ee 2216/* end of Link */
a2fbb9ea
ET
2217
2218/* slow path */
2219
2220/*
2221 * General service functions
2222 */
2223
2691d51d 2224/* send the MCP a request, block until there is a reply */
a22f0788 2225u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2226{
f2e0899f 2227 int mb_idx = BP_FW_MB_IDX(bp);
2691d51d
EG
2228 u32 seq = ++bp->fw_seq;
2229 u32 rc = 0;
2230 u32 cnt = 1;
2231 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2232
c4ff7cbf 2233 mutex_lock(&bp->fw_mb_mutex);
f2e0899f
DK
2234 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2235 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2236
2691d51d
EG
2237 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2238
2239 do {
2240 /* let the FW do its magic ... */
2241 msleep(delay);
2242
f2e0899f 2243 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2244
c4ff7cbf
EG
2245 /* Give the FW up to 5 seconds (500*10ms) */
2246 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2247
2248 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2249 cnt*delay, rc, seq);
2250
2251 /* is this a reply to our command? */
2252 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2253 rc &= FW_MSG_CODE_MASK;
2254 else {
2255 /* FW BUG! */
2256 BNX2X_ERR("FW failed to respond!\n");
2257 bnx2x_fw_dump(bp);
2258 rc = 0;
2259 }
c4ff7cbf 2260 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2261
2262 return rc;
2263}
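/*
 * Editor's sketch (standalone): the mailbox exchange above rides a rolling
 * sequence number in the low bits of the header -- the driver writes
 * (command | seq) and polls until the firmware echoes a header whose low
 * bits equal seq.  FW_MSG_SEQ_NUMBER_MASK is taken as 0xffff below purely
 * for illustration.
 */
#if 0	/* standalone demo; compile outside the kernel */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SEQ_MASK 0xffffu	/* assumed mask, illustration only */

int main(void)
{
	uint32_t seq = 0x0042, command = 0x70000000;
	uint32_t request = command | seq;
	uint32_t reply = 0x80000042;	/* pretend firmware echo */

	printf("request 0x%08x, reply %s\n", request,
	       (reply & DEMO_SEQ_MASK) == seq ? "matches" : "stale");
	return 0;
}
#endif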
2264
ec6ba945
VZ
2265static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2266{
2267#ifdef BCM_CNIC
2268 if (IS_FCOE_FP(fp) && IS_MF(bp))
2269 return false;
2270#endif
2271 return true;
2272}
2273
523224a3 2274/* must be called under rtnl_lock */
8d96286a 2275static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2276{
523224a3 2277 u32 mask = (1 << cl_id);
2691d51d 2278
523224a3
DK
2279 /* initial setting is BNX2X_ACCEPT_NONE */
2280 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2281 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2282 u8 unmatched_unicast = 0;
2691d51d 2283
0793f83f
DK
2284 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2285 unmatched_unicast = 1;
2286
523224a3
DK
2287 if (filters & BNX2X_PROMISCUOUS_MODE) {
2288 /* promiscuous - accept all, drop none */
2289 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2290 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
0793f83f
DK
2291 if (IS_MF_SI(bp)) {
2292 /*
2293 * In SI mode, promiscuous mode accepts
2294 * only unmatched packets
2295 */
2296 unmatched_unicast = 1;
2297 accp_all_ucast = 0;
2298 }
523224a3
DK
2299 }
2300 if (filters & BNX2X_ACCEPT_UNICAST) {
2301 /* accept matched ucast */
2302 drop_all_ucast = 0;
2303 }
d9c8f498 2304 if (filters & BNX2X_ACCEPT_MULTICAST)
523224a3
DK
2305 /* accept matched mcast */
2306 drop_all_mcast = 0;
d9c8f498 2307
523224a3
DK
2308 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2309 /* accept all ucast */
2310 drop_all_ucast = 0;
2311 accp_all_ucast = 1;
2312 }
2313 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2314 /* accept all mcast */
2315 drop_all_mcast = 0;
2316 accp_all_mcast = 1;
2317 }
2318 if (filters & BNX2X_ACCEPT_BROADCAST) {
2319 /* accept (all) bcast */
2320 drop_all_bcast = 0;
2321 accp_all_bcast = 1;
2322 }
2691d51d 2323
523224a3
DK
2324 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2325 bp->mac_filters.ucast_drop_all | mask :
2326 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2327
523224a3
DK
2328 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2329 bp->mac_filters.mcast_drop_all | mask :
2330 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2331
523224a3
DK
2332 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2333 bp->mac_filters.bcast_drop_all | mask :
2334 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2335
523224a3
DK
2336 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2337 bp->mac_filters.ucast_accept_all | mask :
2338 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2339
523224a3
DK
2340 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2341 bp->mac_filters.mcast_accept_all | mask :
2342 bp->mac_filters.mcast_accept_all & ~mask;
2343
2344 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2345 bp->mac_filters.bcast_accept_all | mask :
2346 bp->mac_filters.bcast_accept_all & ~mask;
2347
2348 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2349 bp->mac_filters.unmatched_unicast | mask :
2350 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2351}
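/*
 * Editor's sketch (standalone): every filter field above is updated with
 * the same conditional-bit idiom -- set the client's bit when the policy
 * applies, clear it otherwise, leaving every other client's bit alone.
 */
#if 0	/* standalone demo; compile outside the kernel */
#include <stdio.h>
#include <stdint.h>

static uint32_t set_client_bit(uint32_t field, int cl_id, int on)
{
	uint32_t mask = 1u << cl_id;

	return on ? (field | mask) : (field & ~mask);
}

int main(void)
{
	uint32_t drop_all_ucast = 0x5;	/* clients 0 and 2 drop today */

	/* client 2 leaves drop-all: expect 0x1 */
	printf("0x%x\n", set_client_bit(drop_all_ucast, 2, 0));
	return 0;
}
#endif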
2352
8d96286a 2353static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2354{
030f3356
DK
2355 struct tstorm_eth_function_common_config tcfg = {0};
2356 u16 rss_flgs;
2691d51d 2357
030f3356
DK
2358 /* tpa */
2359 if (p->func_flgs & FUNC_FLG_TPA)
2360 tcfg.config_flags |=
2361 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2362
030f3356
DK
2363 /* set rss flags */
2364 rss_flgs = (p->rss->mode <<
2365 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2366
2367 if (p->rss->cap & RSS_IPV4_CAP)
2368 rss_flgs |= RSS_IPV4_CAP_MASK;
2369 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2370 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2371 if (p->rss->cap & RSS_IPV6_CAP)
2372 rss_flgs |= RSS_IPV6_CAP_MASK;
2373 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2374 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2375
2376 tcfg.config_flags |= rss_flgs;
2377 tcfg.rss_result_mask = p->rss->result_mask;
2378
2379 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2380
523224a3
DK
2381 /* Enable the function in the FW */
2382 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2383 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2384
523224a3
DK
2385 /* statistics */
2386 if (p->func_flgs & FUNC_FLG_STATS) {
2387 struct stats_indication_flags stats_flags = {0};
2388 stats_flags.collect_eth = 1;
2691d51d 2389
523224a3
DK
2390 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2391 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2392
523224a3
DK
2393 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2394 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2395
523224a3
DK
2396 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2397 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2398
523224a3
DK
2399 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2400 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2401 }
2402
523224a3
DK
2403 /* spq */
2404 if (p->func_flgs & FUNC_FLG_SPQ) {
2405 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2406 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2407 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2408 }
2691d51d
EG
2409}
2410
523224a3
DK
2411static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2412 struct bnx2x_fastpath *fp)
28912902 2413{
523224a3 2414 u16 flags = 0;
28912902 2415
523224a3
DK
2416 /* calculate queue flags */
2417 flags |= QUEUE_FLG_CACHE_ALIGN;
2418 flags |= QUEUE_FLG_HC;
0793f83f 2419 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
28912902 2420
523224a3
DK
2421 flags |= QUEUE_FLG_VLAN;
2422 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
523224a3
DK
2423
2424 if (!fp->disable_tpa)
2425 flags |= QUEUE_FLG_TPA;
2426
ec6ba945
VZ
2427 flags = stat_counter_valid(bp, fp) ?
2428 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
523224a3
DK
2429
2430 return flags;
2431}
2432
2433static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2434 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2435 struct bnx2x_rxq_init_params *rxq_init)
2436{
2437 u16 max_sge = 0;
2438 u16 sge_sz = 0;
2439 u16 tpa_agg_size = 0;
2440
2441 /* calculate queue flags */
2442 u16 flags = bnx2x_get_cl_flags(bp, fp);
2443
2444 if (!fp->disable_tpa) {
2445 pause->sge_th_hi = 250;
2446 pause->sge_th_lo = 150;
2447 tpa_agg_size = min_t(u32,
2448 (min_t(u32, 8, MAX_SKB_FRAGS) *
2449 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2450 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2451 SGE_PAGE_SHIFT;
2452 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2453 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2454 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2455 0xffff);
2456 }
2457
2458 /* pause - not for e1 */
2459 if (!CHIP_IS_E1(bp)) {
2460 pause->bd_th_hi = 350;
2461 pause->bd_th_lo = 250;
2462 pause->rcq_th_hi = 350;
2463 pause->rcq_th_lo = 250;
2464 pause->sge_th_hi = 0;
2465 pause->sge_th_lo = 0;
2466 pause->pri_map = 1;
2467 }
2468
2469 /* rxq setup */
2470 rxq_init->flags = flags;
2471 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2472 rxq_init->dscr_map = fp->rx_desc_mapping;
2473 rxq_init->sge_map = fp->rx_sge_mapping;
2474 rxq_init->rcq_map = fp->rx_comp_mapping;
2475 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
a8c94b91
VZ
2476
2477 /* Always use mini-jumbo MTU for FCoE L2 ring */
2478 if (IS_FCOE_FP(fp))
2479 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2480 else
2481 rxq_init->mtu = bp->dev->mtu;
2482
2483 rxq_init->buf_sz = fp->rx_buf_size;
523224a3
DK
2484 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2485 rxq_init->cl_id = fp->cl_id;
2486 rxq_init->spcl_id = fp->cl_id;
2487 rxq_init->stat_id = fp->cl_id;
2488 rxq_init->tpa_agg_sz = tpa_agg_size;
2489 rxq_init->sge_buf_sz = sge_sz;
2490 rxq_init->max_sges_pkt = max_sge;
2491 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2492 rxq_init->fw_sb_id = fp->fw_sb_id;
2493
ec6ba945
VZ
2494 if (IS_FCOE_FP(fp))
2495 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2496 else
2497 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
523224a3
DK
2498
2499 rxq_init->cid = HW_CID(bp, fp->cid);
2500
2501 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2502}
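/*
 * Worked example (editor's note, assuming SGE_PAGE_SIZE = 4096 and
 * PAGES_PER_SGE = 2): the 8-fragment cap gives tpa_agg_size =
 * min(8 * 4096 * 2, 0xffff) = 0xffff bytes, and mtu = 9000 aligns to
 * 12288 bytes = 3 pages, rounded up to a whole SGE:
 * ((3 + 1) & ~1) >> 1 = 2 SGEs per aggregated packet.
 */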
2503
2504static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2505 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2506{
2507 u16 flags = bnx2x_get_cl_flags(bp, fp);
2508
2509 txq_init->flags = flags;
2510 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2511 txq_init->dscr_map = fp->tx_desc_mapping;
2512 txq_init->stat_id = fp->cl_id;
2513 txq_init->cid = HW_CID(bp, fp->cid);
2514 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2515 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2516 txq_init->fw_sb_id = fp->fw_sb_id;
ec6ba945
VZ
2517
2518 if (IS_FCOE_FP(fp)) {
2519 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2520 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2521 }
2522
523224a3
DK
2523 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2524}
2525
8d96286a 2526static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
2527{
2528 struct bnx2x_func_init_params func_init = {0};
2529 struct bnx2x_rss_params rss = {0};
2530 struct event_ring_data eq_data = { {0} };
2531 u16 flags;
2532
2533 /* pf specific setups */
2534 if (!CHIP_IS_E1(bp))
fb3bff17 2535 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2536
f2e0899f
DK
2537 if (CHIP_IS_E2(bp)) {
2538 /* reset IGU PF statistics: MSIX + ATTN */
2539 /* PF */
2540 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2541 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2542 (CHIP_MODE_IS_4_PORT(bp) ?
2543 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2544 /* ATTN */
2545 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2546 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2547 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2548 (CHIP_MODE_IS_4_PORT(bp) ?
2549 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2550 }
2551
523224a3
DK
2552 /* function setup flags */
2553 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2554
f2e0899f
DK
2555 if (CHIP_IS_E1x(bp))
2556 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2557 else
2558 flags |= FUNC_FLG_TPA;
523224a3 2559
030f3356
DK
2560 /* function setup */
2561
523224a3
DK
2562 /**
2563 * Although RSS is meaningless when there is a single HW queue, we
2564 * still need it enabled in order to have HW Rx hash generated.
523224a3 2565 */
030f3356
DK
2566 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2567 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2568 rss.mode = bp->multi_mode;
2569 rss.result_mask = MULTI_MASK;
2570 func_init.rss = &rss;
523224a3
DK
2571
2572 func_init.func_flgs = flags;
2573 func_init.pf_id = BP_FUNC(bp);
2574 func_init.func_id = BP_FUNC(bp);
2575 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2576 func_init.spq_map = bp->spq_mapping;
2577 func_init.spq_prod = bp->spq_prod_idx;
2578
2579 bnx2x_func_init(bp, &func_init);
2580
2581 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2582
2583 /*
2584 Congestion management values depend on the link rate.
2585 There is no active link yet, so the initial link rate is set to 10 Gbps.
2586 When the link comes up, the congestion management values are
2587 re-calculated according to the actual link rate.
2588 */
2589 bp->link_vars.line_speed = SPEED_10000;
2590 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2591
2592 /* Only the PMF sets the HW */
2593 if (bp->port.pmf)
2594 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2595
2596 /* no rx until link is up */
2597 bp->rx_mode = BNX2X_RX_MODE_NONE;
2598 bnx2x_set_storm_rx_mode(bp);
2599
2600 /* init Event Queue */
2601 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2602 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2603 eq_data.producer = bp->eq_prod;
2604 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2605 eq_data.sb_id = DEF_SB_ID;
2606 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2607}
2608
2609
2610static void bnx2x_e1h_disable(struct bnx2x *bp)
2611{
2612 int port = BP_PORT(bp);
2613
2614 netif_tx_disable(bp->dev);
2615
2616 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2617
2618 netif_carrier_off(bp->dev);
2619}
2620
2621static void bnx2x_e1h_enable(struct bnx2x *bp)
2622{
2623 int port = BP_PORT(bp);
2624
2625 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2626
2627 /* Tx queues only need to be re-enabled */
2628 netif_tx_wake_all_queues(bp->dev);
2629
2630 /*
2631 * Do not call netif_carrier_on here; it will be called when the
2632 * link state is checked and the link turns out to be up
2633 */
2634}
2635
0793f83f
DK
2636/* called due to MCP event (on pmf):
2637 * reread new bandwidth configuration
2638 * configure FW
2639 * notify others function about the change
2640 */
2641static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2642{
2643 if (bp->link_vars.link_up) {
2644 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2645 bnx2x_link_sync_notify(bp);
2646 }
2647 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2648}
2649
2650static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2651{
2652 bnx2x_config_mf_bw(bp);
2653 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2654}
2655
523224a3
DK
2656static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2657{
2658 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2659
2660 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2661
2662 /*
2663 * This is the only place besides the function initialization
2664 * where the bp->flags can change so it is done without any
2665 * locks
2666 */
f2e0899f 2667 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2668 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2669 bp->flags |= MF_FUNC_DIS;
2670
2671 bnx2x_e1h_disable(bp);
2672 } else {
2673 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2674 bp->flags &= ~MF_FUNC_DIS;
2675
2676 bnx2x_e1h_enable(bp);
2677 }
2678 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2679 }
2680 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 2681 bnx2x_config_mf_bw(bp);
523224a3
DK
2682 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2683 }
2684
2685 /* Report results to MCP */
2686 if (dcc_event)
2687 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2688 else
2689 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2690}
2691
2692/* must be called under the spq lock */
2693static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2694{
2695 struct eth_spe *next_spe = bp->spq_prod_bd;
2696
2697 if (bp->spq_prod_bd == bp->spq_last_bd) {
2698 bp->spq_prod_bd = bp->spq;
2699 bp->spq_prod_idx = 0;
2700 DP(NETIF_MSG_TIMER, "end of spq\n");
2701 } else {
2702 bp->spq_prod_bd++;
2703 bp->spq_prod_idx++;
2704 }
2705 return next_spe;
2706}
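/*
 * Editor's sketch (standalone): the producer walk above is a plain bounded
 * ring -- advance until the last BD, then snap back to the base and restart
 * the index at zero.
 */
#if 0	/* standalone demo; compile outside the kernel */
#include <stdio.h>

int main(void)
{
	int prod = 0, last = 7, i;	/* hypothetical 8-entry ring */

	for (i = 0; i < 10; i++) {
		printf("%d ", prod);
		prod = (prod == last) ? 0 : prod + 1;
	}
	printf("\n");	/* 0 1 2 3 4 5 6 7 0 1 */
	return 0;
}
#endif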
2707
2708/* must be called under the spq lock */
28912902
MC
2709static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2710{
2711 int func = BP_FUNC(bp);
2712
2713 /* Make sure that BD data is updated before writing the producer */
2714 wmb();
2715
523224a3 2716 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2717 bp->spq_prod_idx);
28912902
MC
2718 mmiowb();
2719}
2720
a2fbb9ea 2721/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2722int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2723 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2724{
28912902 2725 struct eth_spe *spe;
523224a3 2726 u16 type;
a2fbb9ea 2727
a2fbb9ea
ET
2728#ifdef BNX2X_STOP_ON_ERROR
2729 if (unlikely(bp->panic))
2730 return -EIO;
2731#endif
2732
34f80b04 2733 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2734
6e30dd4e
VZ
2735 if (common) {
2736 if (!atomic_read(&bp->eq_spq_left)) {
2737 BNX2X_ERR("BUG! EQ ring full!\n");
2738 spin_unlock_bh(&bp->spq_lock);
2739 bnx2x_panic();
2740 return -EBUSY;
2741 }
2742 } else if (!atomic_read(&bp->cq_spq_left)) {
2743 BNX2X_ERR("BUG! SPQ ring full!\n");
2744 spin_unlock_bh(&bp->spq_lock);
2745 bnx2x_panic();
2746 return -EBUSY;
a2fbb9ea 2747 }
f1410647 2748
28912902
MC
2749 spe = bnx2x_sp_get_next(bp);
2750
a2fbb9ea 2751 /* CID needs port number to be encoded in it */
28912902 2752 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2753 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2754 HW_CID(bp, cid));
523224a3 2755
a2fbb9ea 2756 if (common)
523224a3
DK
2757 /* Common ramrods:
2758 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2759 * TRAFFIC_STOP, TRAFFIC_START
2760 */
2761 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2762 & SPE_HDR_CONN_TYPE;
2763 else
2764 /* ETH ramrods: SETUP, HALT */
2765 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2766 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2767
523224a3
DK
2768 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2769 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2770
523224a3
DK
2771 spe->hdr.type = cpu_to_le16(type);
2772
2773 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2774 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2775
2776 /* stats ramrod has its own slot on the spq */
6e30dd4e 2777 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
523224a3
DK
2778 /* It's ok if the actual decrement is issued towards the memory
2779 * somewhere between the spin_lock and spin_unlock. Thus no
2780 * more explicit memory barrier is needed.
2781 */
6e30dd4e
VZ
2782 if (common)
2783 atomic_dec(&bp->eq_spq_left);
2784 else
2785 atomic_dec(&bp->cq_spq_left);
2786 }
2787
a2fbb9ea 2788
cdaa7cb8 2789 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3 2790 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
6e30dd4e 2791 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
cdaa7cb8
VZ
2792 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2793 (u32)(U64_LO(bp->spq_mapping) +
2794 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
6e30dd4e
VZ
2795 HW_CID(bp, cid), data_hi, data_lo, type,
2796 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
cdaa7cb8 2797
28912902 2798 bnx2x_sp_prod_update(bp);
34f80b04 2799 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2800 return 0;
2801}
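/*
 * Hypothetical usage (editor's sketch -- the real call sites live elsewhere
 * in the driver): halting an ETH connection would look roughly like
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, fp->cid,
 *			   0, fp->cl_id, 0);
 *
 * with common = 0, so the element is accounted against cq_spq_left rather
 * than eq_spq_left.
 */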
2802
2803/* acquire split MCP access lock register */
4a37fb66 2804static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2805{
72fd0718 2806 u32 j, val;
34f80b04 2807 int rc = 0;
a2fbb9ea
ET
2808
2809 might_sleep();
72fd0718 2810 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2811 val = (1UL << 31);
2812 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2813 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2814 if (val & (1L << 31))
2815 break;
2816
2817 msleep(5);
2818 }
a2fbb9ea 2819 if (!(val & (1L << 31))) {
19680c48 2820 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2821 rc = -EBUSY;
2822 }
2823
2824 return rc;
2825}
2826
4a37fb66
YG
2827/* release split MCP access lock register */
2828static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2829{
72fd0718 2830 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2831}
2832
523224a3
DK
2833#define BNX2X_DEF_SB_ATT_IDX 0x0001
2834#define BNX2X_DEF_SB_IDX 0x0002
2835
a2fbb9ea
ET
2836static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2837{
523224a3 2838 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2839 u16 rc = 0;
2840
2841 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2842 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2843 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2844 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2845 }
523224a3
DK
2846
2847 if (bp->def_idx != def_sb->sp_sb.running_index) {
2848 bp->def_idx = def_sb->sp_sb.running_index;
2849 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2850 }
523224a3
DK
2851
2852 /* Do not reorder: index reads must complete before handling */
2853 barrier();
a2fbb9ea
ET
2854 return rc;
2855}
2856
2857/*
2858 * slow path service functions
2859 */
2860
2861static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2862{
34f80b04 2863 int port = BP_PORT(bp);
a2fbb9ea
ET
2864 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2865 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2866 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2867 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2868 u32 aeu_mask;
87942b46 2869 u32 nig_mask = 0;
f2e0899f 2870 u32 reg_addr;
a2fbb9ea 2871
a2fbb9ea
ET
2872 if (bp->attn_state & asserted)
2873 BNX2X_ERR("IGU ERROR\n");
2874
3fcaf2e5
EG
2875 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2876 aeu_mask = REG_RD(bp, aeu_addr);
2877
a2fbb9ea 2878 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2879 aeu_mask, asserted);
72fd0718 2880 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2881 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2882
3fcaf2e5
EG
2883 REG_WR(bp, aeu_addr, aeu_mask);
2884 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2885
3fcaf2e5 2886 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2887 bp->attn_state |= asserted;
3fcaf2e5 2888 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2889
2890 if (asserted & ATTN_HARD_WIRED_MASK) {
2891 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2892
a5e9a7cf
EG
2893 bnx2x_acquire_phy_lock(bp);
2894
877e9aa4 2895 /* save nig interrupt mask */
87942b46 2896 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2897 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2898
c18487ee 2899 bnx2x_link_attn(bp);
a2fbb9ea
ET
2900
2901 /* handle unicore attn? */
2902 }
2903 if (asserted & ATTN_SW_TIMER_4_FUNC)
2904 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2905
2906 if (asserted & GPIO_2_FUNC)
2907 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2908
2909 if (asserted & GPIO_3_FUNC)
2910 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2911
2912 if (asserted & GPIO_4_FUNC)
2913 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2914
2915 if (port == 0) {
2916 if (asserted & ATTN_GENERAL_ATTN_1) {
2917 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2918 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2919 }
2920 if (asserted & ATTN_GENERAL_ATTN_2) {
2921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2923 }
2924 if (asserted & ATTN_GENERAL_ATTN_3) {
2925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2927 }
2928 } else {
2929 if (asserted & ATTN_GENERAL_ATTN_4) {
2930 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2931 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2932 }
2933 if (asserted & ATTN_GENERAL_ATTN_5) {
2934 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2935 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2936 }
2937 if (asserted & ATTN_GENERAL_ATTN_6) {
2938 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2939 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2940 }
2941 }
2942
2943 } /* if hardwired */
2944
f2e0899f
DK
2945 if (bp->common.int_block == INT_BLOCK_HC)
2946 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2947 COMMAND_REG_ATTN_BITS_SET);
2948 else
2949 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2950
2951 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2952 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2953 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2954
2955 /* now set back the mask */
a5e9a7cf 2956 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2957 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2958 bnx2x_release_phy_lock(bp);
2959 }
a2fbb9ea
ET
2960}
2961
fd4ef40d
EG
2962static inline void bnx2x_fan_failure(struct bnx2x *bp)
2963{
2964 int port = BP_PORT(bp);
b7737c9b 2965 u32 ext_phy_config;
fd4ef40d 2966 /* mark the failure */
b7737c9b
YR
2967 ext_phy_config =
2968 SHMEM_RD(bp,
2969 dev_info.port_hw_config[port].external_phy_config);
2970
2971 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2972 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2973 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2974 ext_phy_config);
fd4ef40d
EG
2975
2976 /* log the failure */
cdaa7cb8
VZ
2977 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2978 " the driver to shutdown the card to prevent permanent"
2979 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2980}
ab6ad5a4 2981
877e9aa4 2982static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2983{
34f80b04 2984 int port = BP_PORT(bp);
877e9aa4 2985 int reg_offset;
d90d96ba 2986 u32 val;
877e9aa4 2987
34f80b04
EG
2988 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2989 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2990
34f80b04 2991 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2992
2993 val = REG_RD(bp, reg_offset);
2994 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2995 REG_WR(bp, reg_offset, val);
2996
2997 BNX2X_ERR("SPIO5 hw attention\n");
2998
fd4ef40d 2999 /* Fan failure attention */
d90d96ba 3000 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 3001 bnx2x_fan_failure(bp);
877e9aa4 3002 }
34f80b04 3003
589abe3a
EG
3004 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3005 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3006 bnx2x_acquire_phy_lock(bp);
3007 bnx2x_handle_module_detect_int(&bp->link_params);
3008 bnx2x_release_phy_lock(bp);
3009 }
3010
34f80b04
EG
3011 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3012
3013 val = REG_RD(bp, reg_offset);
3014 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3015 REG_WR(bp, reg_offset, val);
3016
3017 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3018 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3019 bnx2x_panic();
3020 }
877e9aa4
ET
3021}
3022
3023static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3024{
3025 u32 val;
3026
0626b899 3027 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3028
3029 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3030 BNX2X_ERR("DB hw attention 0x%x\n", val);
3031 /* DORQ discard attention */
3032 if (val & 0x2)
3033 BNX2X_ERR("FATAL error from DORQ\n");
3034 }
34f80b04
EG
3035
3036 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3037
3038 int port = BP_PORT(bp);
3039 int reg_offset;
3040
3041 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3042 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3043
3044 val = REG_RD(bp, reg_offset);
3045 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3046 REG_WR(bp, reg_offset, val);
3047
3048 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3049 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3050 bnx2x_panic();
3051 }
877e9aa4
ET
3052}
3053
3054static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3055{
3056 u32 val;
3057
3058 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3059
3060 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3061 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3062 /* CFC error attention */
3063 if (val & 0x2)
3064 BNX2X_ERR("FATAL error from CFC\n");
3065 }
3066
3067 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3068
3069 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3070 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3071 /* RQ_USDMDP_FIFO_OVERFLOW */
3072 if (val & 0x18000)
3073 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
3074 if (CHIP_IS_E2(bp)) {
3075 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3076 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3077 }
877e9aa4 3078 }
34f80b04
EG
3079
3080 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3081
3082 int port = BP_PORT(bp);
3083 int reg_offset;
3084
3085 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3086 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3087
3088 val = REG_RD(bp, reg_offset);
3089 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3090 REG_WR(bp, reg_offset, val);
3091
3092 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3093 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3094 bnx2x_panic();
3095 }
877e9aa4
ET
3096}
3097
3098static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3099{
34f80b04
EG
3100 u32 val;
3101
877e9aa4
ET
3102 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3103
34f80b04
EG
3104 if (attn & BNX2X_PMF_LINK_ASSERT) {
3105 int func = BP_FUNC(bp);
3106
3107 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3108 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3109 func_mf_config[BP_ABS_FUNC(bp)].config);
3110 val = SHMEM_RD(bp,
3111 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3112 if (val & DRV_STATUS_DCC_EVENT_MASK)
3113 bnx2x_dcc_event(bp,
3114 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
3115
3116 if (val & DRV_STATUS_SET_MF_BW)
3117 bnx2x_set_mf_bw(bp);
3118
34f80b04 3119 bnx2x__link_status_update(bp);
2691d51d 3120 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3121 bnx2x_pmf_update(bp);
3122
e4901dde 3123 if (bp->port.pmf &&
785b9b1a
SR
3124 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3125 bp->dcbx_enabled > 0)
e4901dde
VZ
3126 /* start dcbx state machine */
3127 bnx2x_dcbx_set_params(bp,
3128 BNX2X_DCBX_STATE_NEG_RECEIVED);
34f80b04 3129 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3130
3131 BNX2X_ERR("MC assert!\n");
3132 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3133 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3134 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3136 bnx2x_panic();
3137
3138 } else if (attn & BNX2X_MCP_ASSERT) {
3139
3140 BNX2X_ERR("MCP assert!\n");
3141 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3142 bnx2x_fw_dump(bp);
877e9aa4
ET
3143
3144 } else
3145 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3146 }
3147
3148 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3149 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3150 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3151 val = CHIP_IS_E1(bp) ? 0 :
3152 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3153 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3154 }
3155 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3156 val = CHIP_IS_E1(bp) ? 0 :
3157 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3158 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3159 }
877e9aa4 3160 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3161 }
3162}
3163
72fd0718
VZ
3164#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3165#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3166#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3167#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3168#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
f85582f8 3169
72fd0718
VZ
3170/*
3171 * should be run under rtnl lock
3172 */
3173static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3174{
3175 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3176 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3177 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3178 barrier();
3179 mmiowb();
3180}
3181
3182/*
3183 * should be run under rtnl lock
3184 */
3185static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3186{
3187 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188 val |= (1 << RESET_DONE_FLAG_SHIFT);
3189 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3190 barrier();
3191 mmiowb();
3192}
3193
3194/*
3195 * should be run under rtnl lock
3196 */
9f6c9258 3197bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3198{
3199 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3201 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3202}
3203
3204/*
3205 * should be run under rtnl lock
3206 */
9f6c9258 3207inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3208{
3209 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210
3211 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3212
3213 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3214 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3215 barrier();
3216 mmiowb();
3217}
3218
3219/*
3220 * should be run under rtnl lock
3221 */
9f6c9258 3222u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3223{
3224 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225
3226 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3227
3228 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3229 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230 barrier();
3231 mmiowb();
3232
3233 return val1;
3234}
3235
3236/*
3237 * should be run under rtnl lock
3238 */
3239static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3240{
3241 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3242}
3243
3244static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3245{
3246 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3247 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3248}
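/*
 * Editor's sketch (standalone): the scratch register above packs a 16-bit
 * load counter in the low bits and the reset-in-progress flag just above
 * it, so increment/decrement must mask the counter while preserving the
 * flag bits.
 */
#if 0	/* standalone demo; compile outside the kernel */
#include <stdio.h>
#include <stdint.h>

#define DEMO_CNT_BITS	16
#define DEMO_CNT_MASK	((1u << DEMO_CNT_BITS) - 1)
#define DEMO_FLAG_MASK	(~DEMO_CNT_MASK)

int main(void)
{
	/* bit 16 set (reset in progress), two loads counted */
	uint32_t reg = (1u << DEMO_CNT_BITS) | 2;
	uint32_t cnt = ((reg & DEMO_CNT_MASK) + 1) & DEMO_CNT_MASK;

	reg = (reg & DEMO_FLAG_MASK) | cnt;	/* inc keeps the flag */
	printf("0x%08x count=%u\n", reg, reg & DEMO_CNT_MASK);
	return 0;
}
#endif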
3249
3250static inline void _print_next_block(int idx, const char *blk)
3251{
3252 if (idx)
3253 pr_cont(", ");
3254 pr_cont("%s", blk);
3255}
3256
3257static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3258{
3259 int i = 0;
3260 u32 cur_bit = 0;
3261 for (i = 0; sig; i++) {
3262 cur_bit = ((u32)0x1 << i);
3263 if (sig & cur_bit) {
3264 switch (cur_bit) {
3265 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3266 _print_next_block(par_num++, "BRB");
3267 break;
3268 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3269 _print_next_block(par_num++, "PARSER");
3270 break;
3271 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3272 _print_next_block(par_num++, "TSDM");
3273 break;
3274 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3275 _print_next_block(par_num++, "SEARCHER");
3276 break;
3277 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3278 _print_next_block(par_num++, "TSEMI");
3279 break;
3280 }
3281
3282 /* Clear the bit */
3283 sig &= ~cur_bit;
3284 }
3285 }
3286
3287 return par_num;
3288}
3289
3290static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3291{
3292 int i = 0;
3293 u32 cur_bit = 0;
3294 for (i = 0; sig; i++) {
3295 cur_bit = ((u32)0x1 << i);
3296 if (sig & cur_bit) {
3297 switch (cur_bit) {
3298 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3299 _print_next_block(par_num++, "PBCLIENT");
3300 break;
3301 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3302 _print_next_block(par_num++, "QM");
3303 break;
3304 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3305 _print_next_block(par_num++, "XSDM");
3306 break;
3307 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3308 _print_next_block(par_num++, "XSEMI");
3309 break;
3310 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3311 _print_next_block(par_num++, "DOORBELLQ");
3312 break;
3313 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3314 _print_next_block(par_num++, "VAUX PCI CORE");
3315 break;
3316 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3317 _print_next_block(par_num++, "DEBUG");
3318 break;
3319 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3320 _print_next_block(par_num++, "USDM");
3321 break;
3322 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3323 _print_next_block(par_num++, "USEMI");
3324 break;
3325 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3326 _print_next_block(par_num++, "UPB");
3327 break;
3328 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3329 _print_next_block(par_num++, "CSDM");
3330 break;
3331 }
3332
3333 /* Clear the bit */
3334 sig &= ~cur_bit;
3335 }
3336 }
3337
3338 return par_num;
3339}
3340
3341static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3342{
3343 int i = 0;
3344 u32 cur_bit = 0;
3345 for (i = 0; sig; i++) {
3346 cur_bit = ((u32)0x1 << i);
3347 if (sig & cur_bit) {
3348 switch (cur_bit) {
3349 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3350 _print_next_block(par_num++, "CSEMI");
3351 break;
3352 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3353 _print_next_block(par_num++, "PXP");
3354 break;
3355 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3356 _print_next_block(par_num++,
3357 "PXPPCICLOCKCLIENT");
3358 break;
3359 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3360 _print_next_block(par_num++, "CFC");
3361 break;
3362 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3363 _print_next_block(par_num++, "CDU");
3364 break;
3365 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3366 _print_next_block(par_num++, "IGU");
3367 break;
3368 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3369 _print_next_block(par_num++, "MISC");
3370 break;
3371 }
3372
3373 /* Clear the bit */
3374 sig &= ~cur_bit;
3375 }
3376 }
3377
3378 return par_num;
3379}
3380
3381static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3382{
3383 int i = 0;
3384 u32 cur_bit = 0;
3385 for (i = 0; sig; i++) {
3386 cur_bit = ((u32)0x1 << i);
3387 if (sig & cur_bit) {
3388 switch (cur_bit) {
3389 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3390 _print_next_block(par_num++, "MCP ROM");
3391 break;
3392 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3393 _print_next_block(par_num++, "MCP UMP RX");
3394 break;
3395 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3396 _print_next_block(par_num++, "MCP UMP TX");
3397 break;
3398 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3399 _print_next_block(par_num++, "MCP SCPAD");
3400 break;
3401 }
3402
3403 /* Clear the bit */
3404 sig &= ~cur_bit;
3405 }
3406 }
3407
3408 return par_num;
3409}
3410
3411static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3412 u32 sig2, u32 sig3)
3413{
3414 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3415 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3416 int par_num = 0;
3417 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3418 "[0]:0x%08x [1]:0x%08x "
3419 "[2]:0x%08x [3]:0x%08x\n",
3420 sig0 & HW_PRTY_ASSERT_SET_0,
3421 sig1 & HW_PRTY_ASSERT_SET_1,
3422 sig2 & HW_PRTY_ASSERT_SET_2,
3423 sig3 & HW_PRTY_ASSERT_SET_3);
3424 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3425 bp->dev->name);
3426 par_num = bnx2x_print_blocks_with_parity0(
3427 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3428 par_num = bnx2x_print_blocks_with_parity1(
3429 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3430 par_num = bnx2x_print_blocks_with_parity2(
3431 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3432 par_num = bnx2x_print_blocks_with_parity3(
3433 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3434 printk("\n");
3435 return true;
3436 } else
3437 return false;
3438}
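/*
 * Example output (editor's note, hypothetical values): with the BRB and
 * TSDM parity bits set in sig0, the helpers above emit one line such as
 *
 *	eth0: Parity errors detected in blocks: BRB, TSDM
 *
 * the par_num threading through the helpers is what inserts the ", "
 * between consecutive block names.
 */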

bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
    struct attn_route attn;
    int port = BP_PORT(bp);

    attn.sig[0] = REG_RD(bp,
        MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(bp,
        MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(bp,
        MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(bp,
        MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);

    return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
                             attn.sig[3]);
}
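
/*
 * Note: the four "after invert" AEU registers are read with a port*4
 * stride, so each port sees its own latched attention state.  Only
 * sig[0]..sig[3] are checked here; the fifth register (E2 chips only)
 * is collected and handled separately in the deassertion path below.
 */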

static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
    u32 val;
    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

        val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
        BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
            BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
                      "ADDRESS_ERROR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
            BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
                      "INCORRECT_RCV_BEHAVIOR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
            BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
                      "WAS_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
            BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
                      "VF_LENGTH_VIOLATION_ATTN\n");
        if (val &
            PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
            BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
                      "VF_GRC_SPACE_VIOLATION_ATTN\n");
        if (val &
            PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
            BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
                      "VF_MSIX_BAR_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
            BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
                      "TCPL_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
            BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
                      "TCPL_IN_TWO_RCBS_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
            BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
                      "CSSNOOP_FIFO_OVERFLOW\n");
    }
    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
        val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
        BNX2X_ERR("ATC hw attention 0x%x\n", val);
        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
            BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
            BNX2X_ERR("ATC_ATC_INT_STS_REG"
                      "_ATC_TCPL_TO_NOT_PEND\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
            BNX2X_ERR("ATC_ATC_INT_STS_REG_"
                      "ATC_GPA_MULTIPLE_HITS\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
            BNX2X_ERR("ATC_ATC_INT_STS_REG_"
                      "ATC_RCPL_TO_EMPTY_CNT\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
            BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
            BNX2X_ERR("ATC_ATC_INT_STS_REG_"
                      "ATC_IREQ_LESS_THAN_STU\n");
    }

    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
        BNX2X_ERR("FATAL parity attention set4 0x%x\n",
                  (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
    }
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
    struct attn_route attn, *group_mask;
    int port = BP_PORT(bp);
    int index;
    u32 reg_addr;
    u32 val;
    u32 aeu_mask;

    /* need to take HW lock because MCP or the other port might also
       try to handle this event */
    bnx2x_acquire_alr(bp);

    if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
        bp->recovery_state = BNX2X_RECOVERY_INIT;
        bnx2x_set_reset_in_progress(bp);
        schedule_delayed_work(&bp->reset_task, 0);
        /* Disable HW interrupts */
        bnx2x_int_disable(bp);
        bnx2x_release_alr(bp);
        /* In case of parity errors don't handle attentions so that
         * the other function will also "see" the parity errors.
         */
        return;
    }

    attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
    if (CHIP_IS_E2(bp))
        attn.sig[4] =
            REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
    else
        attn.sig[4] = 0;

    DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
       attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        if (deasserted & (1 << index)) {
            group_mask = &bp->attn_group[index];

            DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
               index,
               group_mask->sig[0], group_mask->sig[1],
               group_mask->sig[2], group_mask->sig[3],
               group_mask->sig[4]);

            bnx2x_attn_int_deasserted4(bp,
                    attn.sig[4] & group_mask->sig[4]);
            bnx2x_attn_int_deasserted3(bp,
                    attn.sig[3] & group_mask->sig[3]);
            bnx2x_attn_int_deasserted1(bp,
                    attn.sig[1] & group_mask->sig[1]);
            bnx2x_attn_int_deasserted2(bp,
                    attn.sig[2] & group_mask->sig[2]);
            bnx2x_attn_int_deasserted0(bp,
                    attn.sig[0] & group_mask->sig[0]);
        }
    }

    bnx2x_release_alr(bp);

    if (bp->common.int_block == INT_BLOCK_HC)
        reg_addr = (HC_REG_COMMAND_REG + port*32 +
                    COMMAND_REG_ATTN_BITS_CLR);
    else
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

    val = ~deasserted;
    DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
       (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(bp, reg_addr, val);

    if (~bp->attn_state & deasserted)
        BNX2X_ERR("IGU ERROR\n");

    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                      MISC_REG_AEU_MASK_ATTN_FUNC_0;

    bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
    aeu_mask = REG_RD(bp, reg_addr);

    DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
       aeu_mask, deasserted);
    aeu_mask |= (deasserted & 0x3ff);
    DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

    REG_WR(bp, reg_addr, aeu_mask);
    bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
    bp->attn_state &= ~deasserted;
    DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
    /* read local copy of bits */
    u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
                                attn_bits);
    u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
                               attn_bits_ack);
    u32 attn_state = bp->attn_state;

    /* look for changed bits */
    u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
    u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

    DP(NETIF_MSG_HW,
       "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
       attn_bits, attn_ack, asserted, deasserted);

    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
        BNX2X_ERR("BAD attention state\n");

    /* handle bits that were raised */
    if (asserted)
        bnx2x_attn_int_asserted(bp, asserted);

    if (deasserted)
        bnx2x_attn_int_deasserted(bp, deasserted);
}
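
/*
 * Worked example of the edge detection above: with attn_bits = 0x5,
 * attn_ack = 0x1 and attn_state = 0x1, asserted = 0x5 & ~0x1 & ~0x1 = 0x4
 * (bit 2 is newly raised) while deasserted = ~0x5 & 0x1 & 0x1 = 0x0, so
 * only the assertion path runs.  A bit that is acked and tracked in
 * attn_state but no longer set in attn_bits takes the deassertion path.
 */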

static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
    /* No memory barriers */
    storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
    mmiowb(); /* keep prod updates ordered */
}

#ifdef BCM_CNIC
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
                                     union event_ring_elem *elem)
{
    if (!bp->cnic_eth_dev.starting_cid ||
        cid < bp->cnic_eth_dev.starting_cid)
        return 1;

    DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

    if (unlikely(elem->message.data.cfc_del_event.error)) {
        BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
                  cid);
        bnx2x_panic_dump(bp);
    }
    bnx2x_cnic_cfc_comp(bp, cid);
    return 0;
}
#endif

static void bnx2x_eq_int(struct bnx2x *bp)
{
    u16 hw_cons, sw_cons, sw_prod;
    union event_ring_elem *elem;
    u32 cid;
    u8 opcode;
    int spqe_cnt = 0;

    hw_cons = le16_to_cpu(*bp->eq_cons_sb);

    /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
     * When we get the next-page element we need to adjust hw_cons so the
     * loop condition below will be met.  The next element is the size of
     * a regular element, hence the increment by 1.
     */
    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
        hw_cons++;

    /* This function may never run in parallel with itself for a
     * specific bp, thus there is no need in a "paired" read memory
     * barrier here.
     */
    sw_cons = bp->eq_cons;
    sw_prod = bp->eq_prod;

    DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u  bp->eq_spq_left %u\n",
       hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));

    for (; sw_cons != hw_cons;
         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

        elem = &bp->eq_ring[EQ_DESC(sw_cons)];

        cid = SW_CID(elem->message.data.cfc_del_event.cid);
        opcode = elem->message.opcode;

        /* handle eq element */
        switch (opcode) {
        case EVENT_RING_OPCODE_STAT_QUERY:
            DP(NETIF_MSG_TIMER, "got statistics comp event\n");
            /* nothing to do with stats comp */
            continue;

        case EVENT_RING_OPCODE_CFC_DEL:
            /* handle according to cid range */
            /*
             * we may want to verify here that the bp state is
             * HALTING
             */
            DP(NETIF_MSG_IFDOWN,
               "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
            if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
                goto next_spqe;
            if (cid == BNX2X_FCOE_ETH_CID)
                bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
            else
#endif
                bnx2x_fp(bp, cid, state) =
                        BNX2X_FP_STATE_CLOSED;

            goto next_spqe;

        case EVENT_RING_OPCODE_STOP_TRAFFIC:
            DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
            bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
            goto next_spqe;
        case EVENT_RING_OPCODE_START_TRAFFIC:
            DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
            bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
            goto next_spqe;
        }

        switch (opcode | bp->state) {
        case (EVENT_RING_OPCODE_FUNCTION_START |
              BNX2X_STATE_OPENING_WAIT4_PORT):
            DP(NETIF_MSG_IFUP, "got setup ramrod\n");
            bp->state = BNX2X_STATE_FUNC_STARTED;
            break;

        case (EVENT_RING_OPCODE_FUNCTION_STOP |
              BNX2X_STATE_CLOSING_WAIT4_HALT):
            DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
            bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
            break;

        case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
        case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
            DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
            if (elem->message.data.set_mac_event.echo)
                bp->set_mac_pending = 0;
            break;

        case (EVENT_RING_OPCODE_SET_MAC |
              BNX2X_STATE_CLOSING_WAIT4_HALT):
            DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
            if (elem->message.data.set_mac_event.echo)
                bp->set_mac_pending = 0;
            break;
        default:
            /* unknown event log error and continue */
            BNX2X_ERR("Unknown EQ event %d\n",
                      elem->message.opcode);
        }
next_spqe:
        spqe_cnt++;
    } /* for */

    smp_mb__before_atomic_inc();
    atomic_add(spqe_cnt, &bp->eq_spq_left);

    bp->eq_cons = sw_cons;
    bp->eq_prod = sw_prod;
    /* Make sure that above mem writes were issued towards the memory */
    smp_wmb();

    /* update producer */
    bnx2x_update_eq_prod(bp, bp->eq_prod);
}
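
/*
 * Example of the next-page adjustment in bnx2x_eq_int(), assuming
 * EQ_DESC_MAX_PAGE is 0xFF for a 256-entry page as the ranges in the
 * comment above imply: a hw_cons of 255 lands on the next-page element,
 * so it is bumped to 256 to stay aligned with the software range
 * (0-254, 256) and keep the "sw_cons != hw_cons" loop terminating.
 */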

static void bnx2x_sp_task(struct work_struct *work)
{
    struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
    u16 status;

    /* Return here if interrupt is disabled */
    if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
        DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
        return;
    }

    status = bnx2x_update_dsb_idx(bp);
/*  if (status == 0)                                     */
/*      BNX2X_ERR("spurious slowpath interrupt!\n");     */

    DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

    /* HW attentions */
    if (status & BNX2X_DEF_SB_ATT_IDX) {
        bnx2x_attn_int(bp);
        status &= ~BNX2X_DEF_SB_ATT_IDX;
    }

    /* SP events: STAT_QUERY and others */
    if (status & BNX2X_DEF_SB_IDX) {
#ifdef BCM_CNIC
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

        if ((!NO_FCOE(bp)) &&
            (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
            napi_schedule(&bnx2x_fcoe(bp, napi));
#endif
        /* Handle EQ completions */
        bnx2x_eq_int(bp);

        bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
                     le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

        status &= ~BNX2X_DEF_SB_IDX;
    }

    if (unlikely(status))
        DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
           status);

    bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
                 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct bnx2x *bp = netdev_priv(dev);

    /* Return here if interrupt is disabled */
    if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
        DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
        return IRQ_HANDLED;
    }

    bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
                 IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
    {
        struct cnic_ops *c_ops;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
            c_ops->cnic_handler(bp->cnic_data, NULL);
        rcu_read_unlock();
    }
#endif
    queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

    return IRQ_HANDLED;
}

/* end of slow path */

static void bnx2x_timer(unsigned long data)
{
    struct bnx2x *bp = (struct bnx2x *) data;

    if (!netif_running(bp->dev))
        return;

    if (atomic_read(&bp->intr_sem) != 0)
        goto timer_restart;

    if (poll) {
        struct bnx2x_fastpath *fp = &bp->fp[0];
        int rc;

        bnx2x_tx_int(fp);
        rc = bnx2x_rx_int(fp, 1000);
    }

    if (!BP_NOMCP(bp)) {
        int mb_idx = BP_FW_MB_IDX(bp);
        u32 drv_pulse;
        u32 mcp_pulse;

        ++bp->fw_drv_pulse_wr_seq;
        bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
        /* TBD - add SYSTEM_TIME */
        drv_pulse = bp->fw_drv_pulse_wr_seq;
        SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

        mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
                     MCP_PULSE_SEQ_MASK);
        /* The delta between driver pulse and mcp response
         * should be 1 (before mcp response) or 0 (after mcp response)
         */
        if ((drv_pulse != mcp_pulse) &&
            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
            /* someone lost a heartbeat... */
            BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                      drv_pulse, mcp_pulse);
        }
    }

    if (bp->state == BNX2X_STATE_OPEN)
        bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
    mod_timer(&bp->timer, jiffies + bp->current_interval);
}
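
/*
 * Heartbeat example: if the driver writes drv_pulse = 0x12, the check
 * above accepts mcp_pulse == 0x12 (MCP has already echoed the pulse) or
 * mcp_pulse == 0x11 (MCP has not responded yet, a delta of 1 modulo
 * MCP_PULSE_SEQ_MASK); any other value means one side missed a beat and
 * the mismatch is logged.
 */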

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
    u32 i;
    if (!(len%4) && !(addr%4))
        for (i = 0; i < len; i += 4)
            REG_WR(bp, addr + i, fill);
    else
        for (i = 0; i < len; i++)
            REG_WR8(bp, addr + i, fill);
}
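
/*
 * bnx2x_fill() picks the widest access that stays aligned: a 100-byte
 * region at a dword-aligned address is written as 25 dword writes, while
 * a misaligned address or length falls back to byte-wide writes.
 */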

/* helper: writes FP SP data to FW - data_size in dwords */
static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
                                       int fw_sb_id,
                                       u32 *sb_data_p,
                                       u32 data_size)
{
    int index;
    for (index = 0; index < data_size; index++)
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
               sizeof(u32)*index,
               *(sb_data_p + index));
}

static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
    u32 *sb_data_p;
    u32 data_size = 0;
    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;

    /* disable the function first */
    if (CHIP_IS_E2(bp)) {
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
        sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
        sb_data_e2.common.p_func.vf_valid = false;
        sb_data_p = (u32 *)&sb_data_e2;
        data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
    } else {
        memset(&sb_data_e1x, 0,
               sizeof(struct hc_status_block_data_e1x));
        sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
        sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
        sb_data_e1x.common.p_func.vf_valid = false;
        sb_data_p = (u32 *)&sb_data_e1x;
        data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
    }
    bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

    bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
               CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
               CSTORM_STATUS_BLOCK_SIZE);
    bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
               CSTORM_SYNC_BLOCK_SIZE);
}

/* helper: writes SP SB data to FW */
static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
                                       struct hc_sp_status_block_data *sp_sb_data)
{
    int func = BP_FUNC(bp);
    int i;
    for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
               i*sizeof(u32),
               *((u32 *)sp_sb_data + i));
}

static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
    int func = BP_FUNC(bp);
    struct hc_sp_status_block_data sp_sb_data;
    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
    sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
    sp_sb_data.p_func.vf_valid = false;

    bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

    bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
               CSTORM_SP_STATUS_BLOCK_SIZE);
    bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
               CSTORM_SP_SYNC_BLOCK_SIZE);
}

static inline
void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
                                    int igu_sb_id, int igu_seg_id)
{
    hc_sm->igu_sb_id = igu_sb_id;
    hc_sm->igu_seg_id = igu_seg_id;
    hc_sm->timer_value = 0xFF;
    hc_sm->time_to_expire = 0xFFFFFFFF;
}

static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                          u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
    int igu_seg_id;

    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;
    struct hc_status_block_sm *hc_sm_p;
    struct hc_index_data *hc_index_p;
    int data_size;
    u32 *sb_data_p;

    if (CHIP_INT_MODE_IS_BC(bp))
        igu_seg_id = HC_SEG_ACCESS_NORM;
    else
        igu_seg_id = IGU_SEG_ACCESS_NORM;

    bnx2x_zero_fp_sb(bp, fw_sb_id);

    if (CHIP_IS_E2(bp)) {
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
        sb_data_e2.common.p_func.vf_id = vfid;
        sb_data_e2.common.p_func.vf_valid = vf_valid;
        sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
        sb_data_e2.common.same_igu_sb_1b = true;
        sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
        sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
        hc_sm_p = sb_data_e2.common.state_machine;
        hc_index_p = sb_data_e2.index_data;
        sb_data_p = (u32 *)&sb_data_e2;
        data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
    } else {
        memset(&sb_data_e1x, 0,
               sizeof(struct hc_status_block_data_e1x));
        sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
        sb_data_e1x.common.p_func.vf_id = 0xff;
        sb_data_e1x.common.p_func.vf_valid = false;
        sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
        sb_data_e1x.common.same_igu_sb_1b = true;
        sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
        sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
        hc_sm_p = sb_data_e1x.common.state_machine;
        hc_index_p = sb_data_e1x.index_data;
        sb_data_p = (u32 *)&sb_data_e1x;
        data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
    }

    bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
                                   igu_sb_id, igu_seg_id);
    bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
                                   igu_sb_id, igu_seg_id);

    DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

    /* write indices to HW */
    bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
                                           u8 sb_index, u8 disable, u16 usec)
{
    int port = BP_PORT(bp);
    u8 ticks = usec / BNX2X_BTR;

    storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

    disable = disable ? 1 : (usec ? 0 : 1);
    storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
                                     u16 tx_usec, u16 rx_usec)
{
    bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
                                   false, rx_usec);
    bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
                                   false, tx_usec);
}
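
/*
 * Coalescing example (assuming BNX2X_BTR is the 4-usec HC timer resolution
 * used elsewhere in this driver): rx_usec = 100 becomes 25 HC ticks, and a
 * usec value of 0 implicitly disables the index even when "disable" is
 * passed as false.
 */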

static void bnx2x_init_def_sb(struct bnx2x *bp)
{
    struct host_sp_status_block *def_sb = bp->def_status_blk;
    dma_addr_t mapping = bp->def_status_blk_mapping;
    int igu_sp_sb_index;
    int igu_seg_id;
    int port = BP_PORT(bp);
    int func = BP_FUNC(bp);
    int reg_offset;
    u64 section;
    int index;
    struct hc_sp_status_block_data sp_sb_data;
    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    if (CHIP_INT_MODE_IS_BC(bp)) {
        igu_sp_sb_index = DEF_SB_IGU_ID;
        igu_seg_id = HC_SEG_ACCESS_DEF;
    } else {
        igu_sp_sb_index = bp->igu_dsb_id;
        igu_seg_id = IGU_SEG_ACCESS_DEF;
    }

    /* ATTN */
    section = ((u64)mapping) + offsetof(struct host_sp_status_block,
                                        atten_status_block);
    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

    bp->attn_state = 0;

    reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                         MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        int sindex;
        /* take care of sig[0]..sig[4] */
        for (sindex = 0; sindex < 4; sindex++)
            bp->attn_group[index].sig[sindex] =
                REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

        if (CHIP_IS_E2(bp))
            /*
             * enable5 is separate from the rest of the registers,
             * and therefore the address skip is 4
             * and not 16 between the different groups
             */
            bp->attn_group[index].sig[4] = REG_RD(bp,
                    reg_offset + 0x10 + 0x4*index);
        else
            bp->attn_group[index].sig[4] = 0;
    }

    if (bp->common.int_block == INT_BLOCK_HC) {
        reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
                             HC_REG_ATTN_MSG0_ADDR_L);

        REG_WR(bp, reg_offset, U64_LO(section));
        REG_WR(bp, reg_offset + 4, U64_HI(section));
    } else if (CHIP_IS_E2(bp)) {
        REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
        REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
    }

    section = ((u64)mapping) + offsetof(struct host_sp_status_block,
                                        sp_sb);

    bnx2x_zero_sp_sb(bp);

    sp_sb_data.host_sb_addr.lo = U64_LO(section);
    sp_sb_data.host_sb_addr.hi = U64_HI(section);
    sp_sb_data.igu_sb_id = igu_sp_sb_index;
    sp_sb_data.igu_seg_id = igu_seg_id;
    sp_sb_data.p_func.pf_id = func;
    sp_sb_data.p_func.vnic_id = BP_VN(bp);
    sp_sb_data.p_func.vf_id = 0xff;

    bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

    bp->stats_pending = 0;
    bp->set_mac_pending = 0;

    bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
    int i;

    for_each_eth_queue(bp, i)
        bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
                                 bp->rx_ticks, bp->tx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
    spin_lock_init(&bp->spq_lock);
    atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);

    bp->spq_prod_idx = 0;
    bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
    bp->spq_prod_bd = bp->spq;
    bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
    int i;
    for (i = 1; i <= NUM_EQ_PAGES; i++) {
        union event_ring_elem *elem =
            &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

        elem->next_page.addr.hi =
            cpu_to_le32(U64_HI(bp->eq_mapping +
                        BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
        elem->next_page.addr.lo =
            cpu_to_le32(U64_LO(bp->eq_mapping +
                        BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
    }
    bp->eq_cons = 0;
    bp->eq_prod = NUM_EQ_DESC;
    bp->eq_cons_sb = BNX2X_EQ_INDEX;
    /* we want a warning message before it gets rough... */
    atomic_set(&bp->eq_spq_left,
               min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
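
/*
 * Ring chaining example with NUM_EQ_PAGES == 2: the last element of page 1
 * points at the base of page 2 (i % NUM_EQ_PAGES == 1), and the last
 * element of page 2 wraps back to the base of page 1 (i % NUM_EQ_PAGES
 * == 0), closing the event ring.
 */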

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
    int func = BP_FUNC(bp);
    int i;

    if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
        return;

    DP(NETIF_MSG_IFUP,
       "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
    for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
        REG_WR8(bp, BAR_TSTRORM_INTMEM +
                TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
                bp->fp->cl_id + (i % (bp->num_queues -
                                      NONE_ETH_CONTEXT_USE)));
}
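
/*
 * Indirection illustration (hypothetical numbers): with 4 ethernet queues
 * and a base cl_id of 16, the table entries cycle 16, 17, 18, 19, 16, ...
 * across all TSTORM_INDIRECTION_TABLE_SIZE slots, spreading RSS hash
 * buckets evenly over the ethernet clients while the non-ethernet (FCoE)
 * client is kept out via NONE_ETH_CONTEXT_USE.
 */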

void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
    int mode = bp->rx_mode;
    int port = BP_PORT(bp);
    u16 cl_id;
    u32 def_q_filters = 0;

    /* All but management unicast packets should pass to the host as well */
    u32 llh_mask =
        NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
        NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
        NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
        NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

    switch (mode) {
    case BNX2X_RX_MODE_NONE: /* no Rx */
        def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
        if (!NO_FCOE(bp)) {
            cl_id = bnx2x_fcoe(bp, cl_id);
            bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
        }
#endif
        break;

    case BNX2X_RX_MODE_NORMAL:
        def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
                         BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
        if (!NO_FCOE(bp)) {
            cl_id = bnx2x_fcoe(bp, cl_id);
            bnx2x_rxq_set_mac_filters(bp, cl_id,
                                      BNX2X_ACCEPT_UNICAST |
                                      BNX2X_ACCEPT_MULTICAST);
        }
#endif
        break;

    case BNX2X_RX_MODE_ALLMULTI:
        def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
                         BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
        /*
         * Prevent duplication of multicast packets by configuring the
         * FCoE L2 Client to receive only matched unicast frames.
         */
        if (!NO_FCOE(bp)) {
            cl_id = bnx2x_fcoe(bp, cl_id);
            bnx2x_rxq_set_mac_filters(bp, cl_id,
                                      BNX2X_ACCEPT_UNICAST);
        }
#endif
        break;

    case BNX2X_RX_MODE_PROMISC:
        def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
        /*
         * Prevent packet duplication by configuring DROP_ALL for the
         * FCoE L2 Client.
         */
        if (!NO_FCOE(bp)) {
            cl_id = bnx2x_fcoe(bp, cl_id);
            bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
        }
#endif
        /* pass management unicast packets as well */
        llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
        break;

    default:
        BNX2X_ERR("BAD rx mode (%d)\n", mode);
        break;
    }

    cl_id = BP_L_ID(bp);
    bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);

    REG_WR(bp,
           (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
                   NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);

    DP(NETIF_MSG_IFUP, "rx mode %d\n"
       "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
       "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
       "unmatched_ucast 0x%x\n", mode,
       bp->mac_filters.ucast_drop_all,
       bp->mac_filters.mcast_drop_all,
       bp->mac_filters.bcast_drop_all,
       bp->mac_filters.ucast_accept_all,
       bp->mac_filters.mcast_accept_all,
       bp->mac_filters.bcast_accept_all,
       bp->mac_filters.unmatched_unicast
    );

    storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
    int i;

    if (!CHIP_IS_E1(bp)) {

        /* xstorm needs to know whether to add ovlan to packets or not,
         * in switch-independent mode we'll write 0 here... */
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
                bp->mf_mode);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
                bp->mf_mode);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
                bp->mf_mode);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
                bp->mf_mode);
    }

    if (IS_MF_SI(bp))
        /*
         * In switch-independent mode, the TSTORM needs to accept
         * packets that failed classification, since approximate-match
         * mac addresses aren't written to the NIG LLH
         */
        REG_WR8(bp, BAR_TSTRORM_INTMEM +
                TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);

    /* Zero this manually as its initialization is
       currently missing in the initTool */
    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_AGG_DATA_OFFSET + i * 4, 0);
    if (CHIP_IS_E2(bp)) {
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
                CHIP_INT_MODE_IS_BC(bp) ?
                HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
    }
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
    /* port */
    bnx2x_dcb_init_intmem_pfc(bp);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        bnx2x_init_internal_common(bp);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_PORT:
        bnx2x_init_internal_port(bp);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        /* internal memory per function is
           initialized inside bnx2x_pf_init */
        break;

    default:
        BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
        break;
    }
}

static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
    struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

    fp->state = BNX2X_FP_STATE_CLOSED;

    fp->index = fp->cid = fp_idx;
    fp->cl_id = BP_L_ID(bp) + fp_idx;
    fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
    fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
    /* qZone id equals to FW (per path) client id */
    fp->cl_qzone_id = fp->cl_id +
        BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
                                      ETH_MAX_RX_CLIENTS_E1H);
    /* init shortcut */
    fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
        USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
        USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
    /* Setup SB indices */
    fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
    fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

    DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
       "cl_id %d  fw_sb %d  igu_sb %d\n",
       fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
       fp->igu_sb_id);
    bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
                  fp->fw_sb_id, fp->igu_sb_id);

    bnx2x_update_fpsb_idx(fp);
}

void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
    int i;

    for_each_eth_queue(bp, i)
        bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
    if (!NO_FCOE(bp))
        bnx2x_init_fcoe_fp(bp);

    bnx2x_init_sb(bp, bp->cnic_sb_mapping,
                  BNX2X_VF_ID_INVALID, false,
                  CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

    /* ensure status block indices were read */
    rmb();

    bnx2x_init_def_sb(bp);
    bnx2x_update_dsb_idx(bp);
    bnx2x_init_rx_rings(bp);
    bnx2x_init_tx_rings(bp);
    bnx2x_init_sp_ring(bp);
    bnx2x_init_eq_ring(bp);
    bnx2x_init_internal(bp, load_code);
    bnx2x_pf_init(bp);
    bnx2x_init_ind_table(bp);
    bnx2x_stats_init(bp);

    /* At this point, we are ready for interrupts */
    atomic_set(&bp->intr_sem, 0);

    /* flush all before enabling interrupts */
    mb();
    mmiowb();

    bnx2x_int_enable(bp);

    /* Check for SPIO5 */
    bnx2x_attn_int_deasserted0(bp,
        REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
        AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
    bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
                                        &bp->gunzip_mapping, GFP_KERNEL);
    if (bp->gunzip_buf == NULL)
        goto gunzip_nomem1;

    bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
    if (bp->strm == NULL)
        goto gunzip_nomem2;

    bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
                                  GFP_KERNEL);
    if (bp->strm->workspace == NULL)
        goto gunzip_nomem3;

    return 0;

gunzip_nomem3:
    kfree(bp->strm);
    bp->strm = NULL;

gunzip_nomem2:
    dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
                      bp->gunzip_mapping);
    bp->gunzip_buf = NULL;

gunzip_nomem1:
    netdev_err(bp->dev, "Cannot allocate firmware buffer for"
               " decompression\n");
    return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
    kfree(bp->strm->workspace);
    kfree(bp->strm);
    bp->strm = NULL;

    if (bp->gunzip_buf) {
        dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
                          bp->gunzip_mapping);
        bp->gunzip_buf = NULL;
    }
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
    int n, rc;

    /* check gzip header */
    if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
        BNX2X_ERR("Bad gzip header\n");
        return -EINVAL;
    }

    n = 10;

#define FNAME               0x8

    if (zbuf[3] & FNAME)
        while ((zbuf[n++] != 0) && (n < len));

    bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
    bp->strm->avail_in = len - n;
    bp->strm->next_out = bp->gunzip_buf;
    bp->strm->avail_out = FW_BUF_SIZE;

    rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
    if (rc != Z_OK)
        return rc;

    rc = zlib_inflate(bp->strm, Z_FINISH);
    if ((rc != Z_OK) && (rc != Z_STREAM_END))
        netdev_err(bp->dev, "Firmware decompression error: %s\n",
                   bp->strm->msg);

    bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
    if (bp->gunzip_outlen & 0x3)
        netdev_err(bp->dev, "Firmware decompression error:"
                   " gunzip_outlen (%d) not aligned\n",
                   bp->gunzip_outlen);
    bp->gunzip_outlen >>= 2;

    zlib_inflateEnd(bp->strm);

    if (rc == Z_STREAM_END)
        return 0;

    return rc;
}
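
/*
 * Header handling example: a standard gzip stream starts 1f 8b 08 <flags>,
 * followed by a 6-byte mtime/XFL/OS area, hence the fixed 10-byte skip
 * above.  If the FNAME flag (bit 3) is set, the original file name follows
 * as a NUL-terminated string, which the while loop skips before handing
 * the raw deflate data to zlib_inflateInit2(-MAX_WBITS).
 */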

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
    u32 wb_write[3];

    /* Ethernet source and destination addresses */
    wb_write[0] = 0x55555555;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x20;     /* SOP */
    REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

    /* NON-IP protocol */
    wb_write[0] = 0x09000000;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
    REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver;
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
    int factor;
    int count, i;
    u32 val = 0;

    if (CHIP_REV_IS_FPGA(bp))
        factor = 120;
    else if (CHIP_REV_IS_EMUL(bp))
        factor = 200;
    else
        factor = 1;

    /* Disable inputs of parser neighbor blocks */
    REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(bp, CFC_REG_DEBUG0, 0x1);
    REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* Write 0 to parser credits for CFC search request */
    REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send Ethernet packet */
    bnx2x_lb_pckt(bp);

    /* TODO: do we need to reset the NIG statistics? */
    /* Wait until NIG register shows 1 packet of size 0x10 */
    count = 1000 * factor;
    while (count) {

        bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *bnx2x_sp(bp, wb_data[0]);
        if (val == 0x10)
            break;

        msleep(10);
        count--;
    }
    if (val != 0x10) {
        BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
        return -1;
    }

    /* Wait until PRS register shows 1 packet */
    count = 1000 * factor;
    while (count) {
        val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
        if (val == 1)
            break;

        msleep(10);
        count--;
    }
    if (val != 0x1) {
        BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
        return -2;
    }

    /* Reset and init BRB, PRS */
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    msleep(50);
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    msleep(50);
    bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
    bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

    DP(NETIF_MSG_HW, "part2\n");

    /* Disable inputs of parser neighbor blocks */
    REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(bp, CFC_REG_DEBUG0, 0x1);
    REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* Write 0 to parser credits for CFC search request */
    REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send 10 Ethernet packets */
    for (i = 0; i < 10; i++)
        bnx2x_lb_pckt(bp);

    /* Wait until NIG register shows 10 + 1
       packets of size 11*0x10 = 0xb0 */
    count = 1000 * factor;
    while (count) {

        bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *bnx2x_sp(bp, wb_data[0]);
        if (val == 0xb0)
            break;

        msleep(10);
        count--;
    }
    if (val != 0xb0) {
        BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
        return -3;
    }

    /* Wait until PRS register shows 2 packets */
    val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
    if (val != 2)
        BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

    /* Write 1 to parser credits for CFC search request */
    REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

    /* Wait, then check that the PRS register shows 3 packets */
    msleep(10 * factor);
    val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
    if (val != 3)
        BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

    /* clear NIG EOP FIFO */
    for (i = 0; i < 11; i++)
        REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
    val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
    if (val != 1) {
        BNX2X_ERR("clear of NIG failed\n");
        return -4;
    }

    /* Reset and init BRB, PRS, NIG */
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    msleep(50);
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    msleep(50);
    bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
    bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
    /* set NIC mode */
    REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

    /* Enable inputs of parser neighbor blocks */
    REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
    REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
    REG_WR(bp, CFC_REG_DEBUG0, 0x0);
    REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

    DP(NETIF_MSG_HW, "done\n");

    return 0; /* OK */
}

static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
    REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
    if (CHIP_IS_E2(bp))
        REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
    else
        REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
    REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
    REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
    /*
     * mask read length error interrupts in brb for parser
     * (parsing unit and 'checksum and crc' unit)
     * these errors are legal (PU reads fixed length and CAC can cause
     * read length error on truncated packets)
     */
    REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
    REG_WR(bp, QM_REG_QM_INT_MASK, 0);
    REG_WR(bp, TM_REG_TM_INT_MASK, 0);
    REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
    REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
    REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*  REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*  REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
    REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
    REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
    REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*  REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*  REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
    REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
    REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
    REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
    REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*  REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*  REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

    if (CHIP_REV_IS_FPGA(bp))
        REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
    else if (CHIP_IS_E2(bp))
        REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
               (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
                | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
                | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
                | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
                | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
    else
        REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
    REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
    REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
    REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*  REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*  REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
    REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
    REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*  REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
    REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
    /* reset_common */
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           0xd3ffff7f);
    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
    u16 devctl;
    int r_order, w_order;

    pci_read_config_word(bp->pdev,
                         bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
    DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
    w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
    if (bp->mrrs == -1)
        r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
    else {
        DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
        r_order = bp->mrrs;
    }

    bnx2x_init_pxp_arb(bp, r_order, w_order);
}
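
/*
 * DEVCTL decoding example: PCI_EXP_DEVCTL_PAYLOAD occupies bits 7:5 and
 * PCI_EXP_DEVCTL_READRQ bits 14:12, each field encoding 128 << n bytes;
 * a devctl of 0x2810 therefore means a 128-byte max payload (w_order 0)
 * and a 512-byte max read request (r_order 2).
 */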

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
    int is_required;
    u32 val;
    int port;

    if (BP_NOMCP(bp))
        return;

    is_required = 0;
    val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
          SHARED_HW_CFG_FAN_FAILURE_MASK;

    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
        is_required = 1;

    /*
     * The fan failure mechanism is usually related to the PHY type since
     * the power consumption of the board is affected by the PHY. Currently,
     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
     */
    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
        for (port = PORT_0; port < PORT_MAX; port++) {
            is_required |=
                bnx2x_fan_failure_det_req(
                    bp,
                    bp->common.shmem_base,
                    bp->common.shmem2_base,
                    port);
        }

    DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

    if (is_required == 0)
        return;

    /* Fan failure is indicated by SPIO 5 */
    bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
                   MISC_REGISTERS_SPIO_INPUT_HI_Z);

    /* set to active low mode */
    val = REG_RD(bp, MISC_REG_SPIO_INT);
    val |= ((1 << MISC_REGISTERS_SPIO_5) <<
            MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
    REG_WR(bp, MISC_REG_SPIO_INT, val);

    /* enable interrupt to signal the IGU */
    val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
    val |= (1 << MISC_REGISTERS_SPIO_5);
    REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
{
    u32 offset = 0;

    if (CHIP_IS_E1(bp))
        return;
    if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
        return;

    switch (BP_ABS_FUNC(bp)) {
    case 0:
        offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
        break;
    case 1:
        offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
        break;
    case 2:
        offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
        break;
    case 3:
        offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
        break;
    case 4:
        offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
        break;
    case 5:
        offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
        break;
    case 6:
        offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
        break;
    case 7:
        offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
        break;
    default:
        return;
    }

    REG_WR(bp, offset, pretend_func_num);
    REG_RD(bp, offset);
    DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
}
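
/*
 * Usage sketch: bnx2x_pretend_func(bp, 6) makes subsequent GRC accesses by
 * this PF act on behalf of absolute function 6 until the identity is
 * restored with bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)).  The REG_RD of
 * the pretend register right after the write is presumably there to flush
 * the write before the caller touches any split registers.
 */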

static void bnx2x_pf_disable(struct bnx2x *bp)
{
    u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
    val &= ~IGU_PF_CONF_FUNC_EN;

    REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
    REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
    REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}
4977
523224a3 4978static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4979{
a2fbb9ea 4980 u32 val, i;
a2fbb9ea 4981
f2e0899f 4982 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4983
81f75bbf 4984 bnx2x_reset_common(bp);
34f80b04
EG
4985 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4986 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4987
94a78b79 4988 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4989 if (!CHIP_IS_E1(bp))
fb3bff17 4990 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4991
f2e0899f
DK
4992 if (CHIP_IS_E2(bp)) {
4993 u8 fid;
4994
4995 /**
4996 * 4-port mode or 2-port mode we need to turn of master-enable
4997 * for everyone, after that, turn it back on for self.
4998 * so, we disregard multi-function or not, and always disable
4999 * for all functions on the given path, this means 0,2,4,6 for
5000 * path 0 and 1,3,5,7 for path 1
5001 */
5002 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
5003 if (fid == BP_ABS_FUNC(bp)) {
5004 REG_WR(bp,
5005 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
5006 1);
5007 continue;
5008 }
5009
5010 bnx2x_pretend_func(bp, fid);
5011 /* clear pf enable */
5012 bnx2x_pf_disable(bp);
5013 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5014 }
5015 }
a2fbb9ea 5016
94a78b79 5017 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5018 if (CHIP_IS_E1(bp)) {
5019 /* enable HW interrupt from PXP on USDM overflow
5020 bit 16 on INT_MASK_0 */
5021 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5022 }
a2fbb9ea 5023
94a78b79 5024 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5025 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5026
5027#ifdef __BIG_ENDIAN
34f80b04
EG
5028 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5029 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5030 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5031 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5032 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5033 /* make sure this value is 0 */
5034 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5035
5036/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5037 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5038 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5039 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5040 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5041#endif
5042
523224a3
DK
5043 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5044
34f80b04
EG
5045 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5046 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5047
34f80b04
EG
5048 /* let the HW do it's magic ... */
5049 msleep(100);
5050 /* finish PXP init */
5051 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5052 if (val != 1) {
5053 BNX2X_ERR("PXP2 CFG failed\n");
5054 return -EBUSY;
5055 }
5056 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5057 if (val != 1) {
5058 BNX2X_ERR("PXP2 RD_INIT failed\n");
5059 return -EBUSY;
5060 }
a2fbb9ea 5061
f2e0899f
DK
5062 /* Timers bug workaround E2 only. We need to set the entire ILT to
5063 * have entries with value "0" and valid bit on.
5064 * This needs to be done by the first PF that is loaded in a path
5065 * (i.e. common phase)
5066 */
5067 if (CHIP_IS_E2(bp)) {
5068 struct ilt_client_info ilt_cli;
5069 struct bnx2x_ilt ilt;
5070 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5071 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5072
b595076a 5073 /* initialize dummy TM client */
f2e0899f
DK
5074 ilt_cli.start = 0;
5075 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5076 ilt_cli.client_num = ILT_CLIENT_TM;
5077
5078 /* Step 1: set zeroes to all ilt page entries with valid bit on
5079 * Step 2: set the timers first/last ilt entry to point
5080 * to the entire range to prevent ILT range error for 3rd/4th
5081 * vnic (this code assumes existance of the vnic)
5082 *
5083 * both steps performed by call to bnx2x_ilt_client_init_op()
5084 * with dummy TM client
5085 *
5086 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5087 * and his brother are split registers
5088 */
5089 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5090 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5091 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5092
5093 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5094 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5095 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5096 }
5097
5098
34f80b04
EG
5099 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5100 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5101
f2e0899f
DK
5102 if (CHIP_IS_E2(bp)) {
5103 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5104 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5105 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5106
5107 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5108
5109 /* let the HW do it's magic ... */
5110 do {
5111 msleep(200);
5112 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5113 } while (factor-- && (val != 1));
5114
5115 if (val != 1) {
5116 BNX2X_ERR("ATC_INIT failed\n");
5117 return -EBUSY;
5118 }
5119 }
5120
	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
	}

	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		 * basic Ethernet header */
		int has_ovlan = IS_MF_SD(bp);
		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp)) {
		int has_ovlan = IS_MF_SD(bp);
		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());

	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

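	/* Note: the searcher keys above are written while SRC is held in
	 * soft reset, so the block samples them only on release. The
	 * KEYRSS values are re-randomized on every load; the fixed
	 * KEYSEARCH constants (CNIC builds only) presumably seed the
	 * connection-search hash.
	 */
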
	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
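	/* The low field of the global params above appears to be the
	 * 1024-byte context size (matching the sizeof check on
	 * union cdu_context); the meaning of the upper fields
	 * (4 << 24, 0 << 12) is not spelled out here.
	 */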

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
		       (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
			PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
		       (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
			PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
			PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
		       (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
			PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
			PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
	}
	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		 * basic Ethernet header */
		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC,
		       (IS_MF_SD(bp) ? 7 : 6));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

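	/* On E1 the BRB octet counter in the NIG is assumed to survive
	 * everything but a power-on reset, so reading zero below is taken
	 * to mean this is the first load since power-up and the internal
	 * memory self test is worth running.
	 */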
	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	if (CHIP_PARITY_ENABLED(bp))
		bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		/* In E2 2-PORT mode, same ext phy is used for the two paths */
		if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
		    CHIP_IS_E1x(bp)) {
			u32 shmem_base[2], shmem2_base[2];
			shmem_base[0] = bp->common.shmem_base;
			shmem2_base[0] = bp->common.shmem2_base;
			if (CHIP_IS_E2(bp)) {
				shmem_base[1] =
					SHMEM2_RD(bp, other_shmem_base_addr);
				shmem2_base[1] =
					SHMEM2_RD(bp, other_shmem2_base_addr);
			}
			bnx2x_acquire_phy_lock(bp);
			bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
					      bp->common.chip_id);
			bnx2x_release_phy_lock(bp);
		}
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

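/* Per-port init phase: runs once for each MAC port after the common
 * phase, walking the HW blocks at PORT0/PORT1 stage and setting the
 * port's BRB pause thresholds, PBF credits and NIG/AEU masks.
 */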
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: the pf_master bit in pglue is disabled
	 * in the common phase, and we need to enable it here before any
	 * DMAE accesses are attempted. Therefore the enable-master was
	 * manually added to the port phase (it also happens in the
	 * function phase).
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56;	/* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

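	/* The pause thresholds above are apparently in 256-byte BRB
	 * blocks: low = (24*1024 + mtu*4)/256 reserves a 24KB floor plus
	 * room for roughly four MTU-sized frames, and high adds a fixed
	 * 14KB (56-block) margin.
	 */
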
	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
			    BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE for MTU 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

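	/* A hedged note on the PBF arithmetic above: the threshold and
	 * credit values appear to be in 16-byte units, so 9040/16 covers
	 * one 9000-byte frame plus overhead; the "+ 553 - 22" adjustment
	 * to the initial credit is not explained here.
	 */
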
#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *   bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
				    NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}

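/* Write one ILT entry: the translation is a 64-bit wide-bus value,
 * split into two 32-bit halves by ONCHIP_ADDR1/2 and written via
 * bnx2x_wb_wr(); E1 exposes the table at a different offset than
 * later chips.
 */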
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}

static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

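/* Per-function init phase: runs on every driver load. Programs this
 * PF's CDU ILT lines, applies the E2 timers-bug and master-enable
 * workarounds, initializes the HW blocks at FUNCx stage and brings up
 * either the HC or the IGU interrupt path.
 */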
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need
		 * to set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers bug workaround: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - the register is re-initialized here
		 * because WB DMAE writes are performed before the regular
		 * function init gets to it
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
					  "block during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

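/* Top-level HW init dispatcher. The MCP load response tells this
 * function whether it is the first on the chip, path or port: the
 * switch below deliberately falls through, so e.g. LOAD_COMMON runs
 * the common, port and function phases in order, while LOAD_FUNCTION
 * runs only the last.
 */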
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	spin_lock_init(&bp->dmae_lock);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		bp->fw_drv_pulse_wr_seq =
		   (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
		    DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
#ifdef BCM_CNIC
		/* FCoE client uses default status block */
		if (IS_FCOE_IDX(i)) {
			union host_hc_status_block *sb =
				&bnx2x_fp(bp, i, status_blk);
			memset(sb, 0, sizeof(union host_hc_status_block));
			bnx2x_fp(bp, i, status_blk_mapping) = 0;
		} else {
#endif
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
		}
#endif
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

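/* Cache pointers to the chip-specific status block layout in the
 * fastpath structure, so the hot path can read the index values and
 * running index without re-checking the chip type on every access.
 */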
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
#ifdef BCM_CNIC
		if (!IS_FCOE_IDX(i)) {
#endif
			if (CHIP_IS_E2(bp))
				BNX2X_PCI_ALLOC(sb->e2_sb,
				    &bnx2x_fp(bp, i, status_blk_mapping),
				    sizeof(struct host_hc_status_block_e2));
			else
				BNX2X_PCI_ALLOC(sb->e1x_sb,
				    &bnx2x_fp(bp, i, status_blk_mapping),
				    sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
		}
#endif
		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

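	/* Any failure lands here with whatever was allocated so far;
	 * bnx2x_free_mem() skips entries that are still NULL, so a
	 * partial allocation unwinds cleanly.
	 */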
alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

/*
 * Init service functions
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags);

int bnx2x_func_start(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
				 WAIT_RAMROD_COMMON);
}

static int bnx2x_func_stop(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
				 0, &(bp->state), WAIT_RAMROD_COMMON);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param is_bcast is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	/* Mark this as a single MAC configuration ramrod (as opposed to a
	 * UC/MC list configuration).
	 */
	config->hdr.echo = 1;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}

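/* Sleep (or, with WAIT_RAMROD_POLL, actively drive the EQ/RX
 * completions) until *state_p reaches the expected value; with a 1 ms
 * sleep per iteration and cnt = 5000 this gives roughly a five second
 * timeout before declaring the ramrod lost.
 */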
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
	if (CHIP_IS_E1H(bp))
		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
	else if (CHIP_MODE_IS_4_PORT(bp))
		return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
	else
		return E2_FUNC_MAX * rel_offset + BP_VN(bp);
}

/**
 * LLH CAM line allocations: currently only iSCSI and ETH macs are
 * relevant. In addition, current implementation is tuned for a
 * single ETH MAC.
 */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
};

static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
				 int set,
				 unsigned char *dev_addr,
				 int index)
{
	u32 wb_data[2];
	u32 mem_offset, ena_offset, mem_index;
	/**
	 * indexes mapping:
	 * 0..7 - goes to MEM
	 * 8..15 - goes to MEM2
	 */

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	/* calculate memory start offset according to the mapping
	 * and index in the memory */
	if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
		mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
					   NIG_REG_LLH0_FUNC_MEM;
		ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
					   NIG_REG_LLH0_FUNC_MEM_ENABLE;
		mem_index = index;
	} else {
		mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
					   NIG_REG_P0_LLH_FUNC_MEM2;
		ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
					   NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
		mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
	}

	if (set) {
		/* LLH_FUNC_MEM is a u64 WB register */
		mem_offset += 8*mem_index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		REG_WR_DMAE(bp, mem_offset, wb_data, 2);
	}

	/* enable/disable the entry */
	REG_WR(bp, ena_offset + 4*mem_index, set);
}

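/* Program the device's primary MAC: into the E1x/E2 CAM for the
 * leading client (plus the all-ones broadcast entry on E1, which
 * apparently needs it programmed explicitly), and into the NIG LLH
 * entry used in MF-SI mode.
 */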
void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
			 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));

	/* networking MAC */
	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
			       (1 << bp->fp->cl_id), cam_offset, 0);

	bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);

	if (CHIP_IS_E1(bp)) {
		/* broadcast MAC */
		static const u8 bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};
		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
	}
}

static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
{
	return CHIP_REV_IS_SLOW(bp) ?
		(BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
		(BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
}

/* set mc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of a very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 */
static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
		return -EINVAL;

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	wmb();

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* Mark that this ramrod doesn't use bp->set_mac_pending for
	 * synchronization.
	 */
	config_cmd->hdr.echo = 0;

	mb();

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}

void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);

	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	wmb();

	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* We'll wait for a completion this time... */
	config_cmd->hdr.echo = 1;

	bp->set_mac_pending = 1;

	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
			  ramrod_flags);
}

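/* E1H and later filter multicasts with a hash rather than CAM
 * entries: the top byte of the crc32c of each address selects one bit
 * across the MC_HASH_SIZE 32-bit registers (assumed to be 8 here,
 * i.e. a 256-bit filter: reg = bit >> 5, position = bit & 0x1f).
 */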
/* Accept one or more multicasts */
static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
{
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	u32 mc_filter[MC_HASH_SIZE];
	u32 crc, bit, regidx;
	int i;

	memset(mc_filter, 0, 4 * MC_HASH_SIZE);

	netdev_for_each_mc_addr(ha, dev) {
		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
		   bnx2x_mc_addr(ha));

		crc = crc32c_le(0, bnx2x_mc_addr(ha),
				ETH_ALEN);
		bit = (crc >> 24) & 0xff;
		regidx = bit >> 5;
		bit &= 0x1f;
		mc_filter[regidx] |= (1 << bit);
	}

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i),
		       mc_filter[i]);

	return 0;
}

void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
}

6548
993ac7b5
MC
6549#ifdef BCM_CNIC
6550/**
6551 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
6552 * MAC(s). This function will wait until the ramdord completion
6553 * returns.
6554 *
6555 * @param bp driver handle
6556 * @param set set or clear the CAM entry
6557 *
6558 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
6559 */
8d96286a 6560static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5 6561{
523224a3
DK
6562 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6563 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
ec6ba945
VZ
6564 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6565 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
523224a3 6566 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
2ba45142 6567 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
993ac7b5
MC
6568
6569 /* Send a SET_MAC ramrod */
2ba45142 6570 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
523224a3 6571 cam_offset, 0);
0793f83f 6572
2ba45142 6573 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
ec6ba945
VZ
6574
6575 return 0;
6576}
6577
6578/**
6579 * Set FCoE L2 MAC(s) at the next enties in the CAM after the
6580 * ETH MAC(s). This function will wait until the ramdord
6581 * completion returns.
6582 *
6583 * @param bp driver handle
6584 * @param set set or clear the CAM entry
6585 *
6586 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
6587 */
6588int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6589{
6590 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6591 /**
6592 * CAM allocation for E1H
6593 * eth unicasts: by func number
6594 * iscsi: by func number
6595 * fip unicast: by func number
6596 * fip multicast: by func number
6597 */
6598 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6599 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6600
6601 return 0;
6602}
6603
6604int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6605{
6606 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6607
6608 /**
6609 * CAM allocation for E1H
6610 * eth unicasts: by func number
6611 * iscsi: by func number
6612 * fip unicast: by func number
6613 * fip multicast: by func number
6614 */
6615 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6616 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6617
993ac7b5
MC
6618 return 0;
6619}
6620#endif
6621
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.is_fcoe_flg =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}

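/* Stamp the CDU validation values into a freshly allocated ethernet
 * context, letting the hardware detect a stale or mismatched context
 * for this CID.
 */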
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

8d96286a 6722static int bnx2x_setup_fw_client(struct bnx2x *bp,
6723 struct bnx2x_client_init_params *params,
6724 u8 activate,
6725 struct client_init_ramrod_data *data,
6726 dma_addr_t data_mapping)
523224a3
DK
6727{
6728 u16 hc_usec;
6729 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6730 int ramrod_flags = 0, rc;
6731
6732 /* HC and context validation values */
6733 hc_usec = params->txq_params.hc_rate ?
6734 1000000 / params->txq_params.hc_rate : 0;
6735 bnx2x_update_coalesce_sb_index(bp,
6736 params->txq_params.fw_sb_id,
6737 params->txq_params.sb_cq_index,
6738 !(params->txq_params.flags & QUEUE_FLG_HC),
6739 hc_usec);
6740
6741 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6742
6743 hc_usec = params->rxq_params.hc_rate ?
6744 1000000 / params->rxq_params.hc_rate : 0;
6745 bnx2x_update_coalesce_sb_index(bp,
6746 params->rxq_params.fw_sb_id,
6747 params->rxq_params.sb_cq_index,
6748 !(params->rxq_params.flags & QUEUE_FLG_HC),
6749 hc_usec);
6750
6751 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6752 params->rxq_params.cid);
6753
6754 /* zero stats */
6755 if (params->txq_params.flags & QUEUE_FLG_STATS)
6756 storm_memset_xstats_zero(bp, BP_PORT(bp),
6757 params->txq_params.stat_id);
6758
6759 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6760 storm_memset_ustats_zero(bp, BP_PORT(bp),
6761 params->rxq_params.stat_id);
6762 storm_memset_tstats_zero(bp, BP_PORT(bp),
6763 params->rxq_params.stat_id);
6764 }
6765
6766 /* Fill the ramrod data */
6767 bnx2x_fill_cl_init_data(bp, params, activate, data);
6768
6769 /* SETUP ramrod.
6770 *
6771 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6772 * barrier other than mmiowb() is needed to impose
6773 * proper ordering of memory operations.
6774 */
6775 mmiowb();
a2fbb9ea 6776
a2fbb9ea 6777
6778 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6779 U64_HI(data_mapping), U64_LO(data_mapping), 0);
a2fbb9ea 6780
34f80b04 6781 /* Wait for completion */
6782 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6783 params->ramrod_params.index,
6784 params->ramrod_params.pstate,
6785 ramrod_flags);
34f80b04 6786 return rc;
6787}
6788
6789/**
6790 * Configure the interrupt mode according to the current configuration.
6791 * In the MSI-X case, it will also try to enable MSI-X.
6792 *
6793 * @param bp
6794 *
6795 * @return int
6796 */
6797static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 6798{
d6214d7a 6799 int rc = 0;
ca00392c 6800
6801 switch (bp->int_mode) {
6802 case INT_MODE_MSI:
6803 bnx2x_enable_msi(bp);
6804 /* falling through... */
6805 case INT_MODE_INTx:
ec6ba945 6806 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
d6214d7a 6807 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
ca00392c 6808 break;
6809 default:
6810 /* Set number of queues according to bp->multi_mode value */
6811 bnx2x_set_num_queues(bp);
ca00392c 6812
6813 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6814 bp->num_queues);
ca00392c 6815
6816 /* if we can't use MSI-X we only need one fp,
6817 * so try to enable MSI-X with the requested number of fp's
6818 * and fall back to MSI or legacy INTx with one fp
6819 */
6820 rc = bnx2x_enable_msix(bp);
6821 if (rc) {
6822 /* failed to enable MSI-X */
6823 if (bp->multi_mode)
6824 DP(NETIF_MSG_IFUP,
6825 "Multi requested but failed to "
6826 "enable MSI-X (%d), "
6827 "set number of queues to %d\n",
6828 bp->num_queues,
6829 1 + NONE_ETH_CONTEXT_USE);
6830 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6831
6832 if (!(bp->flags & DISABLE_MSI_FLAG))
6833 bnx2x_enable_msi(bp);
6834 }
ca00392c 6835
6836 break;
6837 }
6838
6839 return rc;
6840}
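/* Interrupt-mode selection above, viewed as a fallback ladder:
 *
 *   MSI-X with bp->num_queues vectors
 *     -> on failure: single queue + MSI (unless DISABLE_MSI_FLAG)
 *     -> otherwise : single queue + legacy INTx
 *
 * The INT_MODE_MSI/INT_MODE_INTx module settings skip straight to the
 * single-queue variants. */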
6841
6842/* must be called prior to any HW initializations */
6843static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6844{
6845 return L2_ILT_LINES(bp);
6846}
6847
6848void bnx2x_ilt_set_info(struct bnx2x *bp)
6849{
6850 struct ilt_client_info *ilt_client;
6851 struct bnx2x_ilt *ilt = BP_ILT(bp);
6852 u16 line = 0;
6853
6854 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6855 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6856
6857 /* CDU */
6858 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6859 ilt_client->client_num = ILT_CLIENT_CDU;
6860 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6861 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6862 ilt_client->start = line;
6863 line += L2_ILT_LINES(bp);
6864#ifdef BCM_CNIC
6865 line += CNIC_ILT_LINES;
6866#endif
6867 ilt_client->end = line - 1;
6868
6869 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6870 "flags 0x%x, hw psz %d\n",
6871 ilt_client->start,
6872 ilt_client->end,
6873 ilt_client->page_size,
6874 ilt_client->flags,
6875 ilog2(ilt_client->page_size >> 12));
6876
6877 /* QM */
6878 if (QM_INIT(bp->qm_cid_count)) {
6879 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6880 ilt_client->client_num = ILT_CLIENT_QM;
6881 ilt_client->page_size = QM_ILT_PAGE_SZ;
6882 ilt_client->flags = 0;
6883 ilt_client->start = line;
6884
6885 /* 4 bytes for each cid */
6886 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6887 QM_ILT_PAGE_SZ);
6888
6889 ilt_client->end = line - 1;
6890
6891 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6892 "flags 0x%x, hw psz %d\n",
6893 ilt_client->start,
6894 ilt_client->end,
6895 ilt_client->page_size,
6896 ilt_client->flags,
6897 ilog2(ilt_client->page_size >> 12));
6898
6899 }
6900 /* SRC */
6901 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6902#ifdef BCM_CNIC
6903 ilt_client->client_num = ILT_CLIENT_SRC;
6904 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6905 ilt_client->flags = 0;
6906 ilt_client->start = line;
6907 line += SRC_ILT_LINES;
6908 ilt_client->end = line - 1;
6909
6910 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6911 "flags 0x%x, hw psz %d\n",
6912 ilt_client->start,
6913 ilt_client->end,
6914 ilt_client->page_size,
6915 ilt_client->flags,
6916 ilog2(ilt_client->page_size >> 12));
6917
6918#else
6919 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6920#endif
9f6c9258 6921
6922 /* TM */
6923 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6924#ifdef BCM_CNIC
6925 ilt_client->client_num = ILT_CLIENT_TM;
6926 ilt_client->page_size = TM_ILT_PAGE_SZ;
6927 ilt_client->flags = 0;
6928 ilt_client->start = line;
6929 line += TM_ILT_LINES;
6930 ilt_client->end = line - 1;
6931
6932 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6933 "flags 0x%x, hw psz %d\n",
6934 ilt_client->start,
6935 ilt_client->end,
6936 ilt_client->page_size,
6937 ilt_client->flags,
6938 ilog2(ilt_client->page_size >> 12));
9f6c9258 6939
6940#else
6941 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6942#endif
6943}
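/* Rough ILT layout produced above for one PF (the order is fixed,
 * line counts depend on the configuration):
 *
 *   CDU: L2 context lines (+ CNIC lines when BCM_CNIC is set)
 *   QM : 4 bytes per cid, per QM queue, rounded up to pages
 *   SRC: iSCSI searcher lines (BCM_CNIC only, otherwise skipped)
 *   TM : timer block lines    (BCM_CNIC only, otherwise skipped)
 */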
f85582f8 6944
6945int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6946 int is_leading)
a2fbb9ea 6947{
523224a3 6948 struct bnx2x_client_init_params params = { {0} };
6949 int rc;
6950
6951 /* reset IGU state; skip the FCoE L2 queue */
6952 if (!IS_FCOE_FP(fp))
6953 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
523224a3 6954 IGU_INT_ENABLE, 0);
a2fbb9ea 6955
6956 params.ramrod_params.pstate = &fp->state;
6957 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6958 params.ramrod_params.index = fp->index;
6959 params.ramrod_params.cid = fp->cid;
a2fbb9ea 6960
6961#ifdef BCM_CNIC
6962 if (IS_FCOE_FP(fp))
6963 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6964
6965#endif
6966
6967 if (is_leading)
6968 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 6969
6970 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6971
6972 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6973
6974 rc = bnx2x_setup_fw_client(bp, &params, 1,
6975 bnx2x_sp(bp, client_init_data),
6976 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 6977 return rc;
6978}
6979
8d96286a 6980static int bnx2x_stop_fw_client(struct bnx2x *bp,
6981 struct bnx2x_client_ramrod_params *p)
a2fbb9ea 6982{
34f80b04 6983 int rc;
a2fbb9ea 6984
523224a3 6985 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 6986
6987 /* halt the connection */
6988 *p->pstate = BNX2X_FP_STATE_HALTING;
6989 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6990 p->cl_id, 0);
a2fbb9ea 6991
34f80b04 6992 /* Wait for completion */
6993 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6994 p->pstate, poll_flag);
34f80b04 6995 if (rc) /* timeout */
da5a662a 6996 return rc;
a2fbb9ea 6997
6998 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6999 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
7000 p->cl_id, 0);
7001 /* Wait for completion */
7002 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
7003 p->pstate, poll_flag);
7004 if (rc) /* timeout */
7005 return rc;
a2fbb9ea 7006
a2fbb9ea 7007
7008 /* delete cfc entry */
7009 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 7010
7011 /* Wait for completion */
7012 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
7013 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 7014 return rc;
7015}
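/* The teardown above is the usual three-step ramrod sequence:
 * HALT (stop classifying packets to the client), TERMINATE (flush
 * the connection in firmware), then CFC_DEL (release the connection
 * context), waiting on the fp state machine after each step. */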
7016
7017static int bnx2x_stop_client(struct bnx2x *bp, int index)
7018{
7019 struct bnx2x_client_ramrod_params client_stop = {0};
7020 struct bnx2x_fastpath *fp = &bp->fp[index];
7021
7022 client_stop.index = index;
7023 client_stop.cid = fp->cid;
7024 client_stop.cl_id = fp->cl_id;
7025 client_stop.pstate = &(fp->state);
7026 client_stop.poll = 0;
7027
7028 return bnx2x_stop_fw_client(bp, &client_stop);
7029}
7030
7031
7032static void bnx2x_reset_func(struct bnx2x *bp)
7033{
7034 int port = BP_PORT(bp);
7035 int func = BP_FUNC(bp);
f2e0899f 7036 int i;
523224a3 7037 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
7038 (CHIP_IS_E2(bp) ?
7039 offsetof(struct hc_status_block_data_e2, common) :
7040 offsetof(struct hc_status_block_data_e1x, common));
7041 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
7042 int pfid_offset = offsetof(struct pci_entity, pf_id);
7043
7044 /* Disable the function in the FW */
7045 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
7046 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
7047 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
7048 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
7049
7050 /* FP SBs */
ec6ba945 7051 for_each_eth_queue(bp, i) {
7052 struct bnx2x_fastpath *fp = &bp->fp[i];
7053 REG_WR8(bp,
7054 BAR_CSTRORM_INTMEM +
7055 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
7056 + pfunc_offset_fp + pfid_offset,
7057 HC_FUNCTION_DISABLED);
7058 }
7059
7060 /* SP SB */
7061 REG_WR8(bp,
7062 BAR_CSTRORM_INTMEM +
7063 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
7064 pfunc_offset_sp + pfid_offset,
7065 HC_FUNCTION_DISABLED);
7066
7067
7068 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
7069 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
7070 0);
7071
7072 /* Configure IGU */
7073 if (bp->common.int_block == INT_BLOCK_HC) {
7074 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7075 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7076 } else {
7077 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7078 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7079 }
34f80b04 7080
7081#ifdef BCM_CNIC
7082 /* Disable Timer scan */
7083 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7084 /*
7085 * Wait for at least 10ms and up to 2 seconds for the timers scan to
7086 * complete
7087 */
7088 for (i = 0; i < 200; i++) {
7089 msleep(10);
7090 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7091 break;
7092 }
7093#endif
34f80b04 7094 /* Clear ILT */
7095 bnx2x_clear_func_ilt(bp, func);
7096
7097 /* Timers workaround bug for E2: if this is vnic-3,
7098 * we need to set the entire ILT range for the timers.
7099 */
7100 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
7101 struct ilt_client_info ilt_cli;
7102 /* use dummy TM client */
7103 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7104 ilt_cli.start = 0;
7105 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7106 ilt_cli.client_num = ILT_CLIENT_TM;
7107
7108 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7109 }
7110
7111 /* this assumes that reset_port() is called before reset_func() */
7112 if (CHIP_IS_E2(bp))
7113 bnx2x_pf_disable(bp);
7114
7115 bp->dmae_ready = 0;
7116}
7117
7118static void bnx2x_reset_port(struct bnx2x *bp)
7119{
7120 int port = BP_PORT(bp);
7121 u32 val;
7122
7123 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7124
7125 /* Do not rcv packets to BRB */
7126 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7127 /* Do not direct rcv packets that are not for MCP to the BRB */
7128 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7129 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7130
7131 /* Configure AEU */
7132 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7133
7134 msleep(100);
7135 /* Check for BRB port occupancy */
7136 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7137 if (val)
7138 DP(NETIF_MSG_IFDOWN,
33471629 7139 "BRB1 is not empty %d blocks are occupied\n", val);
7140
7141 /* TODO: Close Doorbell port? */
7142}
7143
7144static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7145{
7146 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 7147 BP_ABS_FUNC(bp), reset_code);
7148
7149 switch (reset_code) {
7150 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7151 bnx2x_reset_port(bp);
7152 bnx2x_reset_func(bp);
7153 bnx2x_reset_common(bp);
7154 break;
7155
7156 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7157 bnx2x_reset_port(bp);
7158 bnx2x_reset_func(bp);
7159 break;
7160
7161 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7162 bnx2x_reset_func(bp);
7163 break;
49d66772 7164
7165 default:
7166 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7167 break;
7168 }
7169}
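/* The reset scope widens with the MCP response: UNLOAD_FUNCTION
 * resets only per-function resources, UNLOAD_PORT additionally
 * resets the port blocks, and UNLOAD_COMMON (last driver instance
 * on the chip) also resets the common blocks. */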
7170
7171#ifdef BCM_CNIC
7172static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7173{
7174 if (bp->flags & FCOE_MACS_SET) {
7175 if (!IS_MF_SD(bp))
7176 bnx2x_set_fip_eth_mac_addr(bp, 0);
7177
7178 bnx2x_set_all_enode_macs(bp, 0);
7179
7180 bp->flags &= ~FCOE_MACS_SET;
7181 }
7182}
7183#endif
7184
9f6c9258 7185void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7186{
da5a662a 7187 int port = BP_PORT(bp);
a2fbb9ea 7188 u32 reset_code = 0;
da5a662a 7189 int i, cnt, rc;
a2fbb9ea 7190
555f6c78 7191 /* Wait until tx fastpath tasks complete */
ec6ba945 7192 for_each_tx_queue(bp, i) {
7193 struct bnx2x_fastpath *fp = &bp->fp[i];
7194
34f80b04 7195 cnt = 1000;
e8b5fc51 7196 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7197
7198 if (!cnt) {
7199 BNX2X_ERR("timeout waiting for queue[%d]\n",
7200 i);
7201#ifdef BNX2X_STOP_ON_ERROR
7202 bnx2x_panic();
7203 return -EBUSY;
7204#else
7205 break;
7206#endif
7207 }
7208 cnt--;
da5a662a 7209 msleep(1);
34f80b04 7210 }
228241eb 7211 }
7212 /* Give HW time to discard old tx messages */
7213 msleep(1);
a2fbb9ea 7214
6e30dd4e 7215 bnx2x_set_eth_mac(bp, 0);
65abd74d 7216
6e30dd4e 7217 bnx2x_invalidate_uc_list(bp);
3101c2bc 7218
7219 if (CHIP_IS_E1(bp))
7220 bnx2x_invalidate_e1_mc_list(bp);
7221 else {
7222 bnx2x_invalidate_e1h_mc_list(bp);
7223 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3101c2bc 7224 }
523224a3 7225
993ac7b5 7226#ifdef BCM_CNIC
ec6ba945 7227 bnx2x_del_fcoe_eth_macs(bp);
993ac7b5 7228#endif
3101c2bc 7229
7230 if (unload_mode == UNLOAD_NORMAL)
7231 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7232
7d0446c2 7233 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7234 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7235
7d0446c2 7236 else if (bp->wol) {
7237 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7238 u8 *mac_addr = bp->dev->dev_addr;
7239 u32 val;
7240 /* The mac address is written to entries 1-4 to
7241 preserve entry 0 which is used by the PMF */
7242 u8 entry = (BP_E1HVN(bp) + 1)*8;
7243
7244 val = (mac_addr[0] << 8) | mac_addr[1];
7245 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7246
7247 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7248 (mac_addr[4] << 8) | mac_addr[5];
7249 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7250
7251 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7252
7253 } else
7254 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7255
7256 /* Close multi and leading connections.
7257 Completions for ramrods are collected in a synchronous way */
7258 for_each_queue(bp, i)
7259
7260 if (bnx2x_stop_client(bp, i))
7261#ifdef BNX2X_STOP_ON_ERROR
7262 return;
7263#else
228241eb 7264 goto unload_error;
523224a3 7265#endif
a2fbb9ea 7266
523224a3 7267 rc = bnx2x_func_stop(bp);
da5a662a 7268 if (rc) {
523224a3 7269 BNX2X_ERR("Function stop failed!\n");
da5a662a 7270#ifdef BNX2X_STOP_ON_ERROR
523224a3 7271 return;
7272#else
7273 goto unload_error;
34f80b04 7274#endif
228241eb 7275 }
523224a3 7276#ifndef BNX2X_STOP_ON_ERROR
228241eb 7277unload_error:
523224a3 7278#endif
34f80b04 7279 if (!BP_NOMCP(bp))
a22f0788 7280 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 7281 else {
7282 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7283 "%d, %d, %d\n", BP_PATH(bp),
7284 load_count[BP_PATH(bp)][0],
7285 load_count[BP_PATH(bp)][1],
7286 load_count[BP_PATH(bp)][2]);
7287 load_count[BP_PATH(bp)][0]--;
7288 load_count[BP_PATH(bp)][1 + port]--;
7289 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7290 "%d, %d, %d\n", BP_PATH(bp),
7291 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7292 load_count[BP_PATH(bp)][2]);
7293 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 7294 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 7295 else if (load_count[BP_PATH(bp)][1 + port] == 0)
7296 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7297 else
7298 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7299 }
a2fbb9ea 7300
7301 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7302 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7303 bnx2x__link_reset(bp);
a2fbb9ea 7304
7305 /* Disable HW interrupts, NAPI */
7306 bnx2x_netif_stop(bp, 1);
7307
7308 /* Release IRQs */
d6214d7a 7309 bnx2x_free_irq(bp);
523224a3 7310
a2fbb9ea 7311 /* Reset the chip */
228241eb 7312 bnx2x_reset_chip(bp, reset_code);
7313
7314 /* Report UNLOAD_DONE to MCP */
34f80b04 7315 if (!BP_NOMCP(bp))
a22f0788 7316 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 7317
7318}
7319
9f6c9258 7320void bnx2x_disable_close_the_gate(struct bnx2x *bp)
7321{
7322 u32 val;
7323
7324 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7325
7326 if (CHIP_IS_E1(bp)) {
7327 int port = BP_PORT(bp);
7328 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7329 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7330
7331 val = REG_RD(bp, addr);
7332 val &= ~(0x300);
7333 REG_WR(bp, addr, val);
7334 } else if (CHIP_IS_E1H(bp)) {
7335 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7336 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7337 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7338 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7339 }
7340}
7341
7342/* Close gates #2, #3 and #4: */
7343static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7344{
7345 u32 val, addr;
7346
7347 /* Gates #2 and #4a are closed/opened for "not E1" only */
7348 if (!CHIP_IS_E1(bp)) {
7349 /* #4 */
7350 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7351 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7352 close ? (val | 0x1) : (val & (~(u32)1)));
7353 /* #2 */
7354 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7355 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7356 close ? (val | 0x1) : (val & (~(u32)1)));
7357 }
7358
7359 /* #3 */
7360 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7361 val = REG_RD(bp, addr);
7362 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7363
7364 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7365 close ? "closing" : "opening");
7366 mmiowb();
7367}
7368
7369#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7370
7371static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7372{
7373 /* Do some magic... */
7374 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7375 *magic_val = val & SHARED_MF_CLP_MAGIC;
7376 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7377}
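/* bnx2x_clp_reset_prep()/bnx2x_clp_reset_done() are used as a
 * bracket around an MCP reset on non-E1 chips (see
 * bnx2x_reset_mcp_prep()/bnx2x_reset_mcp_comp() below), e.g.:
 *
 *	u32 magic;
 *	bnx2x_clp_reset_prep(bp, &magic);
 *	... reset and re-validate the MCP ...
 *	bnx2x_clp_reset_done(bp, magic);
 */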
7378
7379/* Restore the value of the `magic' bit.
7380 *
7381 * @param bp Driver handle.
7382 * @param magic_val Old value of the `magic' bit.
7383 */
7384static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7385{
7386 /* Restore the `magic' bit value... */
7387 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7388 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7389 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7390}
7391
7392/**
7393 * Prepares for MCP reset: takes care of CLP configurations.
7394 *
7395 * @param bp
7396 * @param magic_val Old value of 'magic' bit.
7397 */
7398static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7399{
7400 u32 shmem;
7401 u32 validity_offset;
7402
7403 DP(NETIF_MSG_HW, "Starting\n");
7404
7405 /* Set `magic' bit in order to save MF config */
7406 if (!CHIP_IS_E1(bp))
7407 bnx2x_clp_reset_prep(bp, magic_val);
7408
7409 /* Get shmem offset */
7410 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7411 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7412
7413 /* Clear validity map flags */
7414 if (shmem > 0)
7415 REG_WR(bp, shmem + validity_offset, 0);
7416}
7417
7418#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7419#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7420
7421/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7422 * depending on the HW type.
7423 *
7424 * @param bp
7425 */
7426static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7427{
7428 /* special handling for emulation and FPGA,
7429 wait 10 times longer */
7430 if (CHIP_REV_IS_SLOW(bp))
7431 msleep(MCP_ONE_TIMEOUT*10);
7432 else
7433 msleep(MCP_ONE_TIMEOUT);
7434}
7435
7436static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7437{
7438 u32 shmem, cnt, validity_offset, val;
7439 int rc = 0;
7440
7441 msleep(100);
7442
7443 /* Get shmem offset */
7444 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7445 if (shmem == 0) {
7446 BNX2X_ERR("Shmem 0 return failure\n");
7447 rc = -ENOTTY;
7448 goto exit_lbl;
7449 }
7450
7451 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7452
7453 /* Wait for MCP to come up */
7454 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7455 /* TBD: it's best to check the validity map of the last port;
7456 * currently it checks on port 0.
7457 */
7458 val = REG_RD(bp, shmem + validity_offset);
7459 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7460 shmem + validity_offset, val);
7461
7462 /* check that shared memory is valid. */
7463 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7464 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7465 break;
7466
7467 bnx2x_mcp_wait_one(bp);
7468 }
7469
7470 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7471
7472 /* Check that shared memory is valid. This indicates that MCP is up. */
7473 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7474 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7475 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7476 rc = -ENOTTY;
7477 goto exit_lbl;
7478 }
7479
7480exit_lbl:
7481 /* Restore the `magic' bit value */
7482 if (!CHIP_IS_E1(bp))
7483 bnx2x_clp_reset_done(bp, magic_val);
7484
7485 return rc;
7486}
7487
7488static void bnx2x_pxp_prep(struct bnx2x *bp)
7489{
7490 if (!CHIP_IS_E1(bp)) {
7491 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7492 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7493 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7494 mmiowb();
7495 }
7496}
7497
7498/*
7499 * Reset the whole chip except for:
7500 * - PCIE core
7501 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7502 * one reset bit)
7503 * - IGU
7504 * - MISC (including AEU)
7505 * - GRC
7506 * - RBCN, RBCP
7507 */
7508static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7509{
7510 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7511
7512 not_reset_mask1 =
7513 MISC_REGISTERS_RESET_REG_1_RST_HC |
7514 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7515 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7516
7517 not_reset_mask2 =
7518 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7519 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7520 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7521 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7522 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7523 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7524 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7525 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7526
7527 reset_mask1 = 0xffffffff;
7528
7529 if (CHIP_IS_E1(bp))
7530 reset_mask2 = 0xffff;
7531 else
7532 reset_mask2 = 0x1ffff;
7533
7534 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7535 reset_mask1 & (~not_reset_mask1));
7536 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7537 reset_mask2 & (~not_reset_mask2));
7538
7539 barrier();
7540 mmiowb();
7541
7542 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7543 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7544 mmiowb();
7545}
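/* Assuming the usual semantics of the MISC reset registers (a write
 * to the _CLEAR register asserts reset for the masked blocks, a
 * write to _SET releases it), the function above pulses reset on
 * everything except the not_reset_mask blocks listed in the header
 * comment. */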
7546
7547static int bnx2x_process_kill(struct bnx2x *bp)
7548{
7549 int cnt = 1000;
7550 u32 val = 0;
7551 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7552
7553
7554 /* Empty the Tetris buffer, wait for 1s */
7555 do {
7556 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7557 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7558 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7559 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7560 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7561 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7562 ((port_is_idle_0 & 0x1) == 0x1) &&
7563 ((port_is_idle_1 & 0x1) == 0x1) &&
7564 (pgl_exp_rom2 == 0xffffffff))
7565 break;
7566 msleep(1);
7567 } while (cnt-- > 0);
7568
7569 if (cnt <= 0) {
7570 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7571 " are still"
7572 " outstanding read requests after 1s!\n");
7573 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7574 " port_is_idle_0=0x%08x,"
7575 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7576 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7577 pgl_exp_rom2);
7578 return -EAGAIN;
7579 }
7580
7581 barrier();
7582
7583 /* Close gates #2, #3 and #4 */
7584 bnx2x_set_234_gates(bp, true);
7585
7586 /* TBD: Indicate that "process kill" is in progress to MCP */
7587
7588 /* Clear "unprepared" bit */
7589 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7590 barrier();
7591
7592 /* Make sure all is written to the chip before the reset */
7593 mmiowb();
7594
7595 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7596 * PSWHST, GRC and PSWRD Tetris buffer.
7597 */
7598 msleep(1);
7599
7600 /* Prepare to chip reset: */
7601 /* MCP */
7602 bnx2x_reset_mcp_prep(bp, &val);
7603
7604 /* PXP */
7605 bnx2x_pxp_prep(bp);
7606 barrier();
7607
7608 /* reset the chip */
7609 bnx2x_process_kill_chip_reset(bp);
7610 barrier();
7611
7612 /* Recover after reset: */
7613 /* MCP */
7614 if (bnx2x_reset_mcp_comp(bp, val))
7615 return -EAGAIN;
7616
7617 /* PXP */
7618 bnx2x_pxp_prep(bp);
7619
7620 /* Open the gates #2, #3 and #4 */
7621 bnx2x_set_234_gates(bp, false);
7622
7623 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7624 * reset state, re-enable attentions. */
7625
7626 return 0;
7627}
7628
7629static int bnx2x_leader_reset(struct bnx2x *bp)
7630{
7631 int rc = 0;
7632 /* Try to recover after the failure */
7633 if (bnx2x_process_kill(bp)) {
7634 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7635 bp->dev->name);
7636 rc = -EAGAIN;
7637 goto exit_leader_reset;
7638 }
7639
7640 /* Clear "reset is in progress" bit and update the driver state */
7641 bnx2x_set_reset_done(bp);
7642 bp->recovery_state = BNX2X_RECOVERY_DONE;
7643
7644exit_leader_reset:
7645 bp->is_leader = 0;
7646 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7647 smp_wmb();
7648 return rc;
7649}
7650
7651/* Assumption: runs under rtnl lock. This together with the fact
7652 * that it's called only from bnx2x_reset_task() ensure that it
7653 * will never be called when netif_running(bp->dev) is false.
7654 */
7655static void bnx2x_parity_recover(struct bnx2x *bp)
7656{
7657 DP(NETIF_MSG_HW, "Handling parity\n");
7658 while (1) {
7659 switch (bp->recovery_state) {
7660 case BNX2X_RECOVERY_INIT:
7661 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7662 /* Try to get a LEADER_LOCK HW lock */
7663 if (bnx2x_trylock_hw_lock(bp,
7664 HW_LOCK_RESOURCE_RESERVED_08))
7665 bp->is_leader = 1;
7666
7667 /* Stop the driver */
7668 /* If interface has been removed - break */
7669 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7670 return;
7671
7672 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7673 /* Ensure "is_leader" and "recovery_state"
7674 * update values are seen on other CPUs
7675 */
7676 smp_wmb();
7677 break;
7678
7679 case BNX2X_RECOVERY_WAIT:
7680 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7681 if (bp->is_leader) {
7682 u32 load_counter = bnx2x_get_load_cnt(bp);
7683 if (load_counter) {
7684 /* Wait until all other functions get
7685 * down.
7686 */
7687 schedule_delayed_work(&bp->reset_task,
7688 HZ/10);
7689 return;
7690 } else {
7691 /* If all other functions got down -
7692 * try to bring the chip back to
7693 * normal. In any case it's an exit
7694 * point for a leader.
7695 */
7696 if (bnx2x_leader_reset(bp) ||
7697 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7698 printk(KERN_ERR"%s: Recovery "
7699 "has failed. Power cycle is "
7700 "needed.\n", bp->dev->name);
7701 /* Disconnect this device */
7702 netif_device_detach(bp->dev);
7703 /* Block ifup for all function
7704 * of this ASIC until
7705 * "process kill" or power
7706 * cycle.
7707 */
7708 bnx2x_set_reset_in_progress(bp);
7709 /* Shut down the power */
7710 bnx2x_set_power_state(bp,
7711 PCI_D3hot);
7712 return;
7713 }
7714
7715 return;
7716 }
7717 } else { /* non-leader */
7718 if (!bnx2x_reset_is_done(bp)) {
7719 /* Try to get a LEADER_LOCK HW lock as
7720 * long as a former leader may have
7721 * been unloaded by the user or
7722 * released leadership for another
7723 * reason.
7724 */
7725 if (bnx2x_trylock_hw_lock(bp,
7726 HW_LOCK_RESOURCE_RESERVED_08)) {
7727 /* I'm a leader now! Restart a
7728 * switch case.
7729 */
7730 bp->is_leader = 1;
7731 break;
7732 }
7733
7734 schedule_delayed_work(&bp->reset_task,
7735 HZ/10);
7736 return;
7737
7738 } else { /* A leader has completed
7739 * the "process kill". It's an exit
7740 * point for a non-leader.
7741 */
7742 bnx2x_nic_load(bp, LOAD_NORMAL);
7743 bp->recovery_state =
7744 BNX2X_RECOVERY_DONE;
7745 smp_wmb();
7746 return;
7747 }
7748 }
7749 default:
7750 return;
7751 }
7752 }
7753}
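/* Recovery state machine sketched above:
 *
 *   INIT: every function unloads; whoever grabs the leader HW lock
 *         becomes the leader.
 *   WAIT, leader:     wait for the global load count to reach zero,
 *         then run the "process kill" reset and reload.
 *   WAIT, non-leader: poll until the leader clears the reset-in-
 *         progress flag (or inherit leadership), then reload.
 */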
7754
7755/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7756 * scheduled on a general queue in order to prevent a dead lock.
7757 */
7758static void bnx2x_reset_task(struct work_struct *work)
7759{
72fd0718 7760 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7761
7762#ifdef BNX2X_STOP_ON_ERROR
7763 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7764 " so reset not done to allow debug dump,\n"
72fd0718 7765 KERN_ERR " you will need to reboot when done\n");
7766 return;
7767#endif
7768
7769 rtnl_lock();
7770
7771 if (!netif_running(bp->dev))
7772 goto reset_task_exit;
7773
7774 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7775 bnx2x_parity_recover(bp);
7776 else {
7777 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7778 bnx2x_nic_load(bp, LOAD_NORMAL);
7779 }
7780
7781reset_task_exit:
7782 rtnl_unlock();
7783}
7784
7785/* end of nic load/unload */
7786
7787/*
7788 * Init service functions
7789 */
7790
8d96286a 7791static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7792{
7793 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7794 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7795 return base + (BP_ABS_FUNC(bp)) * stride;
7796}
7797
f2e0899f 7798static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 7799{
f2e0899f 7800 u32 reg = bnx2x_get_pretend_reg(bp);
7801
7802 /* Flush all outstanding writes */
7803 mmiowb();
7804
7805 /* Pretend to be function 0 */
7806 REG_WR(bp, reg, 0);
f2e0899f 7807 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
7808
7809 /* From now we are in the "like-E1" mode */
7810 bnx2x_int_disable(bp);
7811
7812 /* Flush all outstanding writes */
7813 mmiowb();
7814
7815 /* Restore the original function */
7816 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7817 REG_RD(bp, reg);
7818}
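/* The "pretend" register makes this function's subsequent GRC
 * accesses appear to come from another PF; pretending to be
 * function 0 lets an E1H function run the E1-style interrupt
 * disable sequence before restoring its real function id. */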
7819
f2e0899f 7820static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 7821{
f2e0899f 7822 if (CHIP_IS_E1(bp))
f1ef27ef 7823 bnx2x_int_disable(bp);
7824 else
7825 bnx2x_undi_int_disable_e1h(bp);
7826}
7827
7828static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7829{
7830 u32 val;
7831
7832 /* Check if there is any driver already loaded */
7833 val = REG_RD(bp, MISC_REG_UNPREPARED);
7834 if (val == 0x1) {
7835 /* Check if it is the UNDI driver:
7836 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7837 */
4a37fb66 7838 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7839 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7840 if (val == 0x7) {
7841 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7842 /* save our pf_num */
7843 int orig_pf_num = bp->pf_num;
7844 u32 swap_en;
7845 u32 swap_val;
34f80b04 7846
7847 /* clear the UNDI indication */
7848 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7849
7850 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7851
7852 /* try unload UNDI on port 0 */
f2e0899f 7853 bp->pf_num = 0;
da5a662a 7854 bp->fw_seq =
f2e0899f 7855 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7856 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 7857 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7858
7859 /* if UNDI is loaded on the other port */
7860 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7861
da5a662a 7862 /* send "DONE" for previous unload */
7863 bnx2x_fw_command(bp,
7864 DRV_MSG_CODE_UNLOAD_DONE, 0);
7865
7866 /* unload UNDI on port 1 */
f2e0899f 7867 bp->pf_num = 1;
da5a662a 7868 bp->fw_seq =
f2e0899f 7869 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7870 DRV_MSG_SEQ_NUMBER_MASK);
7871 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7872
a22f0788 7873 bnx2x_fw_command(bp, reset_code, 0);
7874 }
7875
7876 /* now it's safe to release the lock */
7877 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7878
f2e0899f 7879 bnx2x_undi_int_disable(bp);
7880
7881 /* close input traffic and wait for it */
7882 /* Do not rcv packets to BRB */
7883 REG_WR(bp,
7884 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7885 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7886 /* Do not direct rcv packets that are not for MCP to
7887 * the BRB */
7888 REG_WR(bp,
7889 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7890 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7891 /* clear AEU */
7892 REG_WR(bp,
7893 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7894 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7895 msleep(10);
7896
7897 /* save NIG port swap info */
7898 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7899 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7900 /* reset device */
7901 REG_WR(bp,
7902 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7903 0xd3ffffff);
7904 REG_WR(bp,
7905 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7906 0x1403);
7907 /* take the NIG out of reset and restore swap values */
7908 REG_WR(bp,
7909 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7910 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7911 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7912 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7913
7914 /* send unload done to the MCP */
a22f0788 7915 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7916
7917 /* restore our func and fw_seq */
f2e0899f 7918 bp->pf_num = orig_pf_num;
da5a662a 7919 bp->fw_seq =
f2e0899f 7920 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7921 DRV_MSG_SEQ_NUMBER_MASK);
7922 } else
7923 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7924 }
7925}
7926
7927static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7928{
7929 u32 val, val2, val3, val4, id;
72ce58c3 7930 u16 pmc;
7931
7932 /* Get the chip revision id and number. */
7933 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7934 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7935 id = ((val & 0xffff) << 16);
7936 val = REG_RD(bp, MISC_REG_CHIP_REV);
7937 id |= ((val & 0xf) << 12);
7938 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7939 id |= ((val & 0xff) << 4);
5a40e08e 7940 val = REG_RD(bp, MISC_REG_BOND_ID);
7941 id |= (val & 0xf);
7942 bp->common.chip_id = id;
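/* Worked example with hypothetical register values: num = 0x164e,
 * rev = 0x1, metal = 0x00, bond_id = 0x0 compose
 * chip_id = 0x164e1000 under the num:16-31 / rev:12-15 /
 * metal:4-11 / bond_id:0-3 layout noted above. */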
7943
7944 /* Set doorbell size */
7945 bp->db_size = (1 << BNX2X_DB_SHIFT);
7946
7947 if (CHIP_IS_E2(bp)) {
7948 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7949 if ((val & 1) == 0)
7950 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7951 else
7952 val = (val >> 1) & 1;
7953 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7954 "2_PORT_MODE");
7955 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7956 CHIP_2_PORT_MODE;
7957
7958 if (CHIP_MODE_IS_4_PORT(bp))
7959 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7960 else
7961 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7962 } else {
7963 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7964 bp->pfid = bp->pf_num; /* 0..7 */
7965 }
7966
7967 /*
7968 * set base FW non-default (fast path) status block id, this value is
7969 * used to initialize the fw_sb_id saved on the fp/queue structure to
7970 * determine the id used by the FW.
7971 */
7972 if (CHIP_IS_E1x(bp))
7973 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7974 else /* E2 */
7975 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7976
7977 bp->link_params.chip_id = bp->common.chip_id;
7978 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 7979
7980 val = (REG_RD(bp, 0x2874) & 0x55);
7981 if ((bp->common.chip_id & 0x1) ||
7982 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7983 bp->flags |= ONE_PORT_FLAG;
7984 BNX2X_DEV_INFO("single port device\n");
7985 }
7986
7987 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7988 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7989 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7990 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7991 bp->common.flash_size, bp->common.flash_size);
7992
7993 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7994 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7995 MISC_REG_GENERIC_CR_1 :
7996 MISC_REG_GENERIC_CR_0));
34f80b04 7997 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 7998 bp->link_params.shmem2_base = bp->common.shmem2_base;
7999 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8000 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 8001
f2e0899f 8002 if (!bp->common.shmem_base) {
8003 BNX2X_DEV_INFO("MCP not active\n");
8004 bp->flags |= NO_MCP_FLAG;
8005 return;
8006 }
8007
8008 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8009 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8010 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f2e0899f 8011 BNX2X_ERR("BAD MCP validity signature\n");
8012
8013 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8014 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8015
8016 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8017 SHARED_HW_CFG_LED_MODE_MASK) >>
8018 SHARED_HW_CFG_LED_MODE_SHIFT);
8019
8020 bp->link_params.feature_config_flags = 0;
8021 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8022 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8023 bp->link_params.feature_config_flags |=
8024 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8025 else
8026 bp->link_params.feature_config_flags &=
8027 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8028
8029 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8030 bp->common.bc_ver = val;
8031 BNX2X_DEV_INFO("bc_ver %X\n", val);
8032 if (val < BNX2X_BC_VER) {
8033 /* for now only warn;
8034 * later we might need to enforce this */
8035 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
8036 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 8037 }
4d295db0 8038 bp->link_params.feature_config_flags |=
a22f0788 8039 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
8040 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8041
8042 bp->link_params.feature_config_flags |=
8043 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
8044 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
8045
8046 if (BP_E1HVN(bp) == 0) {
8047 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8048 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8049 } else {
8050 /* no WOL capability for E1HVN != 0 */
8051 bp->flags |= NO_WOL_FLAG;
8052 }
8053 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8054 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8055
8056 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8057 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8058 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8059 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8060
8061 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
8062 val, val2, val3, val4);
8063}
8064
8065#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
8066#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
8067
8068static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8069{
8070 int pfid = BP_FUNC(bp);
8071 int vn = BP_E1HVN(bp);
8072 int igu_sb_id;
8073 u32 val;
8074 u8 fid;
8075
8076 bp->igu_base_sb = 0xff;
8077 bp->igu_sb_cnt = 0;
8078 if (CHIP_INT_MODE_IS_BC(bp)) {
8079 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
ec6ba945 8080 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8081
8082 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8083 FP_SB_MAX_E1x;
8084
8085 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
8086 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
8087
8088 return;
8089 }
8090
8091 /* IGU in normal mode - read CAM */
8092 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
8093 igu_sb_id++) {
8094 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
8095 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
8096 continue;
8097 fid = IGU_FID(val);
8098 if ((fid & IGU_FID_ENCODE_IS_PF)) {
8099 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
8100 continue;
8101 if (IGU_VEC(val) == 0)
8102 /* default status block */
8103 bp->igu_dsb_id = igu_sb_id;
8104 else {
8105 if (bp->igu_base_sb == 0xff)
8106 bp->igu_base_sb = igu_sb_id;
8107 bp->igu_sb_cnt++;
8108 }
8109 }
8110 }
8111 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8112 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8113 if (bp->igu_sb_cnt == 0)
8114 BNX2X_ERR("CAM configuration error\n");
8115}
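/* Each valid IGU CAM entry maps one status block to a (function,
 * vector) pair: entries whose FID matches this PF contribute either
 * the default SB (vector 0 -> igu_dsb_id) or one more fastpath SB,
 * which is how igu_base_sb and igu_sb_cnt are derived above. */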
8116
8117static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8118 u32 switch_cfg)
a2fbb9ea 8119{
8120 int cfg_size = 0, idx, port = BP_PORT(bp);
8121
8122 /* Aggregation of supported attributes of all external phys */
8123 bp->port.supported[0] = 0;
8124 bp->port.supported[1] = 0;
8125 switch (bp->link_params.num_phys) {
8126 case 1:
8127 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8128 cfg_size = 1;
8129 break;
b7737c9b 8130 case 2:
8131 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8132 cfg_size = 1;
8133 break;
8134 case 3:
8135 if (bp->link_params.multi_phy_config &
8136 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8137 bp->port.supported[1] =
8138 bp->link_params.phy[EXT_PHY1].supported;
8139 bp->port.supported[0] =
8140 bp->link_params.phy[EXT_PHY2].supported;
8141 } else {
8142 bp->port.supported[0] =
8143 bp->link_params.phy[EXT_PHY1].supported;
8144 bp->port.supported[1] =
8145 bp->link_params.phy[EXT_PHY2].supported;
8146 }
8147 cfg_size = 2;
8148 break;
b7737c9b 8149 }
a2fbb9ea 8150
a22f0788 8151 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 8152 BNX2X_ERR("NVRAM config error. BAD phy config."
a22f0788 8153 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 8154 SHMEM_RD(bp,
8155 dev_info.port_hw_config[port].external_phy_config),
8156 SHMEM_RD(bp,
8157 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 8158 return;
f85582f8 8159 }
a2fbb9ea 8160
8161 switch (switch_cfg) {
8162 case SWITCH_CFG_1G:
8163 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8164 port*0x10);
8165 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8166 break;
8167
8168 case SWITCH_CFG_10G:
8169 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8170 port*0x18);
8171 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8172 break;
8173
8174 default:
8175 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 8176 bp->port.link_config[0]);
8177 return;
8178 }
8179 /* mask what we support according to speed_cap_mask per configuration */
8180 for (idx = 0; idx < cfg_size; idx++) {
8181 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8182 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 8183 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8184
a22f0788 8185 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8186 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 8187 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8188
a22f0788 8189 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8190 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 8191 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8192
a22f0788 8193 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8194 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 8195 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8196
a22f0788 8197 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8198 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 8199 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 8200 SUPPORTED_1000baseT_Full);
a2fbb9ea 8201
a22f0788 8202 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8203 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 8204 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8205
a22f0788 8206 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8207 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8208 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
8209
8210 }
a2fbb9ea 8211
8212 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8213 bp->port.supported[1]);
8214}
8215
34f80b04 8216static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8217{
8218 u32 link_config, idx, cfg_size = 0;
8219 bp->port.advertising[0] = 0;
8220 bp->port.advertising[1] = 0;
8221 switch (bp->link_params.num_phys) {
8222 case 1:
8223 case 2:
8224 cfg_size = 1;
8225 break;
8226 case 3:
8227 cfg_size = 2;
8228 break;
8229 }
8230 for (idx = 0; idx < cfg_size; idx++) {
8231 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8232 link_config = bp->port.link_config[idx];
8233 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 8234 case PORT_FEATURE_LINK_SPEED_AUTO:
8235 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8236 bp->link_params.req_line_speed[idx] =
8237 SPEED_AUTO_NEG;
8238 bp->port.advertising[idx] |=
8239 bp->port.supported[idx];
8240 } else {
8241 /* force 10G, no AN */
8242 bp->link_params.req_line_speed[idx] =
8243 SPEED_10000;
8244 bp->port.advertising[idx] |=
8245 (ADVERTISED_10000baseT_Full |
f85582f8 8246 ADVERTISED_FIBRE);
a22f0788 8247 continue;
8248 }
8249 break;
a2fbb9ea 8250
f85582f8 8251 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8252 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8253 bp->link_params.req_line_speed[idx] =
8254 SPEED_10;
8255 bp->port.advertising[idx] |=
8256 (ADVERTISED_10baseT_Full |
8257 ADVERTISED_TP);
8258 } else {
8259 BNX2X_ERROR("NVRAM config error. "
8260 "Invalid link_config 0x%x"
8261 " speed_cap_mask 0x%x\n",
8262 link_config,
a22f0788 8263 bp->link_params.speed_cap_mask[idx]);
8264 return;
8265 }
8266 break;
a2fbb9ea 8267
f85582f8 8268 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8269 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8270 bp->link_params.req_line_speed[idx] =
8271 SPEED_10;
8272 bp->link_params.req_duplex[idx] =
8273 DUPLEX_HALF;
8274 bp->port.advertising[idx] |=
8275 (ADVERTISED_10baseT_Half |
8276 ADVERTISED_TP);
8277 } else {
8278 BNX2X_ERROR("NVRAM config error. "
8279 "Invalid link_config 0x%x"
8280 " speed_cap_mask 0x%x\n",
8281 link_config,
8282 bp->link_params.speed_cap_mask[idx]);
8283 return;
8284 }
8285 break;
a2fbb9ea 8286
8287 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8288 if (bp->port.supported[idx] &
8289 SUPPORTED_100baseT_Full) {
8290 bp->link_params.req_line_speed[idx] =
8291 SPEED_100;
8292 bp->port.advertising[idx] |=
8293 (ADVERTISED_100baseT_Full |
8294 ADVERTISED_TP);
8295 } else {
8296 BNX2X_ERROR("NVRAM config error. "
8297 "Invalid link_config 0x%x"
8298 " speed_cap_mask 0x%x\n",
8299 link_config,
8300 bp->link_params.speed_cap_mask[idx]);
8301 return;
8302 }
8303 break;
a2fbb9ea 8304
8305 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8306 if (bp->port.supported[idx] &
8307 SUPPORTED_100baseT_Half) {
8308 bp->link_params.req_line_speed[idx] =
8309 SPEED_100;
8310 bp->link_params.req_duplex[idx] =
8311 DUPLEX_HALF;
8312 bp->port.advertising[idx] |=
8313 (ADVERTISED_100baseT_Half |
8314 ADVERTISED_TP);
8315 } else {
8316 BNX2X_ERROR("NVRAM config error. "
8317 "Invalid link_config 0x%x"
8318 " speed_cap_mask 0x%x\n",
8319 link_config,
8320 bp->link_params.speed_cap_mask[idx]);
8321 return;
8322 }
8323 break;
a2fbb9ea 8324
f85582f8 8325 case PORT_FEATURE_LINK_SPEED_1G:
8326 if (bp->port.supported[idx] &
8327 SUPPORTED_1000baseT_Full) {
8328 bp->link_params.req_line_speed[idx] =
8329 SPEED_1000;
8330 bp->port.advertising[idx] |=
8331 (ADVERTISED_1000baseT_Full |
8332 ADVERTISED_TP);
8333 } else {
8334 BNX2X_ERROR("NVRAM config error. "
8335 "Invalid link_config 0x%x"
8336 " speed_cap_mask 0x%x\n",
8337 link_config,
8338 bp->link_params.speed_cap_mask[idx]);
8339 return;
8340 }
8341 break;
a2fbb9ea 8342
f85582f8 8343 case PORT_FEATURE_LINK_SPEED_2_5G:
8344 if (bp->port.supported[idx] &
8345 SUPPORTED_2500baseX_Full) {
8346 bp->link_params.req_line_speed[idx] =
8347 SPEED_2500;
8348 bp->port.advertising[idx] |=
8349 (ADVERTISED_2500baseX_Full |
34f80b04 8350 ADVERTISED_TP);
8351 } else {
8352 BNX2X_ERROR("NVRAM config error. "
8353 "Invalid link_config 0x%x"
8354 " speed_cap_mask 0x%x\n",
a22f0788 8355 link_config,
8356 bp->link_params.speed_cap_mask[idx]);
8357 return;
8358 }
8359 break;
a2fbb9ea 8360
8361 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8362 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8363 case PORT_FEATURE_LINK_SPEED_10G_KR:
8364 if (bp->port.supported[idx] &
8365 SUPPORTED_10000baseT_Full) {
8366 bp->link_params.req_line_speed[idx] =
8367 SPEED_10000;
8368 bp->port.advertising[idx] |=
8369 (ADVERTISED_10000baseT_Full |
34f80b04 8370 ADVERTISED_FIBRE);
8371 } else {
8372 BNX2X_ERROR("NVRAM config error. "
8373 "Invalid link_config 0x%x"
8374 " speed_cap_mask 0x%x\n",
a22f0788 8375 link_config,
8376 bp->link_params.speed_cap_mask[idx]);
8377 return;
8378 }
8379 break;
a2fbb9ea 8380
8381 default:
8382 BNX2X_ERROR("NVRAM config error. "
8383 "BAD link speed link_config 0x%x\n",
8384 link_config);
8385 bp->link_params.req_line_speed[idx] =
8386 SPEED_AUTO_NEG;
8387 bp->port.advertising[idx] =
8388 bp->port.supported[idx];
8389 break;
8390 }
a2fbb9ea 8391
a22f0788 8392 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 8393 PORT_FEATURE_FLOW_CONTROL_MASK);
8394 if ((bp->link_params.req_flow_ctrl[idx] ==
8395 BNX2X_FLOW_CTRL_AUTO) &&
8396 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8397 bp->link_params.req_flow_ctrl[idx] =
8398 BNX2X_FLOW_CTRL_NONE;
8399 }
a2fbb9ea 8400
8401 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8402 " 0x%x advertising 0x%x\n",
8403 bp->link_params.req_line_speed[idx],
8404 bp->link_params.req_duplex[idx],
8405 bp->link_params.req_flow_ctrl[idx],
8406 bp->port.advertising[idx]);
8407 }
8408}
8409
8410static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8411{
8412 mac_hi = cpu_to_be16(mac_hi);
8413 mac_lo = cpu_to_be32(mac_lo);
8414 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8415 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8416}
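/* Example with hypothetical values: mac_hi = 0x0010 and
 * mac_lo = 0x18a1b2c3 yield the big-endian buffer 00:10:18:a1:b2:c3,
 * matching the mac_upper/mac_lower split used by the shmem and MF
 * config reads below. */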
8417
34f80b04 8418static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8419{
34f80b04 8420 int port = BP_PORT(bp);
589abe3a 8421 u32 config;
6f38ad93 8422 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 8423
c18487ee 8424 bp->link_params.bp = bp;
34f80b04 8425 bp->link_params.port = port;
c18487ee 8426
c18487ee 8427 bp->link_params.lane_config =
a2fbb9ea 8428 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 8429
a22f0788 8430 bp->link_params.speed_cap_mask[0] =
8431 SHMEM_RD(bp,
8432 dev_info.port_hw_config[port].speed_capability_mask);
8433 bp->link_params.speed_cap_mask[1] =
8434 SHMEM_RD(bp,
8435 dev_info.port_hw_config[port].speed_capability_mask2);
8436 bp->port.link_config[0] =
8437 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8438
8439 bp->port.link_config[1] =
8440 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 8441
8442 bp->link_params.multi_phy_config =
8443 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8444 /* If the device is capable of WoL, set the default state according
8445 * to the HW
8446 */
4d295db0 8447 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8448 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8449 (config & PORT_FEATURE_WOL_ENABLED));
8450
f85582f8 8451 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 8452 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 8453 bp->link_params.lane_config,
a22f0788
YR
8454 bp->link_params.speed_cap_mask[0],
8455 bp->port.link_config[0]);
a2fbb9ea 8456
a22f0788 8457 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 8458 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 8459 bnx2x_phy_probe(&bp->link_params);
c18487ee 8460 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8461
8462 bnx2x_link_settings_requested(bp);
8463
8464 /*
8465 * If connected directly, work with the internal PHY, otherwise, work
8466 * with the external PHY
8467 */
8468 ext_phy_config =
8469 SHMEM_RD(bp,
8470 dev_info.port_hw_config[port].external_phy_config);
8471 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 8472 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 8473 bp->mdio.prtad = bp->port.phy_addr;
8474
8475 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8476 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8477 bp->mdio.prtad =
b7737c9b 8478 XGXS_EXT_PHY_ADDR(ext_phy_config);
8479
8480 /*
8481 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
8482 * In MF mode, it is set to cover self test cases
8483 */
8484 if (IS_MF(bp))
8485 bp->port.need_hw_lock = 1;
8486 else
8487 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8488 bp->common.shmem_base,
8489 bp->common.shmem2_base);
0793f83f 8490}
01cd4528 8491
8492#ifdef BCM_CNIC
8493static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8494{
8495 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8496 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8497 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8498 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8499
8500 /* Get the number of maximum allowed iSCSI and FCoE connections */
8501 bp->cnic_eth_dev.max_iscsi_conn =
8502 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8503 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8504
8505 bp->cnic_eth_dev.max_fcoe_conn =
8506 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8507 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8508
8509 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8510 bp->cnic_eth_dev.max_iscsi_conn,
8511 bp->cnic_eth_dev.max_fcoe_conn);
8512
 8513 /* If the maximum allowed number of connections is zero,
8514 * disable the feature.
8515 */
8516 if (!bp->cnic_eth_dev.max_iscsi_conn)
8517 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8518
8519 if (!bp->cnic_eth_dev.max_fcoe_conn)
8520 bp->flags |= NO_FCOE_FLAG;
8521}
8522#endif
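/* Editor's sketch (illustrative): the drv_lic_key words read above are
 * stored XORed with FW_ENCODE_32BIT_PATTERN and then mask/shift decoded.
 * With a hypothetical mask of 0x0000ffff and shift of 0, a shmem word
 * equal to (FW_ENCODE_32BIT_PATTERN ^ 0x0040) decodes to 64 connections:
 *
 *	u32 raw = FW_ENCODE_32BIT_PATTERN ^ shmem_word;
 *	u16 conns = (raw & mask) >> shift;
 */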
8523
0793f83f
DK
8524static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8525{
8526 u32 val, val2;
8527 int func = BP_ABS_FUNC(bp);
8528 int port = BP_PORT(bp);
2ba45142
VZ
8529#ifdef BCM_CNIC
8530 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8531 u8 *fip_mac = bp->fip_mac;
8532#endif
0793f83f
DK
8533
8534 if (BP_NOMCP(bp)) {
8535 BNX2X_ERROR("warning: random MAC workaround active\n");
8536 random_ether_addr(bp->dev->dev_addr);
8537 } else if (IS_MF(bp)) {
8538 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8539 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8540 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8541 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8542 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
37b091ba
MC
8543
8544#ifdef BCM_CNIC
2ba45142
VZ
 8545 /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
 8546 * FCoE MAC is missing, the corresponding feature must be disabled.
8547 */
0793f83f
DK
8548 if (IS_MF_SI(bp)) {
8549 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8550 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8551 val2 = MF_CFG_RD(bp, func_ext_config[func].
8552 iscsi_mac_addr_upper);
8553 val = MF_CFG_RD(bp, func_ext_config[func].
8554 iscsi_mac_addr_lower);
2ba45142
VZ
8555 BNX2X_DEV_INFO("Read iSCSI MAC: "
8556 "0x%x:0x%04x\n", val2, val);
8557 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8558
8559 /* Disable iSCSI OOO if MAC configuration is
8560 * invalid.
8561 */
8562 if (!is_valid_ether_addr(iscsi_mac)) {
8563 bp->flags |= NO_ISCSI_OOO_FLAG |
8564 NO_ISCSI_FLAG;
8565 memset(iscsi_mac, 0, ETH_ALEN);
8566 }
8567 } else
8568 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8569
8570 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8571 val2 = MF_CFG_RD(bp, func_ext_config[func].
8572 fcoe_mac_addr_upper);
8573 val = MF_CFG_RD(bp, func_ext_config[func].
8574 fcoe_mac_addr_lower);
8575 BNX2X_DEV_INFO("Read FCoE MAC to "
8576 "0x%x:0x%04x\n", val2, val);
8577 bnx2x_set_mac_buf(fip_mac, val, val2);
8578
8579 /* Disable FCoE if MAC configuration is
8580 * invalid.
8581 */
8582 if (!is_valid_ether_addr(fip_mac)) {
8583 bp->flags |= NO_FCOE_FLAG;
8584 memset(bp->fip_mac, 0, ETH_ALEN);
8585 }
8586 } else
8587 bp->flags |= NO_FCOE_FLAG;
0793f83f 8588 }
37b091ba 8589#endif
0793f83f
DK
8590 } else {
8591 /* in SF read MACs from port configuration */
8592 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8593 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8594 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8595
8596#ifdef BCM_CNIC
8597 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8598 iscsi_mac_upper);
8599 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8600 iscsi_mac_lower);
2ba45142 8601 bnx2x_set_mac_buf(iscsi_mac, val, val2);
0793f83f
DK
8602#endif
8603 }
8604
8605 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8606 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8607
ec6ba945 8608#ifdef BCM_CNIC
2ba45142 8609 /* Set the FCoE MAC in modes other than MF_SI */
ec6ba945
VZ
8610 if (!CHIP_IS_E1x(bp)) {
8611 if (IS_MF_SD(bp))
2ba45142
VZ
8612 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8613 else if (!IS_MF(bp))
8614 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
ec6ba945
VZ
8615 }
8616#endif
34f80b04
EG
8617}
8618
8619static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8620{
0793f83f
DK
8621 int /*abs*/func = BP_ABS_FUNC(bp);
8622 int vn, port;
8623 u32 val = 0;
34f80b04 8624 int rc = 0;
a2fbb9ea 8625
34f80b04 8626 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8627
f2e0899f
DK
8628 if (CHIP_IS_E1x(bp)) {
8629 bp->common.int_block = INT_BLOCK_HC;
8630
8631 bp->igu_dsb_id = DEF_SB_IGU_ID;
8632 bp->igu_base_sb = 0;
ec6ba945
VZ
8633 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8634 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
8635 } else {
8636 bp->common.int_block = INT_BLOCK_IGU;
8637 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8638 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8639 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8640 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8641 } else
8642 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8643
f2e0899f
DK
8644 bnx2x_get_igu_cam_info(bp);
8645
8646 }
8647 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8648 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8649
8650 /*
8651 * Initialize MF configuration
8652 */
523224a3 8653
fb3bff17
DK
8654 bp->mf_ov = 0;
8655 bp->mf_mode = 0;
f2e0899f 8656 vn = BP_E1HVN(bp);
0793f83f
DK
8657 port = BP_PORT(bp);
8658
f2e0899f 8659 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
0793f83f
DK
8660 DP(NETIF_MSG_PROBE,
8661 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8662 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8663 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
f2e0899f
DK
8664 if (SHMEM2_HAS(bp, mf_cfg_addr))
8665 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8666 else
8667 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
8668 offsetof(struct shmem_region, func_mb) +
8669 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
0793f83f
DK
8670 /*
 8671 * Get the MF configuration:
 8672 * 1. existence of an MF configuration
 8673 * 2. the MAC address must be legal (check only the upper bytes)
 8674 * for Switch-Independent mode;
 8675 * the OVLAN must be legal for Switch-Dependent mode
 8676 * 3. SF_MODE selects the specific MF mode
8677 */
8678 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8679 /* get mf configuration */
8680 val = SHMEM_RD(bp,
8681 dev_info.shared_feature_config.config);
8682 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8683
8684 switch (val) {
8685 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8686 val = MF_CFG_RD(bp, func_mf_config[func].
8687 mac_upper);
8688 /* check for legal mac (upper bytes)*/
8689 if (val != 0xffff) {
8690 bp->mf_mode = MULTI_FUNCTION_SI;
8691 bp->mf_config[vn] = MF_CFG_RD(bp,
8692 func_mf_config[func].config);
8693 } else
8694 DP(NETIF_MSG_PROBE, "illegal MAC "
8695 "address for SI\n");
8696 break;
8697 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8698 /* get OV configuration */
8699 val = MF_CFG_RD(bp,
8700 func_mf_config[FUNC_0].e1hov_tag);
8701 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8702
8703 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8704 bp->mf_mode = MULTI_FUNCTION_SD;
8705 bp->mf_config[vn] = MF_CFG_RD(bp,
8706 func_mf_config[func].config);
8707 } else
8708 DP(NETIF_MSG_PROBE, "illegal OV for "
8709 "SD\n");
8710 break;
8711 default:
8712 /* Unknown configuration: reset mf_config */
8713 bp->mf_config[vn] = 0;
 8714 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8715 val);
8716 }
8717 }
a2fbb9ea 8718
2691d51d 8719 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8720 IS_MF(bp) ? "multi" : "single");
2691d51d 8721
0793f83f
DK
8722 switch (bp->mf_mode) {
8723 case MULTI_FUNCTION_SD:
8724 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8725 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 8726 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8727 bp->mf_ov = val;
0793f83f
DK
8728 BNX2X_DEV_INFO("MF OV for func %d is %d"
8729 " (0x%04x)\n", func,
8730 bp->mf_ov, bp->mf_ov);
2691d51d 8731 } else {
0793f83f
DK
8732 BNX2X_ERR("No valid MF OV for func %d,"
8733 " aborting\n", func);
34f80b04
EG
8734 rc = -EPERM;
8735 }
0793f83f
DK
8736 break;
8737 case MULTI_FUNCTION_SI:
8738 BNX2X_DEV_INFO("func %d is in MF "
8739 "switch-independent mode\n", func);
8740 break;
8741 default:
8742 if (vn) {
8743 BNX2X_ERR("VN %d in single function mode,"
8744 " aborting\n", vn);
2691d51d
EG
8745 rc = -EPERM;
8746 }
0793f83f 8747 break;
34f80b04 8748 }
0793f83f 8749
34f80b04 8750 }
a2fbb9ea 8751
f2e0899f
DK
8752 /* adjust igu_sb_cnt to MF for E1x */
8753 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
8754 bp->igu_sb_cnt /= E1HVN_MAX;
8755
f2e0899f
DK
8756 /*
 8757 * adjust the E2 sb count: to be removed once the FW supports
 8758 * more than 16 L2 clients
8759 */
8760#define MAX_L2_CLIENTS 16
8761 if (CHIP_IS_E2(bp))
8762 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8763 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8764
34f80b04
EG
8765 if (!BP_NOMCP(bp)) {
8766 bnx2x_get_port_hwinfo(bp);
8767
f2e0899f
DK
8768 bp->fw_seq =
8769 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8770 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
8771 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8772 }
8773
0793f83f
DK
8774 /* Get MAC addresses */
8775 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 8776
2ba45142
VZ
8777#ifdef BCM_CNIC
8778 bnx2x_get_cnic_info(bp);
8779#endif
8780
34f80b04
EG
8781 return rc;
8782}
8783
34f24c7f
VZ
8784static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8785{
8786 int cnt, i, block_end, rodi;
8787 char vpd_data[BNX2X_VPD_LEN+1];
8788 char str_id_reg[VENDOR_ID_LEN+1];
8789 char str_id_cap[VENDOR_ID_LEN+1];
8790 u8 len;
8791
8792 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8793 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8794
8795 if (cnt < BNX2X_VPD_LEN)
8796 goto out_not_found;
8797
8798 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8799 PCI_VPD_LRDT_RO_DATA);
8800 if (i < 0)
8801 goto out_not_found;
8802
8803
8804 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8805 pci_vpd_lrdt_size(&vpd_data[i]);
8806
8807 i += PCI_VPD_LRDT_TAG_SIZE;
8808
8809 if (block_end > BNX2X_VPD_LEN)
8810 goto out_not_found;
8811
8812 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8813 PCI_VPD_RO_KEYWORD_MFR_ID);
8814 if (rodi < 0)
8815 goto out_not_found;
8816
8817 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8818
8819 if (len != VENDOR_ID_LEN)
8820 goto out_not_found;
8821
8822 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8823
8824 /* vendor specific info */
8825 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8826 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8827 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8828 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8829
8830 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8831 PCI_VPD_RO_KEYWORD_VENDOR0);
8832 if (rodi >= 0) {
8833 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8834
8835 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8836
8837 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8838 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8839 bp->fw_ver[len] = ' ';
8840 }
8841 }
8842 return;
8843 }
8844out_not_found:
8845 return;
8846}
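/* Editor's usage sketch (illustrative): any read-only VPD keyword can be
 * located with the same three kernel helpers used above, e.g. the part
 * number keyword:
 *
 *	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
 *	block_end = i + PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(&vpd_data[i]);
 *	rodi = pci_vpd_find_info_keyword(vpd_data, i + PCI_VPD_LRDT_TAG_SIZE,
 *					 block_end, PCI_VPD_RO_KEYWORD_PARTNO);
 *
 * with each returned offset bounds-checked against the buffer length,
 * exactly as the function above does.
 */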
8847
34f80b04
EG
8848static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8849{
f2e0899f 8850 int func;
87942b46 8851 int timer_interval;
34f80b04
EG
8852 int rc;
8853
da5a662a
VZ
8854 /* Disable interrupt handling until HW is initialized */
8855 atomic_set(&bp->intr_sem, 1);
e1510706 8856 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8857
34f80b04 8858 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8859 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 8860 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
8861#ifdef BCM_CNIC
8862 mutex_init(&bp->cnic_mutex);
8863#endif
a2fbb9ea 8864
1cf167f2 8865 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8866 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
8867
8868 rc = bnx2x_get_hwinfo(bp);
8869
523224a3
DK
8870 if (!rc)
8871 rc = bnx2x_alloc_mem_bp(bp);
8872
34f24c7f 8873 bnx2x_read_fwinfo(bp);
f2e0899f
DK
8874
8875 func = BP_FUNC(bp);
8876
34f80b04
EG
 8877 /* need to reset the chip if UNDI was active */
8878 if (!BP_NOMCP(bp))
8879 bnx2x_undi_unload(bp);
8880
8881 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8882 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
8883
8884 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
8885 dev_err(&bp->pdev->dev, "MCP disabled, "
8886 "must load devices in order!\n");
34f80b04 8887
555f6c78 8888 bp->multi_mode = multi_mode;
5d7cd496 8889 bp->int_mode = int_mode;
555f6c78 8890
4fd89b7a
DK
8891 bp->dev->features |= NETIF_F_GRO;
8892
7a9b2557
VZ
8893 /* Set TPA flags */
8894 if (disable_tpa) {
8895 bp->flags &= ~TPA_ENABLE_FLAG;
8896 bp->dev->features &= ~NETIF_F_LRO;
8897 } else {
8898 bp->flags |= TPA_ENABLE_FLAG;
8899 bp->dev->features |= NETIF_F_LRO;
8900 }
5d7cd496 8901 bp->disable_tpa = disable_tpa;
7a9b2557 8902
a18f5128
EG
8903 if (CHIP_IS_E1(bp))
8904 bp->dropless_fc = 0;
8905 else
8906 bp->dropless_fc = dropless_fc;
8907
8d5726c4 8908 bp->mrrs = mrrs;
7a9b2557 8909
34f80b04 8910 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04
EG
8911
8912 bp->rx_csum = 1;
34f80b04 8913
7d323bfd 8914 /* make sure that the numbers are in the right granularity */
523224a3
DK
8915 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8916 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 8917
87942b46
EG
8918 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8919 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8920
8921 init_timer(&bp->timer);
8922 bp->timer.expires = jiffies + bp->current_interval;
8923 bp->timer.data = (unsigned long) bp;
8924 bp->timer.function = bnx2x_timer;
8925
785b9b1a 8926 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
e4901dde
VZ
8927 bnx2x_dcbx_init_params(bp);
8928
34f80b04 8929 return rc;
a2fbb9ea
ET
8930}
8931
a2fbb9ea 8932
de0c62db
DK
8933/****************************************************************************
8934* General service functions
8935****************************************************************************/
a2fbb9ea 8936
bb2a0f7a 8937/* called with rtnl_lock */
a2fbb9ea
ET
8938static int bnx2x_open(struct net_device *dev)
8939{
8940 struct bnx2x *bp = netdev_priv(dev);
8941
6eccabb3
EG
8942 netif_carrier_off(dev);
8943
a2fbb9ea
ET
8944 bnx2x_set_power_state(bp, PCI_D0);
8945
72fd0718
VZ
8946 if (!bnx2x_reset_is_done(bp)) {
8947 do {
 8948 /* Reset the MCP mailbox sequence if there is an ongoing
 8949 * recovery
8950 */
8951 bp->fw_seq = 0;
8952
 8953 /* If it's the first function to load and reset-done
 8954 * is still not cleared, a previous recovery may not have
 8955 * finished. We don't check the attention state here because
 8956 * it may have already been cleared by a "common" reset, but
 8957 * we shall proceed with "process kill" anyway.
8958 */
8959 if ((bnx2x_get_load_cnt(bp) == 0) &&
8960 bnx2x_trylock_hw_lock(bp,
8961 HW_LOCK_RESOURCE_RESERVED_08) &&
8962 (!bnx2x_leader_reset(bp))) {
8963 DP(NETIF_MSG_HW, "Recovered in open\n");
8964 break;
8965 }
8966
8967 bnx2x_set_power_state(bp, PCI_D3hot);
8968
 8969 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
 8970 " completed yet. Try again later. If you still see this"
 8971 " message after a few retries then a power cycle is"
 8972 " required.\n", bp->dev->name);
8973
8974 return -EAGAIN;
8975 } while (0);
8976 }
8977
8978 bp->recovery_state = BNX2X_RECOVERY_DONE;
8979
bb2a0f7a 8980 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
8981}
8982
bb2a0f7a 8983/* called with rtnl_lock */
a2fbb9ea
ET
8984static int bnx2x_close(struct net_device *dev)
8985{
a2fbb9ea
ET
8986 struct bnx2x *bp = netdev_priv(dev);
8987
8988 /* Unload the driver, release IRQs */
bb2a0f7a 8989 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8990 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
8991
8992 return 0;
8993}
8994
6e30dd4e
VZ
8995#define E1_MAX_UC_LIST 29
8996#define E1H_MAX_UC_LIST 30
8997#define E2_MAX_UC_LIST 14
8998static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
8999{
9000 if (CHIP_IS_E1(bp))
9001 return E1_MAX_UC_LIST;
9002 else if (CHIP_IS_E1H(bp))
9003 return E1H_MAX_UC_LIST;
9004 else
9005 return E2_MAX_UC_LIST;
9006}
9007
9008
9009static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
9010{
9011 if (CHIP_IS_E1(bp))
9012 /* CAM Entries for Port0:
9013 * 0 - prim ETH MAC
9014 * 1 - BCAST MAC
9015 * 2 - iSCSI L2 ring ETH MAC
9016 * 3-31 - UC MACs
9017 *
9018 * Port1 entries are allocated the same way starting from
9019 * entry 32.
9020 */
9021 return 3 + 32 * BP_PORT(bp);
9022 else if (CHIP_IS_E1H(bp)) {
9023 /* CAM Entries:
9024 * 0-7 - prim ETH MAC for each function
9025 * 8-15 - iSCSI L2 ring ETH MAC for each function
 9026 * 16-255 - UC MAC lists for each function
9027 *
9028 * Remark: There is no FCoE support for E1H, thus FCoE related
9029 * MACs are not considered.
9030 */
9031 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
9032 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
9033 } else {
9034 /* CAM Entries (there is a separate CAM per engine):
 9035 * 0-3 - prim ETH MAC for each function
9036 * 4-7 - iSCSI L2 ring ETH MAC for each function
9037 * 8-11 - FIP ucast L2 MAC for each function
9038 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
 9039 * 16-71 - UC MAC lists for each function
9040 */
9041 u8 func_idx =
9042 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
9043
9044 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
9045 bnx2x_max_uc_list(bp) * func_idx;
9046 }
9047}
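/* Editor's worked example (illustrative): on an E1 device the offset
 * computed above is 3 + 32 * 0 = 3 for port 0 and 3 + 32 * 1 = 35 for
 * port 1, so the 29 UC MACs occupy CAM entries 3-31 and 35-63
 * respectively.
 */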
9048
 9049/* Set the UC list without waiting, since waiting implies sleeping and
 9050 * set_rx_mode can be invoked from a non-sleepable context.
 9051 *
 9052 * Instead we use the same ramrod data buffer each time we need
 9053 * to configure a list of addresses, relying on the fact that the
 9054 * list of MACs changes incrementally and that the
 9055 * function is called under the netif_addr_lock. A temporarily
 9056 * inconsistent CAM configuration (possible in the case of a very
 9057 * fast add/del/add sequence on the host side) will shortly be
 9058 * restored by the handler of the last ramrod.
 9059 */
9060static int bnx2x_set_uc_list(struct bnx2x *bp)
9061{
9062 int i = 0, old;
9063 struct net_device *dev = bp->dev;
9064 u8 offset = bnx2x_uc_list_cam_offset(bp);
9065 struct netdev_hw_addr *ha;
9066 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9067 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9068
9069 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
9070 return -EINVAL;
9071
9072 netdev_for_each_uc_addr(ha, dev) {
9073 /* copy mac */
9074 config_cmd->config_table[i].msb_mac_addr =
9075 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
9076 config_cmd->config_table[i].middle_mac_addr =
9077 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
9078 config_cmd->config_table[i].lsb_mac_addr =
9079 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
9080
9081 config_cmd->config_table[i].vlan_id = 0;
9082 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
9083 config_cmd->config_table[i].clients_bit_vector =
9084 cpu_to_le32(1 << BP_L_ID(bp));
9085
9086 SET_FLAG(config_cmd->config_table[i].flags,
9087 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9088 T_ETH_MAC_COMMAND_SET);
9089
9090 DP(NETIF_MSG_IFUP,
9091 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
9092 config_cmd->config_table[i].msb_mac_addr,
9093 config_cmd->config_table[i].middle_mac_addr,
9094 config_cmd->config_table[i].lsb_mac_addr);
9095
9096 i++;
9097
9098 /* Set uc MAC in NIG */
9099 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
9100 LLH_CAM_ETH_LINE + i);
9101 }
9102 old = config_cmd->hdr.length;
9103 if (old > i) {
9104 for (; i < old; i++) {
9105 if (CAM_IS_INVALID(config_cmd->
9106 config_table[i])) {
9107 /* already invalidated */
9108 break;
9109 }
9110 /* invalidate */
9111 SET_FLAG(config_cmd->config_table[i].flags,
9112 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9113 T_ETH_MAC_COMMAND_INVALIDATE);
9114 }
9115 }
9116
9117 wmb();
9118
9119 config_cmd->hdr.length = i;
9120 config_cmd->hdr.offset = offset;
9121 config_cmd->hdr.client_id = 0xff;
9122 /* Mark that this ramrod doesn't use bp->set_mac_pending for
9123 * synchronization.
9124 */
9125 config_cmd->hdr.echo = 0;
9126
9127 mb();
9128
9129 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9130 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9131
9132}
9133
9134void bnx2x_invalidate_uc_list(struct bnx2x *bp)
9135{
9136 int i;
9137 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9138 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9139 int ramrod_flags = WAIT_RAMROD_COMMON;
9140 u8 offset = bnx2x_uc_list_cam_offset(bp);
9141 u8 max_list_size = bnx2x_max_uc_list(bp);
9142
9143 for (i = 0; i < max_list_size; i++) {
9144 SET_FLAG(config_cmd->config_table[i].flags,
9145 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9146 T_ETH_MAC_COMMAND_INVALIDATE);
9147 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
9148 }
9149
9150 wmb();
9151
9152 config_cmd->hdr.length = max_list_size;
9153 config_cmd->hdr.offset = offset;
9154 config_cmd->hdr.client_id = 0xff;
9155 /* We'll wait for a completion this time... */
9156 config_cmd->hdr.echo = 1;
9157
9158 bp->set_mac_pending = 1;
9159
9160 mb();
9161
9162 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9163 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9164
9165 /* Wait for a completion */
9166 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
9167 ramrod_flags);
9168
9169}
9170
9171static inline int bnx2x_set_mc_list(struct bnx2x *bp)
9172{
9173 /* some multicasts */
9174 if (CHIP_IS_E1(bp)) {
9175 return bnx2x_set_e1_mc_list(bp);
9176 } else { /* E1H and newer */
9177 return bnx2x_set_e1h_mc_list(bp);
9178 }
9179}
9180
f5372251 9181/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 9182void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
9183{
9184 struct bnx2x *bp = netdev_priv(dev);
9185 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
34f80b04
EG
9186
9187 if (bp->state != BNX2X_STATE_OPEN) {
9188 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9189 return;
9190 }
9191
9192 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9193
9194 if (dev->flags & IFF_PROMISC)
9195 rx_mode = BNX2X_RX_MODE_PROMISC;
6e30dd4e 9196 else if (dev->flags & IFF_ALLMULTI)
34f80b04 9197 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6e30dd4e
VZ
9198 else {
9199 /* some multicasts */
9200 if (bnx2x_set_mc_list(bp))
9201 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04 9202
6e30dd4e
VZ
9203 /* some unicasts */
9204 if (bnx2x_set_uc_list(bp))
9205 rx_mode = BNX2X_RX_MODE_PROMISC;
34f80b04
EG
9206 }
9207
9208 bp->rx_mode = rx_mode;
9209 bnx2x_set_storm_rx_mode(bp);
9210}
9211
c18487ee 9212/* called with rtnl_lock */
01cd4528
EG
9213static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
9214 int devad, u16 addr)
a2fbb9ea 9215{
01cd4528
EG
9216 struct bnx2x *bp = netdev_priv(netdev);
9217 u16 value;
9218 int rc;
a2fbb9ea 9219
01cd4528
EG
9220 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
9221 prtad, devad, addr);
a2fbb9ea 9222
01cd4528
EG
9223 /* The HW expects different devad if CL22 is used */
9224 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 9225
01cd4528 9226 bnx2x_acquire_phy_lock(bp);
e10bc84d 9227 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
9228 bnx2x_release_phy_lock(bp);
9229 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 9230
01cd4528
EG
9231 if (!rc)
9232 rc = value;
9233 return rc;
9234}
a2fbb9ea 9235
01cd4528
EG
9236/* called with rtnl_lock */
9237static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
9238 u16 addr, u16 value)
9239{
9240 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
9241 int rc;
9242
9243 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
9244 " value 0x%x\n", prtad, devad, addr, value);
9245
01cd4528
EG
9246 /* The HW expects different devad if CL22 is used */
9247 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 9248
01cd4528 9249 bnx2x_acquire_phy_lock(bp);
e10bc84d 9250 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
9251 bnx2x_release_phy_lock(bp);
9252 return rc;
9253}
c18487ee 9254
01cd4528
EG
9255/* called with rtnl_lock */
9256static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9257{
9258 struct bnx2x *bp = netdev_priv(dev);
9259 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 9260
01cd4528
EG
9261 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
9262 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 9263
01cd4528
EG
9264 if (!netif_running(dev))
9265 return -EAGAIN;
9266
9267 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
9268}
9269
257ddbda 9270#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
9271static void poll_bnx2x(struct net_device *dev)
9272{
9273 struct bnx2x *bp = netdev_priv(dev);
9274
9275 disable_irq(bp->pdev->irq);
9276 bnx2x_interrupt(bp->pdev->irq, dev);
9277 enable_irq(bp->pdev->irq);
9278}
9279#endif
9280
c64213cd
SH
9281static const struct net_device_ops bnx2x_netdev_ops = {
9282 .ndo_open = bnx2x_open,
9283 .ndo_stop = bnx2x_close,
9284 .ndo_start_xmit = bnx2x_start_xmit,
8307fa3e 9285 .ndo_select_queue = bnx2x_select_queue,
6e30dd4e 9286 .ndo_set_rx_mode = bnx2x_set_rx_mode,
c64213cd
SH
9287 .ndo_set_mac_address = bnx2x_change_mac_addr,
9288 .ndo_validate_addr = eth_validate_addr,
9289 .ndo_do_ioctl = bnx2x_ioctl,
9290 .ndo_change_mtu = bnx2x_change_mtu,
9291 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 9292#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
9293 .ndo_poll_controller = poll_bnx2x,
9294#endif
9295};
9296
34f80b04
EG
9297static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9298 struct net_device *dev)
a2fbb9ea
ET
9299{
9300 struct bnx2x *bp;
9301 int rc;
9302
9303 SET_NETDEV_DEV(dev, &pdev->dev);
9304 bp = netdev_priv(dev);
9305
34f80b04
EG
9306 bp->dev = dev;
9307 bp->pdev = pdev;
a2fbb9ea 9308 bp->flags = 0;
f2e0899f 9309 bp->pf_num = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
9310
9311 rc = pci_enable_device(pdev);
9312 if (rc) {
cdaa7cb8
VZ
9313 dev_err(&bp->pdev->dev,
9314 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
9315 goto err_out;
9316 }
9317
9318 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9319 dev_err(&bp->pdev->dev,
9320 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
9321 rc = -ENODEV;
9322 goto err_out_disable;
9323 }
9324
9325 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9326 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9327 " base address, aborting\n");
a2fbb9ea
ET
9328 rc = -ENODEV;
9329 goto err_out_disable;
9330 }
9331
34f80b04
EG
9332 if (atomic_read(&pdev->enable_cnt) == 1) {
9333 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9334 if (rc) {
cdaa7cb8
VZ
9335 dev_err(&bp->pdev->dev,
9336 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
9337 goto err_out_disable;
9338 }
a2fbb9ea 9339
34f80b04
EG
9340 pci_set_master(pdev);
9341 pci_save_state(pdev);
9342 }
a2fbb9ea
ET
9343
9344 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9345 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
9346 dev_err(&bp->pdev->dev,
9347 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
9348 rc = -EIO;
9349 goto err_out_release;
9350 }
9351
9352 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9353 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
9354 dev_err(&bp->pdev->dev,
9355 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
9356 rc = -EIO;
9357 goto err_out_release;
9358 }
9359
1a983142 9360 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 9361 bp->flags |= USING_DAC_FLAG;
1a983142 9362 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
9363 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9364 " failed, aborting\n");
a2fbb9ea
ET
9365 rc = -EIO;
9366 goto err_out_release;
9367 }
9368
1a983142 9369 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
9370 dev_err(&bp->pdev->dev,
9371 "System does not support DMA, aborting\n");
a2fbb9ea
ET
9372 rc = -EIO;
9373 goto err_out_release;
9374 }
9375
34f80b04
EG
9376 dev->mem_start = pci_resource_start(pdev, 0);
9377 dev->base_addr = dev->mem_start;
9378 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
9379
9380 dev->irq = pdev->irq;
9381
275f165f 9382 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 9383 if (!bp->regview) {
cdaa7cb8
VZ
9384 dev_err(&bp->pdev->dev,
9385 "Cannot map register space, aborting\n");
a2fbb9ea
ET
9386 rc = -ENOMEM;
9387 goto err_out_release;
9388 }
9389
34f80b04 9390 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 9391 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 9392 pci_resource_len(pdev, 2)));
a2fbb9ea 9393 if (!bp->doorbells) {
cdaa7cb8
VZ
9394 dev_err(&bp->pdev->dev,
9395 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
9396 rc = -ENOMEM;
9397 goto err_out_unmap;
9398 }
9399
9400 bnx2x_set_power_state(bp, PCI_D0);
9401
34f80b04
EG
9402 /* clean indirect addresses */
9403 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9404 PCICFG_VENDOR_ID_OFFSET);
9405 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9406 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9407 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9408 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 9409
72fd0718
VZ
9410 /* Reset the load counter */
9411 bnx2x_clear_load_cnt(bp);
9412
34f80b04 9413 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 9414
c64213cd 9415 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 9416 bnx2x_set_ethtool_ops(dev);
34f80b04 9417 dev->features |= NETIF_F_SG;
79032644 9418 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
34f80b04
EG
9419 if (bp->flags & USING_DAC_FLAG)
9420 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
9421 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9422 dev->features |= NETIF_F_TSO6;
34f80b04 9423 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
5316bc0b
EG
9424
9425 dev->vlan_features |= NETIF_F_SG;
79032644 9426 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
5316bc0b
EG
9427 if (bp->flags & USING_DAC_FLAG)
9428 dev->vlan_features |= NETIF_F_HIGHDMA;
9429 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9430 dev->vlan_features |= NETIF_F_TSO6;
a2fbb9ea 9431
785b9b1a
SR
9432#ifdef BCM_DCB
9433 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9434#endif
9435
01cd4528
EG
9436 /* get_port_hwinfo() will set prtad and mmds properly */
9437 bp->mdio.prtad = MDIO_PRTAD_NONE;
9438 bp->mdio.mmds = 0;
9439 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9440 bp->mdio.dev = dev;
9441 bp->mdio.mdio_read = bnx2x_mdio_read;
9442 bp->mdio.mdio_write = bnx2x_mdio_write;
9443
a2fbb9ea
ET
9444 return 0;
9445
9446err_out_unmap:
9447 if (bp->regview) {
9448 iounmap(bp->regview);
9449 bp->regview = NULL;
9450 }
a2fbb9ea
ET
9451 if (bp->doorbells) {
9452 iounmap(bp->doorbells);
9453 bp->doorbells = NULL;
9454 }
9455
9456err_out_release:
34f80b04
EG
9457 if (atomic_read(&pdev->enable_cnt) == 1)
9458 pci_release_regions(pdev);
a2fbb9ea
ET
9459
9460err_out_disable:
9461 pci_disable_device(pdev);
9462 pci_set_drvdata(pdev, NULL);
9463
9464err_out:
9465 return rc;
9466}
9467
37f9ce62
EG
9468static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9469 int *width, int *speed)
25047950
ET
9470{
9471 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9472
37f9ce62 9473 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 9474
37f9ce62
EG
9475 /* return value of 1=2.5GHz 2=5GHz */
9476 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 9477}
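/* Editor's illustrative decode (not driver code): with the masks and
 * shifts above, a link-control value whose width field holds 8 and
 * whose speed field holds 2 reports a x8 Gen2 link:
 *
 *	int width, speed;
 *	bnx2x_get_pcie_width_speed(bp, &width, &speed);
 *	-> width == 8, speed == 2, printed later as "PCI-E x8 5GHz (Gen2)"
 */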
37f9ce62 9478
6891dd25 9479static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 9480{
37f9ce62 9481 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
9482 struct bnx2x_fw_file_hdr *fw_hdr;
9483 struct bnx2x_fw_file_section *sections;
94a78b79 9484 u32 offset, len, num_ops;
37f9ce62 9485 u16 *ops_offsets;
94a78b79 9486 int i;
37f9ce62 9487 const u8 *fw_ver;
94a78b79
VZ
9488
9489 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9490 return -EINVAL;
9491
9492 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9493 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9494
9495 /* Make sure none of the offsets and sizes make us read beyond
9496 * the end of the firmware data */
9497 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9498 offset = be32_to_cpu(sections[i].offset);
9499 len = be32_to_cpu(sections[i].len);
9500 if (offset + len > firmware->size) {
cdaa7cb8
VZ
9501 dev_err(&bp->pdev->dev,
9502 "Section %d length is out of bounds\n", i);
94a78b79
VZ
9503 return -EINVAL;
9504 }
9505 }
9506
9507 /* Likewise for the init_ops offsets */
9508 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9509 ops_offsets = (u16 *)(firmware->data + offset);
9510 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9511
9512 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9513 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
9514 dev_err(&bp->pdev->dev,
9515 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
9516 return -EINVAL;
9517 }
9518 }
9519
9520 /* Check FW version */
9521 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9522 fw_ver = firmware->data + offset;
9523 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9524 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9525 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9526 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
9527 dev_err(&bp->pdev->dev,
9528 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
9529 fw_ver[0], fw_ver[1], fw_ver[2],
9530 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9531 BCM_5710_FW_MINOR_VERSION,
9532 BCM_5710_FW_REVISION_VERSION,
9533 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 9534 return -EINVAL;
94a78b79
VZ
9535 }
9536
9537 return 0;
9538}
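/* Editor's note (illustrative): when offset and len come from untrusted
 * firmware data, the bounds check above is often written so that u32
 * wrap-around cannot slip past it:
 *
 *	if (offset > firmware->size || len > firmware->size - offset)
 *		return -EINVAL;
 */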
9539
ab6ad5a4 9540static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9541{
ab6ad5a4
EG
9542 const __be32 *source = (const __be32 *)_source;
9543 u32 *target = (u32 *)_target;
94a78b79 9544 u32 i;
94a78b79
VZ
9545
9546 for (i = 0; i < n/4; i++)
9547 target[i] = be32_to_cpu(source[i]);
9548}
9549
9550/*
9551 Ops array is stored in the following format:
9552 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9553 */
ab6ad5a4 9554static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 9555{
ab6ad5a4
EG
9556 const __be32 *source = (const __be32 *)_source;
9557 struct raw_op *target = (struct raw_op *)_target;
94a78b79 9558 u32 i, j, tmp;
94a78b79 9559
ab6ad5a4 9560 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
9561 tmp = be32_to_cpu(source[j]);
9562 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
9563 target[i].offset = tmp & 0xffffff;
9564 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
9565 }
9566}
ab6ad5a4 9567
523224a3
DK
9568/**
9569 * IRO array is stored in the following format:
9570 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9571 */
9572static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9573{
9574 const __be32 *source = (const __be32 *)_source;
9575 struct iro *target = (struct iro *)_target;
9576 u32 i, j, tmp;
9577
9578 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9579 target[i].base = be32_to_cpu(source[j]);
9580 j++;
9581 tmp = be32_to_cpu(source[j]);
9582 target[i].m1 = (tmp >> 16) & 0xffff;
9583 target[i].m2 = tmp & 0xffff;
9584 j++;
9585 tmp = be32_to_cpu(source[j]);
9586 target[i].m3 = (tmp >> 16) & 0xffff;
9587 target[i].size = tmp & 0xffff;
9588 j++;
9589 }
9590}
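/* Editor's worked example (illustrative): the three big-endian words
 * 0x00001000 0x00100020 0x00300004 unpack, by the loop above, to
 * base = 0x1000, m1 = 0x0010, m2 = 0x0020, m3 = 0x0030, size = 0x0004.
 */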
9591
ab6ad5a4 9592static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9593{
ab6ad5a4
EG
9594 const __be16 *source = (const __be16 *)_source;
9595 u16 *target = (u16 *)_target;
94a78b79 9596 u32 i;
94a78b79
VZ
9597
9598 for (i = 0; i < n/2; i++)
9599 target[i] = be16_to_cpu(source[i]);
9600}
9601
7995c64e
JP
9602#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
9603do { \
9604 u32 len = be32_to_cpu(fw_hdr->arr.len); \
9605 bp->arr = kmalloc(len, GFP_KERNEL); \
9606 if (!bp->arr) { \
9607 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9608 goto lbl; \
9609 } \
9610 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
9611 (u8 *)bp->arr, len); \
9612} while (0)
94a78b79 9613
6891dd25 9614int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 9615{
45229b42 9616 const char *fw_file_name;
94a78b79 9617 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 9618 int rc;
94a78b79 9619
94a78b79 9620 if (CHIP_IS_E1(bp))
45229b42 9621 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 9622 else if (CHIP_IS_E1H(bp))
45229b42 9623 fw_file_name = FW_FILE_NAME_E1H;
f2e0899f
DK
9624 else if (CHIP_IS_E2(bp))
9625 fw_file_name = FW_FILE_NAME_E2;
cdaa7cb8 9626 else {
6891dd25 9627 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
9628 return -EINVAL;
9629 }
94a78b79 9630
6891dd25 9631 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 9632
6891dd25 9633 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 9634 if (rc) {
6891dd25 9635 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
9636 goto request_firmware_exit;
9637 }
9638
9639 rc = bnx2x_check_firmware(bp);
9640 if (rc) {
6891dd25 9641 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
9642 goto request_firmware_exit;
9643 }
9644
9645 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9646
9647 /* Initialize the pointers to the init arrays */
9648 /* Blob */
9649 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9650
9651 /* Opcodes */
9652 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9653
9654 /* Offsets */
ab6ad5a4
EG
9655 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9656 be16_to_cpu_n);
94a78b79
VZ
9657
9658 /* STORMs firmware */
573f2035
EG
9659 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9660 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9661 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9662 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9663 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9664 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9665 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9666 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9667 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9668 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9669 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9670 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9671 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9672 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9673 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9674 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
9675 /* IRO */
9676 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
9677
9678 return 0;
ab6ad5a4 9679
523224a3
DK
9680iro_alloc_err:
9681 kfree(bp->init_ops_offsets);
94a78b79
VZ
9682init_offsets_alloc_err:
9683 kfree(bp->init_ops);
9684init_ops_alloc_err:
9685 kfree(bp->init_data);
9686request_firmware_exit:
9687 release_firmware(bp->firmware);
9688
9689 return rc;
9690}
9691
523224a3
DK
9692static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9693{
9694 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 9695
523224a3
DK
9696#ifdef BCM_CNIC
9697 cid_count += CNIC_CID_MAX;
9698#endif
9699 return roundup(cid_count, QM_CID_ROUND);
9700}
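/* Editor's note (illustrative): roundup(n, r) returns the smallest
 * multiple of r that is >= n, e.g. roundup(70, 32) == 96, so the QM CID
 * count is always padded up to a whole QM_CID_ROUND block.
 */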
f85582f8 9701
a2fbb9ea
ET
9702static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9703 const struct pci_device_id *ent)
9704{
a2fbb9ea
ET
9705 struct net_device *dev = NULL;
9706 struct bnx2x *bp;
37f9ce62 9707 int pcie_width, pcie_speed;
523224a3
DK
9708 int rc, cid_count;
9709
f2e0899f
DK
9710 switch (ent->driver_data) {
9711 case BCM57710:
9712 case BCM57711:
9713 case BCM57711E:
9714 cid_count = FP_SB_MAX_E1x;
9715 break;
9716
9717 case BCM57712:
9718 case BCM57712E:
9719 cid_count = FP_SB_MAX_E2;
9720 break;
a2fbb9ea 9721
f2e0899f
DK
9722 default:
9723 pr_err("Unknown board_type (%ld), aborting\n",
9724 ent->driver_data);
870634b0 9725 return -ENODEV;
f2e0899f
DK
9726 }
9727
ec6ba945 9728 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
f85582f8 9729
a2fbb9ea 9730 /* dev zeroed in init_etherdev */
523224a3 9731 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 9732 if (!dev) {
cdaa7cb8 9733 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 9734 return -ENOMEM;
34f80b04 9735 }
a2fbb9ea 9736
a2fbb9ea 9737 bp = netdev_priv(dev);
7995c64e 9738 bp->msg_enable = debug;
a2fbb9ea 9739
df4770de
EG
9740 pci_set_drvdata(pdev, dev);
9741
523224a3
DK
9742 bp->l2_cid_count = cid_count;
9743
34f80b04 9744 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
9745 if (rc < 0) {
9746 free_netdev(dev);
9747 return rc;
9748 }
9749
34f80b04 9750 rc = bnx2x_init_bp(bp);
693fc0d1
EG
9751 if (rc)
9752 goto init_one_exit;
9753
523224a3
DK
9754 /* calc qm_cid_count */
9755 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9756
ec6ba945
VZ
9757#ifdef BCM_CNIC
9758 /* disable FCOE L2 queue for E1x*/
9759 if (CHIP_IS_E1x(bp))
9760 bp->flags |= NO_FCOE_FLAG;
9761
9762#endif
9763
d6214d7a
DK
 9764 /* Configure interrupt mode: try to enable MSI-X/MSI if
9765 * needed, set bp->num_queues appropriately.
9766 */
9767 bnx2x_set_int_mode(bp);
9768
9769 /* Add all NAPI objects */
9770 bnx2x_add_all_napi(bp);
9771
b340007f
VZ
9772 rc = register_netdev(dev);
9773 if (rc) {
9774 dev_err(&pdev->dev, "Cannot register net device\n");
9775 goto init_one_exit;
9776 }
9777
ec6ba945
VZ
9778#ifdef BCM_CNIC
9779 if (!NO_FCOE(bp)) {
9780 /* Add storage MAC address */
9781 rtnl_lock();
9782 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9783 rtnl_unlock();
9784 }
9785#endif
9786
37f9ce62 9787 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
d6214d7a 9788
cdaa7cb8
VZ
9789 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9790 " IRQ %d, ", board_info[ent->driver_data].name,
9791 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
f2e0899f
DK
9792 pcie_width,
9793 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9794 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9795 "5GHz (Gen2)" : "2.5GHz",
cdaa7cb8
VZ
9796 dev->base_addr, bp->pdev->irq);
9797 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 9798
a2fbb9ea 9799 return 0;
34f80b04
EG
9800
9801init_one_exit:
9802 if (bp->regview)
9803 iounmap(bp->regview);
9804
9805 if (bp->doorbells)
9806 iounmap(bp->doorbells);
9807
9808 free_netdev(dev);
9809
9810 if (atomic_read(&pdev->enable_cnt) == 1)
9811 pci_release_regions(pdev);
9812
9813 pci_disable_device(pdev);
9814 pci_set_drvdata(pdev, NULL);
9815
9816 return rc;
a2fbb9ea
ET
9817}
9818
9819static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9820{
9821 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
9822 struct bnx2x *bp;
9823
9824 if (!dev) {
cdaa7cb8 9825 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
9826 return;
9827 }
228241eb 9828 bp = netdev_priv(dev);
a2fbb9ea 9829
ec6ba945
VZ
9830#ifdef BCM_CNIC
9831 /* Delete storage MAC address */
9832 if (!NO_FCOE(bp)) {
9833 rtnl_lock();
9834 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9835 rtnl_unlock();
9836 }
9837#endif
9838
a2fbb9ea
ET
9839 unregister_netdev(dev);
9840
d6214d7a
DK
9841 /* Delete all NAPI objects */
9842 bnx2x_del_all_napi(bp);
9843
084d6cbb
VZ
 9844 /* Power on: we can't let the PCI layer write to us while we are in D3 */
9845 bnx2x_set_power_state(bp, PCI_D0);
9846
d6214d7a
DK
9847 /* Disable MSI/MSI-X */
9848 bnx2x_disable_msi(bp);
f85582f8 9849
084d6cbb
VZ
9850 /* Power off */
9851 bnx2x_set_power_state(bp, PCI_D3hot);
9852
72fd0718
VZ
9853 /* Make sure RESET task is not scheduled before continuing */
9854 cancel_delayed_work_sync(&bp->reset_task);
9855
a2fbb9ea
ET
9856 if (bp->regview)
9857 iounmap(bp->regview);
9858
9859 if (bp->doorbells)
9860 iounmap(bp->doorbells);
9861
523224a3
DK
9862 bnx2x_free_mem_bp(bp);
9863
a2fbb9ea 9864 free_netdev(dev);
34f80b04
EG
9865
9866 if (atomic_read(&pdev->enable_cnt) == 1)
9867 pci_release_regions(pdev);
9868
a2fbb9ea
ET
9869 pci_disable_device(pdev);
9870 pci_set_drvdata(pdev, NULL);
9871}
9872
f8ef6e44
YG
9873static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9874{
9875 int i;
9876
9877 bp->state = BNX2X_STATE_ERROR;
9878
9879 bp->rx_mode = BNX2X_RX_MODE_NONE;
9880
9881 bnx2x_netif_stop(bp, 0);
c89af1a3 9882 netif_carrier_off(bp->dev);
f8ef6e44
YG
9883
9884 del_timer_sync(&bp->timer);
9885 bp->stats_state = STATS_STATE_DISABLED;
9886 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9887
9888 /* Release IRQs */
d6214d7a 9889 bnx2x_free_irq(bp);
f8ef6e44 9890
f8ef6e44
YG
9891 /* Free SKBs, SGEs, TPA pool and driver internals */
9892 bnx2x_free_skbs(bp);
523224a3 9893
ec6ba945 9894 for_each_rx_queue(bp, i)
f8ef6e44 9895 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 9896
f8ef6e44
YG
9897 bnx2x_free_mem(bp);
9898
9899 bp->state = BNX2X_STATE_CLOSED;
9900
f8ef6e44
YG
9901 return 0;
9902}
9903
9904static void bnx2x_eeh_recover(struct bnx2x *bp)
9905{
9906 u32 val;
9907
9908 mutex_init(&bp->port.phy_mutex);
9909
9910 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9911 bp->link_params.shmem_base = bp->common.shmem_base;
9912 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9913
9914 if (!bp->common.shmem_base ||
9915 (bp->common.shmem_base < 0xA0000) ||
9916 (bp->common.shmem_base >= 0xC0000)) {
9917 BNX2X_DEV_INFO("MCP not active\n");
9918 bp->flags |= NO_MCP_FLAG;
9919 return;
9920 }
9921
9922 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9923 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9924 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9925 BNX2X_ERR("BAD MCP validity signature\n");
9926
9927 if (!BP_NOMCP(bp)) {
f2e0899f
DK
9928 bp->fw_seq =
9929 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9930 DRV_MSG_SEQ_NUMBER_MASK);
f8ef6e44
YG
9931 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9932 }
9933}
9934
493adb1f
WX
9935/**
9936 * bnx2x_io_error_detected - called when PCI error is detected
9937 * @pdev: Pointer to PCI device
9938 * @state: The current pci connection state
9939 *
9940 * This function is called after a PCI bus error affecting
9941 * this device has been detected.
9942 */
9943static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9944 pci_channel_state_t state)
9945{
9946 struct net_device *dev = pci_get_drvdata(pdev);
9947 struct bnx2x *bp = netdev_priv(dev);
9948
9949 rtnl_lock();
9950
9951 netif_device_detach(dev);
9952
07ce50e4
DN
9953 if (state == pci_channel_io_perm_failure) {
9954 rtnl_unlock();
9955 return PCI_ERS_RESULT_DISCONNECT;
9956 }
9957
493adb1f 9958 if (netif_running(dev))
f8ef6e44 9959 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
9960
9961 pci_disable_device(pdev);
9962
9963 rtnl_unlock();
9964
9965 /* Request a slot reset */
9966 return PCI_ERS_RESULT_NEED_RESET;
9967}
9968
9969/**
9970 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9971 * @pdev: Pointer to PCI device
9972 *
9973 * Restart the card from scratch, as if from a cold-boot.
9974 */
9975static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9976{
9977 struct net_device *dev = pci_get_drvdata(pdev);
9978 struct bnx2x *bp = netdev_priv(dev);
9979
9980 rtnl_lock();
9981
9982 if (pci_enable_device(pdev)) {
9983 dev_err(&pdev->dev,
9984 "Cannot re-enable PCI device after reset\n");
9985 rtnl_unlock();
9986 return PCI_ERS_RESULT_DISCONNECT;
9987 }
9988
9989 pci_set_master(pdev);
9990 pci_restore_state(pdev);
9991
9992 if (netif_running(dev))
9993 bnx2x_set_power_state(bp, PCI_D0);
9994
9995 rtnl_unlock();
9996
9997 return PCI_ERS_RESULT_RECOVERED;
9998}
9999
10000/**
10001 * bnx2x_io_resume - called when traffic can start flowing again
10002 * @pdev: Pointer to PCI device
10003 *
10004 * This callback is called when the error recovery driver tells us that
10005 * its OK to resume normal operation.
10006 */
10007static void bnx2x_io_resume(struct pci_dev *pdev)
10008{
10009 struct net_device *dev = pci_get_drvdata(pdev);
10010 struct bnx2x *bp = netdev_priv(dev);
10011
72fd0718 10012 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
f2e0899f
DK
10013 printk(KERN_ERR "Handling parity error recovery. "
10014 "Try again later\n");
72fd0718
VZ
10015 return;
10016 }
10017
493adb1f
WX
10018 rtnl_lock();
10019
f8ef6e44
YG
10020 bnx2x_eeh_recover(bp);
10021
493adb1f 10022 if (netif_running(dev))
f8ef6e44 10023 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
10024
10025 netif_device_attach(dev);
10026
10027 rtnl_unlock();
10028}
10029
10030static struct pci_error_handlers bnx2x_err_handler = {
10031 .error_detected = bnx2x_io_error_detected,
356e2385
EG
10032 .slot_reset = bnx2x_io_slot_reset,
10033 .resume = bnx2x_io_resume,
493adb1f
WX
10034};
10035
a2fbb9ea 10036static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10037 .name = DRV_MODULE_NAME,
10038 .id_table = bnx2x_pci_tbl,
10039 .probe = bnx2x_init_one,
10040 .remove = __devexit_p(bnx2x_remove_one),
10041 .suspend = bnx2x_suspend,
10042 .resume = bnx2x_resume,
10043 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10044};
10045
10046static int __init bnx2x_init(void)
10047{
dd21ca6d
SG
10048 int ret;
10049
7995c64e 10050 pr_info("%s", version);
938cf541 10051
1cf167f2
EG
10052 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10053 if (bnx2x_wq == NULL) {
7995c64e 10054 pr_err("Cannot create workqueue\n");
1cf167f2
EG
10055 return -ENOMEM;
10056 }
10057
dd21ca6d
SG
10058 ret = pci_register_driver(&bnx2x_pci_driver);
10059 if (ret) {
7995c64e 10060 pr_err("Cannot register driver\n");
dd21ca6d
SG
10061 destroy_workqueue(bnx2x_wq);
10062 }
10063 return ret;
a2fbb9ea
ET
10064}
10065
10066static void __exit bnx2x_cleanup(void)
10067{
10068 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
10069
10070 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
10071}
10072
10073module_init(bnx2x_init);
10074module_exit(bnx2x_cleanup);
10075
993ac7b5
MC
10076#ifdef BCM_CNIC
10077
10078/* count denotes the number of new completions we have seen */
10079static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
10080{
10081 struct eth_spe *spe;
10082
10083#ifdef BNX2X_STOP_ON_ERROR
10084 if (unlikely(bp->panic))
10085 return;
10086#endif
10087
10088 spin_lock_bh(&bp->spq_lock);
c2bff63f 10089 BUG_ON(bp->cnic_spq_pending < count);
993ac7b5
MC
10090 bp->cnic_spq_pending -= count;
10091
993ac7b5 10092
c2bff63f
DK
10093 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
10094 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
10095 & SPE_HDR_CONN_TYPE) >>
10096 SPE_HDR_CONN_TYPE_SHIFT;
10097
10098 /* Set validation for iSCSI L2 client before sending SETUP
10099 * ramrod
10100 */
10101 if (type == ETH_CONNECTION_TYPE) {
10102 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
10103 hdr.conn_and_cmd_data) >>
10104 SPE_HDR_CMD_ID_SHIFT) & 0xff;
10105
10106 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
10107 bnx2x_set_ctx_validation(&bp->context.
10108 vcxt[BNX2X_ISCSI_ETH_CID].eth,
10109 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
10110 }
10111
6e30dd4e
VZ
 10112 /* There may be no more than 8 L2 and no more than 8 L5 SPEs pending.
10113 * We also check that the number of outstanding
10114 * COMMON ramrods is not more than the EQ and SPQ can
10115 * accommodate.
c2bff63f 10116 */
6e30dd4e
VZ
10117 if (type == ETH_CONNECTION_TYPE) {
10118 if (!atomic_read(&bp->cq_spq_left))
10119 break;
10120 else
10121 atomic_dec(&bp->cq_spq_left);
10122 } else if (type == NONE_CONNECTION_TYPE) {
10123 if (!atomic_read(&bp->eq_spq_left))
c2bff63f
DK
10124 break;
10125 else
6e30dd4e 10126 atomic_dec(&bp->eq_spq_left);
ec6ba945
VZ
10127 } else if ((type == ISCSI_CONNECTION_TYPE) ||
10128 (type == FCOE_CONNECTION_TYPE)) {
c2bff63f
DK
10129 if (bp->cnic_spq_pending >=
10130 bp->cnic_eth_dev.max_kwqe_pending)
10131 break;
10132 else
10133 bp->cnic_spq_pending++;
10134 } else {
10135 BNX2X_ERR("Unknown SPE type: %d\n", type);
10136 bnx2x_panic();
993ac7b5 10137 break;
c2bff63f 10138 }
993ac7b5
MC
10139
10140 spe = bnx2x_sp_get_next(bp);
10141 *spe = *bp->cnic_kwq_cons;
10142
993ac7b5
MC
10143 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
10144 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
10145
10146 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
10147 bp->cnic_kwq_cons = bp->cnic_kwq;
10148 else
10149 bp->cnic_kwq_cons++;
10150 }
10151 bnx2x_sp_prod_update(bp);
10152 spin_unlock_bh(&bp->spq_lock);
10153}
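/* Editor's sketch of the credit scheme above (illustrative): ETH SPEs
 * consume cq_spq_left and COMMON SPEs consume eq_spq_left; the credits
 * are returned later (e.g. via atomic_add() in bnx2x_drv_ctl() below
 * for the L2 case). The producer side is effectively
 *
 *	if (!atomic_read(&credit))
 *		break;	(leave the SPE queued in cnic_kwq)
 *	atomic_dec(&credit);
 */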
10154
10155static int bnx2x_cnic_sp_queue(struct net_device *dev,
10156 struct kwqe_16 *kwqes[], u32 count)
10157{
10158 struct bnx2x *bp = netdev_priv(dev);
10159 int i;
10160
10161#ifdef BNX2X_STOP_ON_ERROR
10162 if (unlikely(bp->panic))
10163 return -EIO;
10164#endif
10165
10166 spin_lock_bh(&bp->spq_lock);
10167
10168 for (i = 0; i < count; i++) {
10169 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
10170
10171 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
10172 break;
10173
10174 *bp->cnic_kwq_prod = *spe;
10175
10176 bp->cnic_kwq_pending++;
10177
10178 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
10179 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
10180 spe->data.update_data_addr.hi,
10181 spe->data.update_data_addr.lo,
993ac7b5
MC
10182 bp->cnic_kwq_pending);
10183
10184 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
10185 bp->cnic_kwq_prod = bp->cnic_kwq;
10186 else
10187 bp->cnic_kwq_prod++;
10188 }
10189
10190 spin_unlock_bh(&bp->spq_lock);
10191
10192 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
10193 bnx2x_cnic_sp_post(bp, 0);
10194
10195 return i;
10196}
10197
10198static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10199{
10200 struct cnic_ops *c_ops;
10201 int rc = 0;
10202
10203 mutex_lock(&bp->cnic_mutex);
13707f9e
ED
10204 c_ops = rcu_dereference_protected(bp->cnic_ops,
10205 lockdep_is_held(&bp->cnic_mutex));
993ac7b5
MC
10206 if (c_ops)
10207 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10208 mutex_unlock(&bp->cnic_mutex);
10209
10210 return rc;
10211}
10212
10213static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10214{
10215 struct cnic_ops *c_ops;
10216 int rc = 0;
10217
10218 rcu_read_lock();
10219 c_ops = rcu_dereference(bp->cnic_ops);
10220 if (c_ops)
10221 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10222 rcu_read_unlock();
10223
10224 return rc;
10225}
10226
10227/*
10228 * for commands that have no data
10229 */
9f6c9258 10230int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
10231{
10232 struct cnic_ctl_info ctl = {0};
10233
10234 ctl.cmd = cmd;
10235
10236 return bnx2x_cnic_ctl_send(bp, &ctl);
10237}
10238
10239static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
10240{
10241 struct cnic_ctl_info ctl;
10242
10243 /* first we tell CNIC and only then we count this as a completion */
10244 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
10245 ctl.data.comp.cid = cid;
10246
10247 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 10248 bnx2x_cnic_sp_post(bp, 0);
993ac7b5
MC
10249}
10250
10251static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
10252{
10253 struct bnx2x *bp = netdev_priv(dev);
10254 int rc = 0;
10255
10256 switch (ctl->cmd) {
10257 case DRV_CTL_CTXTBL_WR_CMD: {
10258 u32 index = ctl->data.io.offset;
10259 dma_addr_t addr = ctl->data.io.dma_addr;
10260
10261 bnx2x_ilt_wr(bp, index, addr);
10262 break;
10263 }
10264
c2bff63f
DK
10265 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
10266 int count = ctl->data.credit.credit_count;
993ac7b5
MC
10267
10268 bnx2x_cnic_sp_post(bp, count);
10269 break;
10270 }
10271
10272 /* rtnl_lock is held. */
10273 case DRV_CTL_START_L2_CMD: {
10274 u32 cli = ctl->data.ring.client_id;
10275
ec6ba945
VZ
10276 /* Clear FCoE FIP and ALL ENODE MACs addresses first */
10277 bnx2x_del_fcoe_eth_macs(bp);
10278
523224a3
DK
10279 /* Set iSCSI MAC address */
10280 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
10281
10282 mmiowb();
10283 barrier();
10284
 10285 /* Start accepting on the iSCSI L2 ring. Accept all multicasts
 10286 * because it's the only way for the UIO client to accept
 10287 * them: in non-promiscuous mode only one client per
 10288 * function (the leading one, in our case) will receive
 10289 * multicast packets.
10290 */
                bnx2x_rxq_set_mac_filters(bp, cli,
                                          BNX2X_ACCEPT_UNICAST |
                                          BNX2X_ACCEPT_BROADCAST |
                                          BNX2X_ACCEPT_ALL_MULTICAST);
                storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

                break;
        }

        /* rtnl_lock is held. */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                /* Stop accepting on iSCSI L2 ring */
                bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
                storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

                mmiowb();
                barrier();

                /* Unset iSCSI L2 MAC */
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
                break;
        }
        case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
                int count = ctl->data.credit.credit_count;

                smp_mb__before_atomic_inc();
                atomic_add(count, &bp->cq_spq_left);
                smp_mb__after_atomic_inc();
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

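/* Editorial sketch: this is roughly how the CNIC side would hand L2
 * slow-path queue credits back through the drv_ctl hook dispatched above.
 * struct drv_ctl_info and DRV_CTL_RET_L2_SPQ_CREDIT_CMD are taken from the
 * code itself; the helper name and the way the callback pointer is
 * obtained are assumptions.
 */
#if 0   /* example only -- never compiled */
static void demo_return_l2_credit(struct cnic_eth_dev *cp,
                                  struct net_device *dev, int count)
{
        struct drv_ctl_info info;

        info.cmd = DRV_CTL_RET_L2_SPQ_CREDIT_CMD;
        info.data.credit.credit_count = count;
        cp->drv_ctl(dev, &info);        /* ends up in the case arm above */
}
#endif
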
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        if (CHIP_IS_E2(bp))
                cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
        else
                cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;
        cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

        cp->num_irq = 2;
}

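/* Editorial sketch: with MSI-X active, the CNIC module is expected to bind
 * its own handler to the vector reserved for it above (msix_table[1]).
 * demo_cnic_isr is a hypothetical handler name; request_irq() is the
 * standard kernel API.
 */
#if 0   /* example only -- never compiled */
        if (cp->drv_state & CNIC_DRV_STATE_USING_MSIX)
                err = request_irq(cp->irq_arr[0].vector, demo_cnic_isr, 0,
                                  "demo_cnic", dev);
#endif
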
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;
        cp->iro_arr = bp->iro_arr;

        bnx2x_setup_cnic_irq_info(bp);

        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

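/* Editorial note: rcu_assign_pointer() is deliberately the last step in
 * bnx2x_register_cnic() -- it carries the release barrier that makes the
 * freshly initialised kwq and IRQ state visible before any RCU reader can
 * observe a non-NULL bp->cnic_ops.  The generic publish pattern, with
 * hypothetical names:
 */
#if 0   /* example only -- never compiled */
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;
        obj->handler = demo_handler;            /* initialise fully first */
        rcu_assign_pointer(demo_ops_ptr, obj);  /* ... then publish */
#endif
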
static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

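/* Editorial note: the teardown above is the classic RCU unpublish
 * sequence -- clear the pointer, let synchronize_rcu() wait for in-flight
 * readers such as bnx2x_cnic_ctl_send_bh() to drain, then free.  In
 * generic form, with hypothetical names:
 */
#if 0   /* example only -- never compiled */
        rcu_assign_pointer(demo_ops_ptr, NULL); /* new readers see NULL */
        synchronize_rcu();                      /* old readers finish */
        kfree(obj);                             /* now safe to reclaim */
#endif
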
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        /* If both iSCSI and FCoE are disabled, return NULL to indicate
         * to CNIC that it should not try to work with this device.
         */
        if (NO_ISCSI(bp) && NO_FCOE(bp))
                return NULL;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
                             bnx2x_cid_ilt_lines(bp);
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;
        cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
        cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
                                 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

        if (NO_ISCSI_OOO(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

        if (NO_ISCSI(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

        if (NO_FCOE(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

        DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
                         "starting cid %d\n",
           cp->ctx_blk_size,
           cp->ctx_tbl_offset,
           cp->ctx_tbl_len,
           cp->starting_cid);
        return cp;
}
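/* Editorial sketch: a consumer obtains the handle via the symbol exported
 * below, roughly the way the cnic module does.  The surrounding control
 * flow here is an assumption about the caller, not part of this driver.
 */
#if 0   /* example only -- never compiled */
        struct cnic_eth_dev *(*probe)(struct net_device *);
        struct cnic_eth_dev *ethdev = NULL;

        probe = symbol_get(bnx2x_cnic_probe);
        if (probe) {
                ethdev = (*probe)(dev); /* NULL => no iSCSI and no FCoE */
                symbol_put(bnx2x_cnic_probe);
        }
#endif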
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */