8390: Move the 8390 related drivers
[deliverable/linux.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
dc187cb3 3 * Copyright (c) 2004-2011 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
3a9c6a49 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
f2a4f052
MC
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16
17#include <linux/kernel.h>
18#include <linux/timer.h>
19#include <linux/errno.h>
20#include <linux/ioport.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h>
23#include <linux/interrupt.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/dma-mapping.h>
1977f032 30#include <linux/bitops.h>
f2a4f052
MC
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <linux/delay.h>
34#include <asm/byteorder.h>
c86a31f4 35#include <asm/page.h>
f2a4f052
MC
36#include <linux/time.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
f2a4f052 39#include <linux/if_vlan.h>
f2a4f052 40#include <net/ip.h>
de081fa5 41#include <net/tcp.h>
f2a4f052 42#include <net/checksum.h>
f2a4f052
MC
43#include <linux/workqueue.h>
44#include <linux/crc32.h>
45#include <linux/prefetch.h>
29b12174 46#include <linux/cache.h>
57579f76 47#include <linux/firmware.h>
706bf240 48#include <linux/log2.h>
cd709aa9 49#include <linux/aer.h>
f2a4f052 50
4edd473f
MC
51#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
52#define BCM_CNIC 1
53#include "cnic_if.h"
54#endif
b6016b76
MC
55#include "bnx2.h"
56#include "bnx2_fw.h"
b3448b0b 57
b6016b76 58#define DRV_MODULE_NAME "bnx2"
3aeb7d22
MC
59#define DRV_MODULE_VERSION "2.1.11"
60#define DRV_MODULE_RELDATE "July 20, 2011"
0268102d 61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
22fa159d 62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
dc187cb3 63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
22fa159d
MC
64#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
b6016b76
MC
66
67#define RUN_AT(x) (jiffies + (x))
68
69/* Time in jiffies before concluding the transmitter is hung. */
70#define TX_TIMEOUT (5*HZ)
71
fefa8645 72static char version[] __devinitdata =
b6016b76
MC
73 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
74
75MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
453a9c6e 76MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
b6016b76
MC
77MODULE_LICENSE("GPL");
78MODULE_VERSION(DRV_MODULE_VERSION);
57579f76
MC
79MODULE_FIRMWARE(FW_MIPS_FILE_06);
80MODULE_FIRMWARE(FW_RV2P_FILE_06);
81MODULE_FIRMWARE(FW_MIPS_FILE_09);
82MODULE_FIRMWARE(FW_RV2P_FILE_09);
078b0735 83MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
b6016b76
MC
84
85static int disable_msi = 0;
86
87module_param(disable_msi, int, 0);
88MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
89
/* Board variants supported by this driver; used as an index into
 * board_info[] below and as driver_data in the PCI device table.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
103
/* Human-readable adapter names, indexed by board_t above. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
120
7bb0a04f 121static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
b6016b76
MC
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
123 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
131 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
136 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
138 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
7bb0a04f
MC
140 { PCI_VENDOR_ID_BROADCOM, 0x163b,
141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
1caacecb 142 { PCI_VENDOR_ID_BROADCOM, 0x163c,
1f2435e5 143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
b6016b76
MC
144 { 0, }
145};
146
0ced9d01 147static const struct flash_spec flash_table[] =
b6016b76 148{
e30372c9
MC
149#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
150#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 151 /* Slow EEPROM */
37137709 152 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 153 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
154 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
155 "EEPROM - slow"},
37137709
MC
156 /* Expansion entry 0001 */
157 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 158 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
159 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 "Entry 0001"},
b6016b76
MC
161 /* Saifun SA25F010 (non-buffered flash) */
162 /* strap, cfg1, & write1 need updates */
37137709 163 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 164 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
165 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
166 "Non-buffered flash (128kB)"},
167 /* Saifun SA25F020 (non-buffered flash) */
168 /* strap, cfg1, & write1 need updates */
37137709 169 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 170 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
171 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
172 "Non-buffered flash (256kB)"},
37137709
MC
173 /* Expansion entry 0100 */
174 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 175 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
176 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 "Entry 0100"},
178 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 179 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 180 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
181 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
182 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
183 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
184 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 185 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
186 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
187 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
188 /* Saifun SA25F005 (non-buffered flash) */
189 /* strap, cfg1, & write1 need updates */
190 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 191 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
192 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
193 "Non-buffered flash (64kB)"},
194 /* Fast EEPROM */
195 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 196 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
197 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
198 "EEPROM - fast"},
199 /* Expansion entry 1001 */
200 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 201 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
202 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
203 "Entry 1001"},
204 /* Expansion entry 1010 */
205 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 206 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
207 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
208 "Entry 1010"},
209 /* ATMEL AT45DB011B (buffered flash) */
210 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 211 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
212 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
213 "Buffered flash (128kB)"},
214 /* Expansion entry 1100 */
215 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 216 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
217 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
218 "Entry 1100"},
219 /* Expansion entry 1101 */
220 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 221 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
222 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
223 "Entry 1101"},
224 /* Ateml Expansion entry 1110 */
225 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 226 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
227 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
228 "Entry 1110 (Atmel)"},
229 /* ATMEL AT45DB021B (buffered flash) */
230 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 231 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
232 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
233 "Buffered flash (256kB)"},
b6016b76
MC
234};
235
0ced9d01 236static const struct flash_spec flash_5709 = {
e30372c9
MC
237 .flags = BNX2_NV_BUFFERED,
238 .page_bits = BCM5709_FLASH_PAGE_BITS,
239 .page_size = BCM5709_FLASH_PAGE_SIZE,
240 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
241 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
242 .name = "5709 Buffered flash (256kB)",
243};
244
b6016b76
MC
245MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
246
4327ba43 247static void bnx2_init_napi(struct bnx2 *bp);
f048fa9c 248static void bnx2_del_napi(struct bnx2 *bp);
4327ba43 249
35e9010b 250static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
e89bbf10 251{
2f8af120 252 u32 diff;
e89bbf10 253
11848b96
MC
254 /* Tell compiler to fetch tx_prod and tx_cons from memory. */
255 barrier();
faac9c4b
MC
256
257 /* The ring uses 256 indices for 255 entries, one of them
258 * needs to be skipped.
259 */
35e9010b 260 diff = txr->tx_prod - txr->tx_cons;
faac9c4b
MC
261 if (unlikely(diff >= TX_DESC_CNT)) {
262 diff &= 0xffff;
263 if (diff == TX_DESC_CNT)
264 diff = MAX_TX_DESC_CNT;
265 }
807540ba 266 return bp->tx_ring_size - diff;
e89bbf10
MC
267}
268
b6016b76
MC
269static u32
270bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271{
1b8227c4
MC
272 u32 val;
273
274 spin_lock_bh(&bp->indirect_lock);
b6016b76 275 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
276 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
277 spin_unlock_bh(&bp->indirect_lock);
278 return val;
b6016b76
MC
279}
280
281static void
282bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
283{
1b8227c4 284 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
285 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
286 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 287 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
288}
289
2726d6e1
MC
290static void
291bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
292{
293 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
294}
295
296static u32
297bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
298{
807540ba 299 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
2726d6e1
MC
300}
301
b6016b76
MC
302static void
303bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
304{
305 offset += cid_addr;
1b8227c4 306 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
307 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
308 int i;
309
310 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
311 REG_WR(bp, BNX2_CTX_CTX_CTRL,
312 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
313 for (i = 0; i < 5; i++) {
59b47d8a
MC
314 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
315 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
316 break;
317 udelay(5);
318 }
319 } else {
320 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
321 REG_WR(bp, BNX2_CTX_DATA, val);
322 }
1b8227c4 323 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
324}
325
4edd473f
MC
326#ifdef BCM_CNIC
327static int
328bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329{
330 struct bnx2 *bp = netdev_priv(dev);
331 struct drv_ctl_io *io = &info->data.io;
332
333 switch (info->cmd) {
334 case DRV_CTL_IO_WR_CMD:
335 bnx2_reg_wr_ind(bp, io->offset, io->data);
336 break;
337 case DRV_CTL_IO_RD_CMD:
338 io->data = bnx2_reg_rd_ind(bp, io->offset);
339 break;
340 case DRV_CTL_CTX_WR_CMD:
341 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342 break;
343 default:
344 return -EINVAL;
345 }
346 return 0;
347}
348
349static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
350{
351 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
352 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
353 int sb_id;
354
355 if (bp->flags & BNX2_FLAG_USING_MSIX) {
356 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
357 bnapi->cnic_present = 0;
358 sb_id = bp->irq_nvecs;
359 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
360 } else {
361 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
362 bnapi->cnic_tag = bnapi->last_status_idx;
363 bnapi->cnic_present = 1;
364 sb_id = 0;
365 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
366 }
367
368 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
369 cp->irq_arr[0].status_blk = (void *)
370 ((unsigned long) bnapi->status_blk.msi +
371 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
372 cp->irq_arr[0].status_blk_num = sb_id;
373 cp->num_irq = 1;
374}
375
376static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
377 void *data)
378{
379 struct bnx2 *bp = netdev_priv(dev);
380 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
381
382 if (ops == NULL)
383 return -EINVAL;
384
385 if (cp->drv_state & CNIC_DRV_STATE_REGD)
386 return -EBUSY;
387
41c2178a
MC
388 if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
389 return -ENODEV;
390
4edd473f
MC
391 bp->cnic_data = data;
392 rcu_assign_pointer(bp->cnic_ops, ops);
393
394 cp->num_irq = 0;
395 cp->drv_state = CNIC_DRV_STATE_REGD;
396
397 bnx2_setup_cnic_irq_info(bp);
398
399 return 0;
400}
401
402static int bnx2_unregister_cnic(struct net_device *dev)
403{
404 struct bnx2 *bp = netdev_priv(dev);
405 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
406 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
407
c5a88950 408 mutex_lock(&bp->cnic_lock);
4edd473f
MC
409 cp->drv_state = 0;
410 bnapi->cnic_present = 0;
411 rcu_assign_pointer(bp->cnic_ops, NULL);
c5a88950 412 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
413 synchronize_rcu();
414 return 0;
415}
416
417struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
418{
419 struct bnx2 *bp = netdev_priv(dev);
420 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
421
7625eb2f
MC
422 if (!cp->max_iscsi_conn)
423 return NULL;
424
4edd473f
MC
425 cp->drv_owner = THIS_MODULE;
426 cp->chip_id = bp->chip_id;
427 cp->pdev = bp->pdev;
428 cp->io_base = bp->regview;
429 cp->drv_ctl = bnx2_drv_ctl;
430 cp->drv_register_cnic = bnx2_register_cnic;
431 cp->drv_unregister_cnic = bnx2_unregister_cnic;
432
433 return cp;
434}
435EXPORT_SYMBOL(bnx2_cnic_probe);
436
437static void
438bnx2_cnic_stop(struct bnx2 *bp)
439{
440 struct cnic_ops *c_ops;
441 struct cnic_ctl_info info;
442
c5a88950 443 mutex_lock(&bp->cnic_lock);
13707f9e
ED
444 c_ops = rcu_dereference_protected(bp->cnic_ops,
445 lockdep_is_held(&bp->cnic_lock));
4edd473f
MC
446 if (c_ops) {
447 info.cmd = CNIC_CTL_STOP_CMD;
448 c_ops->cnic_ctl(bp->cnic_data, &info);
449 }
c5a88950 450 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
451}
452
453static void
454bnx2_cnic_start(struct bnx2 *bp)
455{
456 struct cnic_ops *c_ops;
457 struct cnic_ctl_info info;
458
c5a88950 459 mutex_lock(&bp->cnic_lock);
13707f9e
ED
460 c_ops = rcu_dereference_protected(bp->cnic_ops,
461 lockdep_is_held(&bp->cnic_lock));
4edd473f
MC
462 if (c_ops) {
463 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
464 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
465
466 bnapi->cnic_tag = bnapi->last_status_idx;
467 }
468 info.cmd = CNIC_CTL_START_CMD;
469 c_ops->cnic_ctl(bp->cnic_data, &info);
470 }
c5a88950 471 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
472}
473
474#else
475
/* No-op stand-ins used when CNIC support is not compiled in. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
485
486#endif
487
b6016b76
MC
488static int
489bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
490{
491 u32 val1;
492 int i, ret;
493
583c28e5 494 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
495 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
496 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
497
498 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
499 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
500
501 udelay(40);
502 }
503
504 val1 = (bp->phy_addr << 21) | (reg << 16) |
505 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
506 BNX2_EMAC_MDIO_COMM_START_BUSY;
507 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
508
509 for (i = 0; i < 50; i++) {
510 udelay(10);
511
512 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
513 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
514 udelay(5);
515
516 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
517 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
518
519 break;
520 }
521 }
522
523 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
524 *val = 0x0;
525 ret = -EBUSY;
526 }
527 else {
528 *val = val1;
529 ret = 0;
530 }
531
583c28e5 532 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
533 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
534 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
535
536 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
537 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
538
539 udelay(40);
540 }
541
542 return ret;
543}
544
545static int
546bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
547{
548 u32 val1;
549 int i, ret;
550
583c28e5 551 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
552 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
553 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
554
555 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
556 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
557
558 udelay(40);
559 }
560
561 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
562 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
563 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
564 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 565
b6016b76
MC
566 for (i = 0; i < 50; i++) {
567 udelay(10);
568
569 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
570 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
571 udelay(5);
572 break;
573 }
574 }
575
576 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
577 ret = -EBUSY;
578 else
579 ret = 0;
580
583c28e5 581 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
582 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
583 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
584
585 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
586 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
587
588 udelay(40);
589 }
590
591 return ret;
592}
593
594static void
595bnx2_disable_int(struct bnx2 *bp)
596{
b4b36042
MC
597 int i;
598 struct bnx2_napi *bnapi;
599
600 for (i = 0; i < bp->irq_nvecs; i++) {
601 bnapi = &bp->bnx2_napi[i];
602 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
603 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
604 }
b6016b76
MC
605 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
606}
607
608static void
609bnx2_enable_int(struct bnx2 *bp)
610{
b4b36042
MC
611 int i;
612 struct bnx2_napi *bnapi;
35efa7c1 613
b4b36042
MC
614 for (i = 0; i < bp->irq_nvecs; i++) {
615 bnapi = &bp->bnx2_napi[i];
1269a8a6 616
b4b36042
MC
617 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
618 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
619 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
620 bnapi->last_status_idx);
b6016b76 621
b4b36042
MC
622 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
623 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
624 bnapi->last_status_idx);
625 }
bf5295bb 626 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
627}
628
629static void
630bnx2_disable_int_sync(struct bnx2 *bp)
631{
b4b36042
MC
632 int i;
633
b6016b76 634 atomic_inc(&bp->intr_sem);
3767546c
MC
635 if (!netif_running(bp->dev))
636 return;
637
b6016b76 638 bnx2_disable_int(bp);
b4b36042
MC
639 for (i = 0; i < bp->irq_nvecs; i++)
640 synchronize_irq(bp->irq_tbl[i].vector);
b6016b76
MC
641}
642
35efa7c1
MC
643static void
644bnx2_napi_disable(struct bnx2 *bp)
645{
b4b36042
MC
646 int i;
647
648 for (i = 0; i < bp->irq_nvecs; i++)
649 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
650}
651
652static void
653bnx2_napi_enable(struct bnx2 *bp)
654{
b4b36042
MC
655 int i;
656
657 for (i = 0; i < bp->irq_nvecs; i++)
658 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
659}
660
b6016b76 661static void
212f9934 662bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
b6016b76 663{
212f9934
MC
664 if (stop_cnic)
665 bnx2_cnic_stop(bp);
b6016b76 666 if (netif_running(bp->dev)) {
35efa7c1 667 bnx2_napi_disable(bp);
b6016b76 668 netif_tx_disable(bp->dev);
b6016b76 669 }
b7466560 670 bnx2_disable_int_sync(bp);
a0ba6760 671 netif_carrier_off(bp->dev); /* prevent tx timeout */
b6016b76
MC
672}
673
674static void
212f9934 675bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
b6016b76
MC
676{
677 if (atomic_dec_and_test(&bp->intr_sem)) {
678 if (netif_running(bp->dev)) {
706bf240 679 netif_tx_wake_all_queues(bp->dev);
a0ba6760
MC
680 spin_lock_bh(&bp->phy_lock);
681 if (bp->link_up)
682 netif_carrier_on(bp->dev);
683 spin_unlock_bh(&bp->phy_lock);
35efa7c1 684 bnx2_napi_enable(bp);
b6016b76 685 bnx2_enable_int(bp);
212f9934
MC
686 if (start_cnic)
687 bnx2_cnic_start(bp);
b6016b76
MC
688 }
689 }
690}
691
35e9010b
MC
692static void
693bnx2_free_tx_mem(struct bnx2 *bp)
694{
695 int i;
696
697 for (i = 0; i < bp->num_tx_rings; i++) {
698 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
699 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
700
701 if (txr->tx_desc_ring) {
36227e88
SG
702 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
703 txr->tx_desc_ring,
704 txr->tx_desc_mapping);
35e9010b
MC
705 txr->tx_desc_ring = NULL;
706 }
707 kfree(txr->tx_buf_ring);
708 txr->tx_buf_ring = NULL;
709 }
710}
711
bb4f98ab
MC
712static void
713bnx2_free_rx_mem(struct bnx2 *bp)
714{
715 int i;
716
717 for (i = 0; i < bp->num_rx_rings; i++) {
718 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
719 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
720 int j;
721
722 for (j = 0; j < bp->rx_max_ring; j++) {
723 if (rxr->rx_desc_ring[j])
36227e88
SG
724 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
725 rxr->rx_desc_ring[j],
726 rxr->rx_desc_mapping[j]);
bb4f98ab
MC
727 rxr->rx_desc_ring[j] = NULL;
728 }
25b0b999 729 vfree(rxr->rx_buf_ring);
bb4f98ab
MC
730 rxr->rx_buf_ring = NULL;
731
732 for (j = 0; j < bp->rx_max_pg_ring; j++) {
733 if (rxr->rx_pg_desc_ring[j])
36227e88
SG
734 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
735 rxr->rx_pg_desc_ring[j],
736 rxr->rx_pg_desc_mapping[j]);
3298a738 737 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab 738 }
25b0b999 739 vfree(rxr->rx_pg_ring);
bb4f98ab
MC
740 rxr->rx_pg_ring = NULL;
741 }
742}
743
35e9010b
MC
744static int
745bnx2_alloc_tx_mem(struct bnx2 *bp)
746{
747 int i;
748
749 for (i = 0; i < bp->num_tx_rings; i++) {
750 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
751 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
752
753 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
754 if (txr->tx_buf_ring == NULL)
755 return -ENOMEM;
756
757 txr->tx_desc_ring =
36227e88
SG
758 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
759 &txr->tx_desc_mapping, GFP_KERNEL);
35e9010b
MC
760 if (txr->tx_desc_ring == NULL)
761 return -ENOMEM;
762 }
763 return 0;
764}
765
bb4f98ab
MC
766static int
767bnx2_alloc_rx_mem(struct bnx2 *bp)
768{
769 int i;
770
771 for (i = 0; i < bp->num_rx_rings; i++) {
772 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
773 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
774 int j;
775
776 rxr->rx_buf_ring =
89bf67f1 777 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
bb4f98ab
MC
778 if (rxr->rx_buf_ring == NULL)
779 return -ENOMEM;
780
bb4f98ab
MC
781 for (j = 0; j < bp->rx_max_ring; j++) {
782 rxr->rx_desc_ring[j] =
36227e88
SG
783 dma_alloc_coherent(&bp->pdev->dev,
784 RXBD_RING_SIZE,
785 &rxr->rx_desc_mapping[j],
786 GFP_KERNEL);
bb4f98ab
MC
787 if (rxr->rx_desc_ring[j] == NULL)
788 return -ENOMEM;
789
790 }
791
792 if (bp->rx_pg_ring_size) {
89bf67f1 793 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
bb4f98ab
MC
794 bp->rx_max_pg_ring);
795 if (rxr->rx_pg_ring == NULL)
796 return -ENOMEM;
797
bb4f98ab
MC
798 }
799
800 for (j = 0; j < bp->rx_max_pg_ring; j++) {
801 rxr->rx_pg_desc_ring[j] =
36227e88
SG
802 dma_alloc_coherent(&bp->pdev->dev,
803 RXBD_RING_SIZE,
804 &rxr->rx_pg_desc_mapping[j],
805 GFP_KERNEL);
bb4f98ab
MC
806 if (rxr->rx_pg_desc_ring[j] == NULL)
807 return -ENOMEM;
808
809 }
810 }
811 return 0;
812}
813
b6016b76
MC
814static void
815bnx2_free_mem(struct bnx2 *bp)
816{
13daffa2 817 int i;
43e80b89 818 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
13daffa2 819
35e9010b 820 bnx2_free_tx_mem(bp);
bb4f98ab 821 bnx2_free_rx_mem(bp);
35e9010b 822
59b47d8a
MC
823 for (i = 0; i < bp->ctx_pages; i++) {
824 if (bp->ctx_blk[i]) {
36227e88
SG
825 dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
826 bp->ctx_blk[i],
827 bp->ctx_blk_mapping[i]);
59b47d8a
MC
828 bp->ctx_blk[i] = NULL;
829 }
830 }
43e80b89 831 if (bnapi->status_blk.msi) {
36227e88
SG
832 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
833 bnapi->status_blk.msi,
834 bp->status_blk_mapping);
43e80b89 835 bnapi->status_blk.msi = NULL;
0f31f994 836 bp->stats_blk = NULL;
b6016b76 837 }
b6016b76
MC
838}
839
840static int
841bnx2_alloc_mem(struct bnx2 *bp)
842{
35e9010b 843 int i, status_blk_size, err;
43e80b89
MC
844 struct bnx2_napi *bnapi;
845 void *status_blk;
b6016b76 846
0f31f994
MC
847 /* Combine status and statistics blocks into one allocation. */
848 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
f86e82fb 849 if (bp->flags & BNX2_FLAG_MSIX_CAP)
b4b36042
MC
850 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
851 BNX2_SBLK_MSIX_ALIGN_SIZE);
0f31f994
MC
852 bp->status_stats_size = status_blk_size +
853 sizeof(struct statistics_block);
854
36227e88
SG
855 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
856 &bp->status_blk_mapping, GFP_KERNEL);
43e80b89 857 if (status_blk == NULL)
b6016b76
MC
858 goto alloc_mem_err;
859
43e80b89 860 memset(status_blk, 0, bp->status_stats_size);
b6016b76 861
43e80b89
MC
862 bnapi = &bp->bnx2_napi[0];
863 bnapi->status_blk.msi = status_blk;
864 bnapi->hw_tx_cons_ptr =
865 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
866 bnapi->hw_rx_cons_ptr =
867 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
f86e82fb 868 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
379b39a2 869 for (i = 1; i < bp->irq_nvecs; i++) {
43e80b89
MC
870 struct status_block_msix *sblk;
871
872 bnapi = &bp->bnx2_napi[i];
b4b36042 873
43e80b89
MC
874 sblk = (void *) (status_blk +
875 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
876 bnapi->status_blk.msix = sblk;
877 bnapi->hw_tx_cons_ptr =
878 &sblk->status_tx_quick_consumer_index;
879 bnapi->hw_rx_cons_ptr =
880 &sblk->status_rx_quick_consumer_index;
b4b36042
MC
881 bnapi->int_num = i << 24;
882 }
883 }
35efa7c1 884
43e80b89 885 bp->stats_blk = status_blk + status_blk_size;
b6016b76 886
0f31f994 887 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 888
59b47d8a
MC
889 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
890 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
891 if (bp->ctx_pages == 0)
892 bp->ctx_pages = 1;
893 for (i = 0; i < bp->ctx_pages; i++) {
36227e88 894 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
59b47d8a 895 BCM_PAGE_SIZE,
36227e88
SG
896 &bp->ctx_blk_mapping[i],
897 GFP_KERNEL);
59b47d8a
MC
898 if (bp->ctx_blk[i] == NULL)
899 goto alloc_mem_err;
900 }
901 }
35e9010b 902
bb4f98ab
MC
903 err = bnx2_alloc_rx_mem(bp);
904 if (err)
905 goto alloc_mem_err;
906
35e9010b
MC
907 err = bnx2_alloc_tx_mem(bp);
908 if (err)
909 goto alloc_mem_err;
910
b6016b76
MC
911 return 0;
912
913alloc_mem_err:
914 bnx2_free_mem(bp);
915 return -ENOMEM;
916}
917
e3648b3d
MC
918static void
919bnx2_report_fw_link(struct bnx2 *bp)
920{
921 u32 fw_link_status = 0;
922
583c28e5 923 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
924 return;
925
e3648b3d
MC
926 if (bp->link_up) {
927 u32 bmsr;
928
929 switch (bp->line_speed) {
930 case SPEED_10:
931 if (bp->duplex == DUPLEX_HALF)
932 fw_link_status = BNX2_LINK_STATUS_10HALF;
933 else
934 fw_link_status = BNX2_LINK_STATUS_10FULL;
935 break;
936 case SPEED_100:
937 if (bp->duplex == DUPLEX_HALF)
938 fw_link_status = BNX2_LINK_STATUS_100HALF;
939 else
940 fw_link_status = BNX2_LINK_STATUS_100FULL;
941 break;
942 case SPEED_1000:
943 if (bp->duplex == DUPLEX_HALF)
944 fw_link_status = BNX2_LINK_STATUS_1000HALF;
945 else
946 fw_link_status = BNX2_LINK_STATUS_1000FULL;
947 break;
948 case SPEED_2500:
949 if (bp->duplex == DUPLEX_HALF)
950 fw_link_status = BNX2_LINK_STATUS_2500HALF;
951 else
952 fw_link_status = BNX2_LINK_STATUS_2500FULL;
953 break;
954 }
955
956 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
957
958 if (bp->autoneg) {
959 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
960
ca58c3af
MC
961 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
962 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
963
964 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
583c28e5 965 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
e3648b3d
MC
966 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
967 else
968 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
969 }
970 }
971 else
972 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
973
2726d6e1 974 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
e3648b3d
MC
975}
976
9b1084b8
MC
977static char *
978bnx2_xceiver_str(struct bnx2 *bp)
979{
807540ba 980 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 981 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
807540ba 982 "Copper");
9b1084b8
MC
983}
984
b6016b76
MC
985static void
986bnx2_report_link(struct bnx2 *bp)
987{
988 if (bp->link_up) {
989 netif_carrier_on(bp->dev);
3a9c6a49
JP
990 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
991 bnx2_xceiver_str(bp),
992 bp->line_speed,
993 bp->duplex == DUPLEX_FULL ? "full" : "half");
b6016b76
MC
994
995 if (bp->flow_ctrl) {
996 if (bp->flow_ctrl & FLOW_CTRL_RX) {
3a9c6a49 997 pr_cont(", receive ");
b6016b76 998 if (bp->flow_ctrl & FLOW_CTRL_TX)
3a9c6a49 999 pr_cont("& transmit ");
b6016b76
MC
1000 }
1001 else {
3a9c6a49 1002 pr_cont(", transmit ");
b6016b76 1003 }
3a9c6a49 1004 pr_cont("flow control ON");
b6016b76 1005 }
3a9c6a49
JP
1006 pr_cont("\n");
1007 } else {
b6016b76 1008 netif_carrier_off(bp->dev);
3a9c6a49
JP
1009 netdev_err(bp->dev, "NIC %s Link is Down\n",
1010 bnx2_xceiver_str(bp));
b6016b76 1011 }
e3648b3d
MC
1012
1013 bnx2_report_fw_link(bp);
b6016b76
MC
1014}
1015
/* Derive bp->flow_ctrl (RX/TX pause enables) for the current link.
 * - If speed+pause autoneg are not both enabled, use the forced
 *   req_flow_ctrl (full duplex only).
 * - Half duplex never gets pause.
 * - 5708 SerDes reports the resolved pause state directly in its
 *   1000X_STAT1 register.
 * - Otherwise resolve from the local/partner advertisement registers
 *   per IEEE 802.3 Table 28B-3 (1000BASE-X pause bits are first mapped
 *   onto the copper PAUSE bit layout).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		/* Translate 1000BASE-X pause advertisement bits into the
		 * copper ADVERTISE_PAUSE_* layout so the shared resolution
		 * logic below works for both PHY types.
		 */
		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1091
/* Decode link speed/duplex for the 5709 SerDes PHY after link-up by
 * reading TOP_AN_STATUS1 through the GP_STATUS block-address window
 * (the window is restored to COMBO_IEEEB0 afterwards).  In forced mode
 * (no AUTONEG_SPEED) the requested settings are used instead.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	/* NOTE(review): no default case — an unrecognized speed code leaves
	 * bp->line_speed unchanged.
	 */
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1130
/* Decode link speed/duplex for the 5708 SerDes PHY after link-up from
 * the resolved-status register BCM5708S_1000X_STAT1.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
1159
/* Decode link parameters for the 5706 SerDes PHY after link-up.
 * Line speed is always 1000 Mbps on this PHY; duplex comes from the
 * forced BMCR setting or, when autoneg is enabled, from the highest
 * common 1000BASE-X advertisement bit.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR duplex is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1196
/* Resolve line_speed/duplex for a copper PHY that just linked up.
 * With autoneg: try 1000BASE-T first — the partner's LPA_1000* bits in
 * MII_STAT1000 sit two positions above the local MII_CTRL1000 bits,
 * hence the ">> 2" before intersecting — then fall back to the common
 * 10/100 advertisement bits.  If nothing matches, declare link down.
 * Without autoneg, decode the forced BMCR speed/duplex bits.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Align partner 1000BASE-T ability bits with local ones. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability — treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode BMCR directly. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1262
/* Program the context-type word of one L2 RX context, including the
 * per-context TX flow-control enable bit derived from bp->flow_ctrl.
 * NOTE(review): the meaning of the "0x02 << 8" field is not visible
 * here — presumably a chip-defined context parameter; confirm against
 * the 5706/5708/5709 programming guide before changing.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1277
bb4f98ab
MC
1278static void
1279bnx2_init_all_rx_contexts(struct bnx2 *bp)
1280{
1281 int i;
1282 u32 cid;
1283
1284 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1285 if (i == 1)
1286 cid = RX_RSS_CID;
1287 bnx2_init_rx_context(bp, cid);
1288 }
1289}
1290
/* Program the EMAC for the current link parameters (port mode, speed,
 * duplex, RX/TX pause), acknowledge the link-change interrupt, and
 * refresh all RX contexts (which cache the TX flow-control enable).
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths value; 1G half duplex needs the larger one.
	 * NOTE(review): 0x2620/0x26ff encode IPG/slot-time fields —
	 * confirm exact field meanings against the chip documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* RX contexts cache the TX pause enable — propagate the change. */
	bnx2_init_all_rx_contexts(bp);
}
1357
27a005b8
MC
1358static void
1359bnx2_enable_bmsr1(struct bnx2 *bp)
1360{
583c28e5 1361 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1362 (CHIP_NUM(bp) == CHIP_NUM_5709))
1363 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1364 MII_BNX2_BLK_ADDR_GP_STATUS);
1365}
1366
1367static void
1368bnx2_disable_bmsr1(struct bnx2 *bp)
1369{
583c28e5 1370 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1371 (CHIP_NUM(bp) == CHIP_NUM_5709))
1372 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1373 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1374}
1375
/* Ensure 2.5G advertisement (UP1_2G5) is enabled on a 2.5G-capable
 * SerDes PHY, adding 2500baseX to bp->advertising when autonegotiating.
 * Returns 1 if 2.5G was already enabled, 0 if the PHY is not capable or
 * the bit had to be turned on just now (callers use this to decide
 * whether to force a link bounce).  On 5709 the UP1 register lives in
 * the OVER1G block, so the register window is switched around the
 * access.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1404
/* Ensure 2.5G advertisement (UP1_2G5) is disabled on a 2.5G-capable
 * SerDes PHY.  Returns 1 if the bit was set and had to be cleared
 * (i.e. the advertisement actually changed), 0 otherwise.  On 5709 the
 * UP1 register is reached through the OVER1G block window.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1430
/* Force the SerDes PHY to 2.5 Gbps.  Chip-specific:
 * - 5709: set FORCE + FORCE_2_5G in the SERDES_DIG MISC1 register
 *   (reached via the block-address window), then re-read BMCR.
 * - 5708: set the FORCE_2500 bit in BMCR.
 * Other chips are untouched.  If speed autoneg was requested, autoneg
 * is switched off in BMCR (this is a forced mode) and duplex follows
 * bp->req_duplex.  Silently bails out if the BMCR read fails.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Select the SERDES_DIG block to reach MISC1. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;		/* BMCR read failed; don't write garbage */

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1474
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific force-2.5G
 * bits (SERDES_DIG MISC1 FORCE on 5709, BMCR FORCE_2500 on 5708) and,
 * if speed autoneg is requested, re-enable and restart autoneg at 1G.
 * Silently bails out if the BMCR read fails.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Select the SERDES_DIG block to reach MISC1. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;		/* BMCR read failed; don't write garbage */

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1513
/* Toggle the 5706 SerDes forced-link-down workaround by rewriting the
 * SERDES_CTL expansion register through the DSP address/data pair.
 * NOTE(review): the 0xff0f / 0xc0 masks are undocumented here — start=1
 * appears to engage the forced-down state and start=0 to release it
 * (see the FORCED_DOWN handling in bnx2_set_link); confirm against
 * Broadcom PHY documentation before changing.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1526
/* Refresh the overall link state for locally-managed PHYs: sample BMSR,
 * apply 5706 SerDes workarounds, dispatch to the chip-specific link-up
 * decoder (or tear the link down), report changes and reprogram the MAC.
 * No-op in loopback modes and when the firmware manages the PHY.
 * Always returns 0.
 * NOTE(review): callers appear to hold bp->phy_lock (cf. the lock
 * annotations on the setup functions) — confirm.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;	/* remember old state to detect changes */

	/* Read BMSR twice — NOTE(review): presumably because link-down
	 * events are latched and the second read returns current status
	 * (standard MII behavior); confirm.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced-down 5706 SerDes link. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Cross-check BMSR against the EMAC link bit and the
		 * AN debug shadow register (read twice, like BMSR).
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: back out forced 2.5G and parallel-detect
		 * state so the next autoneg starts clean.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1610
/* Soft-reset the PHY via BMCR_RESET and poll for the self-clearing
 * reset bit (100 iterations x 10us =~ 1ms, plus a 20us settle delay).
 * Returns 0 on success, -EBUSY if the bit never clears.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);	/* let the PHY settle after reset */
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
1634
1635static u32
1636bnx2_phy_get_pause_adv(struct bnx2 *bp)
1637{
1638 u32 adv = 0;
1639
1640 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1641 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1642
583c28e5 1643 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1644 adv = ADVERTISE_1000XPAUSE;
1645 }
1646 else {
1647 adv = ADVERTISE_PAUSE_CAP;
1648 }
1649 }
1650 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1651 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1652 adv = ADVERTISE_1000XPSE_ASYM;
1653 }
1654 else {
1655 adv = ADVERTISE_PAUSE_ASYM;
1656 }
1657 }
1658 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1659 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1660 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1661 }
1662 else {
1663 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1664 }
1665 }
1666 return adv;
1667}
1668
a2f13890 1669static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
0d8a6571 1670
/* Hand link configuration to the remote-PHY firmware: encode the
 * autoneg/speed/duplex/pause request into a BNX2_NETLINK_SET_LINK word,
 * write it to shared memory, and issue the CMD_SET_LINK firmware
 * command.  bp->phy_lock is dropped and re-acquired around the firmware
 * sync (see the sparse annotations).  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware sync may sleep; release the PHY lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1729
/* Configure a SerDes PHY for the requested link settings.
 * Delegates to the remote-PHY firmware path when applicable.  In forced
 * mode, programs BMCR/advertisement for the requested speed/duplex
 * (with chip-specific 2.5G force handling) and bounces the link when
 * the settings changed.  In autoneg mode, rewrites the advertisement
 * and restarts autoneg when needed, arming the SerDes timer to speed up
 * link-up against non-negotiating partners.  bp->phy_lock may be
 * dropped and re-acquired across the msleep().  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G advertisement requires a link bounce. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is an undocumented
				 * BMCR bit cleared for forced 1G on 5709.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve flow control. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1846
/* Advertisement masks used by the link-setup/ethtool code below.
 * NOTE: ETHTOOL_ALL_FIBRE_SPEED expands to an expression referencing a
 * local variable named "bp" — it is only usable inside functions with a
 * struct bnx2 *bp in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII register advertisement bits (ADVERTISE_*), as opposed to the
 * ethtool ADVERTISED_* bits above.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1861
/* Initialize default autoneg/advertising/req_* settings from the
 * shared-memory link word the firmware exposes for the current port
 * type (copper vs SerDes) in remote-PHY mode.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: translate each firmware speed bit into the
		 * corresponding ethtool ADVERTISED_* bit.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: later checks override earlier ones, so the
		 * highest configured speed wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1908
/* Pick the driver's default link settings.  Remote-PHY setups get them
 * from firmware shared memory; otherwise default to full autoneg,
 * except that a SerDes port honors a "force 1G" default from the port
 * hardware configuration.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			/* Hardware config requests forced 1G full duplex. */
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1934
/* Bump the driver pulse sequence number in firmware shared memory so
 * the bootcode knows the driver is alive.  Writes through the PCI
 * register window directly (under indirect_lock) rather than via
 * bnx2_shmem_wr().
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1948
/* Handle a link event from the remote-PHY firmware: decode the
 * BNX2_LINK_STATUS shared-memory word into link state, speed, duplex,
 * flow control and PHY port type, then report changes and reprogram
 * the MAC.  Also answers a pending heart-beat request.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect changes */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case sets half duplex and then deliberately
		 * falls through to the matching full-duplex case, which
		 * sets the common line speed.
		 */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		/* Flow control: forced when pause autoneg is not fully
		 * enabled, otherwise taken from the firmware status bits.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change invalidates the default link config. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2025
2026static int
2027bnx2_set_remote_link(struct bnx2 *bp)
2028{
2029 u32 evt_code;
2030
2726d6e1 2031 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
2032 switch (evt_code) {
2033 case BNX2_FW_EVT_CODE_LINK_EVENT:
2034 bnx2_remote_phy_event(bp);
2035 break;
2036 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2037 default:
df149d70 2038 bnx2_send_heart_beat(bp);
0d8a6571
MC
2039 break;
2040 }
2041 return 0;
2042}
2043
/* Configure a copper PHY for the requested link settings.
 * With autoneg: rewrite the 10/100 and 1000BASE-T advertisement
 * registers and restart autoneg only if something changed.  In forced
 * mode: write the new BMCR, bouncing the link first (via loopback +
 * 50ms) when it is currently up so the partner notices, and handle the
 * case where the link never drops by applying the new speed directly.
 * bp->phy_lock is dropped and re-acquired around the msleep().
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked to the bits we manage. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read BMSR twice — NOTE(review): presumably to clear the
		 * latched link-down indication (standard MII behavior);
		 * confirm.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2142
2143static int
0d8a6571 2144bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
2145__releases(&bp->phy_lock)
2146__acquires(&bp->phy_lock)
b6016b76
MC
2147{
2148 if (bp->loopback == MAC_LOOPBACK)
2149 return 0;
2150
583c28e5 2151 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
807540ba 2152 return bnx2_setup_serdes_phy(bp, port);
b6016b76
MC
2153 }
2154 else {
807540ba 2155 return bnx2_setup_copper_phy(bp);
b6016b76
MC
2156 }
2157}
2158
27a005b8 2159static int
9a120bc5 2160bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
27a005b8
MC
2161{
2162 u32 val;
2163
2164 bp->mii_bmcr = MII_BMCR + 0x10;
2165 bp->mii_bmsr = MII_BMSR + 0x10;
2166 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2167 bp->mii_adv = MII_ADVERTISE + 0x10;
2168 bp->mii_lpa = MII_LPA + 0x10;
2169 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2170
2171 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2172 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2173
2174 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
9a120bc5
MC
2175 if (reset_phy)
2176 bnx2_reset_phy(bp);
27a005b8
MC
2177
2178 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2179
2180 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2181 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2182 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2183 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2184
2185 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2186 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
583c28e5 2187 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
27a005b8
MC
2188 val |= BCM5708S_UP1_2G5;
2189 else
2190 val &= ~BCM5708S_UP1_2G5;
2191 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2192
2193 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2194 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2195 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2196 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2197
2198 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2199
2200 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2201 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2202 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2203
2204 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2205
2206 return 0;
2207}
2208
b6016b76 2209static int
9a120bc5 2210bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
5b0c76ad
MC
2211{
2212 u32 val;
2213
9a120bc5
MC
2214 if (reset_phy)
2215 bnx2_reset_phy(bp);
27a005b8
MC
2216
2217 bp->mii_up1 = BCM5708S_UP1;
2218
5b0c76ad
MC
2219 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2220 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2221 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2222
2223 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2224 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2225 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2226
2227 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2228 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2229 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2230
583c28e5 2231 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
5b0c76ad
MC
2232 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2233 val |= BCM5708S_UP1_2G5;
2234 bnx2_write_phy(bp, BCM5708S_UP1, val);
2235 }
2236
2237 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
2238 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2239 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
2240 /* increase tx signal amplitude */
2241 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2242 BCM5708S_BLK_ADDR_TX_MISC);
2243 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2244 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2245 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2246 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2247 }
2248
2726d6e1 2249 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
2250 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2251
2252 if (val) {
2253 u32 is_backplane;
2254
2726d6e1 2255 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
5b0c76ad
MC
2256 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2257 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2258 BCM5708S_BLK_ADDR_TX_MISC);
2259 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2260 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2261 BCM5708S_BLK_ADDR_DIG);
2262 }
2263 }
2264 return 0;
2265}
2266
2267static int
9a120bc5 2268bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
b6016b76 2269{
9a120bc5
MC
2270 if (reset_phy)
2271 bnx2_reset_phy(bp);
27a005b8 2272
583c28e5 2273 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
b6016b76 2274
59b47d8a
MC
2275 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2276 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
2277
2278 if (bp->dev->mtu > 1500) {
2279 u32 val;
2280
2281 /* Set extended packet length bit */
2282 bnx2_write_phy(bp, 0x18, 0x7);
2283 bnx2_read_phy(bp, 0x18, &val);
2284 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2285
2286 bnx2_write_phy(bp, 0x1c, 0x6c00);
2287 bnx2_read_phy(bp, 0x1c, &val);
2288 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2289 }
2290 else {
2291 u32 val;
2292
2293 bnx2_write_phy(bp, 0x18, 0x7);
2294 bnx2_read_phy(bp, 0x18, &val);
2295 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2296
2297 bnx2_write_phy(bp, 0x1c, 0x6c00);
2298 bnx2_read_phy(bp, 0x1c, &val);
2299 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2300 }
2301
2302 return 0;
2303}
2304
2305static int
9a120bc5 2306bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
b6016b76 2307{
5b0c76ad
MC
2308 u32 val;
2309
9a120bc5
MC
2310 if (reset_phy)
2311 bnx2_reset_phy(bp);
27a005b8 2312
583c28e5 2313 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
b6016b76
MC
2314 bnx2_write_phy(bp, 0x18, 0x0c00);
2315 bnx2_write_phy(bp, 0x17, 0x000a);
2316 bnx2_write_phy(bp, 0x15, 0x310b);
2317 bnx2_write_phy(bp, 0x17, 0x201f);
2318 bnx2_write_phy(bp, 0x15, 0x9506);
2319 bnx2_write_phy(bp, 0x17, 0x401f);
2320 bnx2_write_phy(bp, 0x15, 0x14e2);
2321 bnx2_write_phy(bp, 0x18, 0x0400);
2322 }
2323
583c28e5 2324 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
b659f44e
MC
2325 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2326 MII_BNX2_DSP_EXPAND_REG | 0x8);
2327 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2328 val &= ~(1 << 8);
2329 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2330 }
2331
b6016b76 2332 if (bp->dev->mtu > 1500) {
b6016b76
MC
2333 /* Set extended packet length bit */
2334 bnx2_write_phy(bp, 0x18, 0x7);
2335 bnx2_read_phy(bp, 0x18, &val);
2336 bnx2_write_phy(bp, 0x18, val | 0x4000);
2337
2338 bnx2_read_phy(bp, 0x10, &val);
2339 bnx2_write_phy(bp, 0x10, val | 0x1);
2340 }
2341 else {
b6016b76
MC
2342 bnx2_write_phy(bp, 0x18, 0x7);
2343 bnx2_read_phy(bp, 0x18, &val);
2344 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2345
2346 bnx2_read_phy(bp, 0x10, &val);
2347 bnx2_write_phy(bp, 0x10, val & ~0x1);
2348 }
2349
5b0c76ad
MC
2350 /* ethernet@wirespeed */
2351 bnx2_write_phy(bp, 0x18, 0x7007);
2352 bnx2_read_phy(bp, 0x18, &val);
2353 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
2354 return 0;
2355}
2356
2357
2358static int
9a120bc5 2359bnx2_init_phy(struct bnx2 *bp, int reset_phy)
52d07b1f
HH
2360__releases(&bp->phy_lock)
2361__acquires(&bp->phy_lock)
b6016b76
MC
2362{
2363 u32 val;
2364 int rc = 0;
2365
583c28e5
MC
2366 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2367 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
b6016b76 2368
ca58c3af
MC
2369 bp->mii_bmcr = MII_BMCR;
2370 bp->mii_bmsr = MII_BMSR;
27a005b8 2371 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
2372 bp->mii_adv = MII_ADVERTISE;
2373 bp->mii_lpa = MII_LPA;
2374
b6016b76
MC
2375 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2376
583c28e5 2377 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
2378 goto setup_phy;
2379
b6016b76
MC
2380 bnx2_read_phy(bp, MII_PHYSID1, &val);
2381 bp->phy_id = val << 16;
2382 bnx2_read_phy(bp, MII_PHYSID2, &val);
2383 bp->phy_id |= val & 0xffff;
2384
583c28e5 2385 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5b0c76ad 2386 if (CHIP_NUM(bp) == CHIP_NUM_5706)
9a120bc5 2387 rc = bnx2_init_5706s_phy(bp, reset_phy);
5b0c76ad 2388 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
9a120bc5 2389 rc = bnx2_init_5708s_phy(bp, reset_phy);
27a005b8 2390 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
9a120bc5 2391 rc = bnx2_init_5709s_phy(bp, reset_phy);
b6016b76
MC
2392 }
2393 else {
9a120bc5 2394 rc = bnx2_init_copper_phy(bp, reset_phy);
b6016b76
MC
2395 }
2396
0d8a6571
MC
2397setup_phy:
2398 if (!rc)
2399 rc = bnx2_setup_phy(bp, bp->phy_port);
b6016b76
MC
2400
2401 return rc;
2402}
2403
2404static int
2405bnx2_set_mac_loopback(struct bnx2 *bp)
2406{
2407 u32 mac_mode;
2408
2409 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2410 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2411 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2412 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2413 bp->link_up = 1;
2414 return 0;
2415}
2416
bc5a0690
MC
2417static int bnx2_test_link(struct bnx2 *);
2418
2419static int
2420bnx2_set_phy_loopback(struct bnx2 *bp)
2421{
2422 u32 mac_mode;
2423 int rc, i;
2424
2425 spin_lock_bh(&bp->phy_lock);
ca58c3af 2426 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
2427 BMCR_SPEED1000);
2428 spin_unlock_bh(&bp->phy_lock);
2429 if (rc)
2430 return rc;
2431
2432 for (i = 0; i < 10; i++) {
2433 if (bnx2_test_link(bp) == 0)
2434 break;
80be4434 2435 msleep(100);
bc5a0690
MC
2436 }
2437
2438 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2439 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2440 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 2441 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
2442
2443 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2444 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2445 bp->link_up = 1;
2446 return 0;
2447}
2448
ecdbf6e0
JH
2449static void
2450bnx2_dump_mcp_state(struct bnx2 *bp)
2451{
2452 struct net_device *dev = bp->dev;
2453 u32 mcp_p0, mcp_p1;
2454
2455 netdev_err(dev, "<--- start MCP states dump --->\n");
2456 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2457 mcp_p0 = BNX2_MCP_STATE_P0;
2458 mcp_p1 = BNX2_MCP_STATE_P1;
2459 } else {
2460 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2461 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2462 }
2463 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2464 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2465 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2466 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2467 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2468 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2469 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2470 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2471 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2472 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2473 netdev_err(dev, "DEBUG: shmem states:\n");
2474 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2475 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2476 bnx2_shmem_rd(bp, BNX2_FW_MB),
2477 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2478 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2479 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2480 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2481 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2482 pr_cont(" condition[%08x]\n",
2483 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2484 DP_SHMEM_LINE(bp, 0x3cc);
2485 DP_SHMEM_LINE(bp, 0x3dc);
2486 DP_SHMEM_LINE(bp, 0x3ec);
2487 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2488 netdev_err(dev, "<--- end MCP states dump --->\n");
2489}
2490
b6016b76 2491static int
a2f13890 2492bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
b6016b76
MC
2493{
2494 int i;
2495 u32 val;
2496
b6016b76
MC
2497 bp->fw_wr_seq++;
2498 msg_data |= bp->fw_wr_seq;
2499
2726d6e1 2500 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
b6016b76 2501
a2f13890
MC
2502 if (!ack)
2503 return 0;
2504
b6016b76 2505 /* wait for an acknowledgement. */
40105c0b 2506 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
b090ae2b 2507 msleep(10);
b6016b76 2508
2726d6e1 2509 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
b6016b76
MC
2510
2511 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2512 break;
2513 }
b090ae2b
MC
2514 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2515 return 0;
b6016b76
MC
2516
2517 /* If we timed out, inform the firmware that this is the case. */
b090ae2b 2518 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
b6016b76
MC
2519 msg_data &= ~BNX2_DRV_MSG_CODE;
2520 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2521
2726d6e1 2522 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
ecdbf6e0
JH
2523 if (!silent) {
2524 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2525 bnx2_dump_mcp_state(bp);
2526 }
b6016b76 2527
b6016b76
MC
2528 return -EBUSY;
2529 }
2530
b090ae2b
MC
2531 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2532 return -EIO;
2533
b6016b76
MC
2534 return 0;
2535}
2536
59b47d8a
MC
2537static int
2538bnx2_init_5709_context(struct bnx2 *bp)
2539{
2540 int i, ret = 0;
2541 u32 val;
2542
2543 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2544 val |= (BCM_PAGE_BITS - 8) << 16;
2545 REG_WR(bp, BNX2_CTX_COMMAND, val);
641bdcd5
MC
2546 for (i = 0; i < 10; i++) {
2547 val = REG_RD(bp, BNX2_CTX_COMMAND);
2548 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2549 break;
2550 udelay(2);
2551 }
2552 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2553 return -EBUSY;
2554
59b47d8a
MC
2555 for (i = 0; i < bp->ctx_pages; i++) {
2556 int j;
2557
352f7687
MC
2558 if (bp->ctx_blk[i])
2559 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2560 else
2561 return -ENOMEM;
2562
59b47d8a
MC
2563 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2564 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2565 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2566 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2567 (u64) bp->ctx_blk_mapping[i] >> 32);
2568 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2569 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2570 for (j = 0; j < 10; j++) {
2571
2572 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2573 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2574 break;
2575 udelay(5);
2576 }
2577 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2578 ret = -EBUSY;
2579 break;
2580 }
2581 }
2582 return ret;
2583}
2584
b6016b76
MC
2585static void
2586bnx2_init_context(struct bnx2 *bp)
2587{
2588 u32 vcid;
2589
2590 vcid = 96;
2591 while (vcid) {
2592 u32 vcid_addr, pcid_addr, offset;
7947b20e 2593 int i;
b6016b76
MC
2594
2595 vcid--;
2596
2597 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2598 u32 new_vcid;
2599
2600 vcid_addr = GET_PCID_ADDR(vcid);
2601 if (vcid & 0x8) {
2602 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2603 }
2604 else {
2605 new_vcid = vcid;
2606 }
2607 pcid_addr = GET_PCID_ADDR(new_vcid);
2608 }
2609 else {
2610 vcid_addr = GET_CID_ADDR(vcid);
2611 pcid_addr = vcid_addr;
2612 }
2613
7947b20e
MC
2614 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2615 vcid_addr += (i << PHY_CTX_SHIFT);
2616 pcid_addr += (i << PHY_CTX_SHIFT);
b6016b76 2617
5d5d0015 2618 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
7947b20e 2619 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
b6016b76 2620
7947b20e
MC
2621 /* Zero out the context. */
2622 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
62a8313c 2623 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
7947b20e 2624 }
b6016b76
MC
2625 }
2626}
2627
2628static int
2629bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2630{
2631 u16 *good_mbuf;
2632 u32 good_mbuf_cnt;
2633 u32 val;
2634
2635 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2636 if (good_mbuf == NULL) {
3a9c6a49 2637 pr_err("Failed to allocate memory in %s\n", __func__);
b6016b76
MC
2638 return -ENOMEM;
2639 }
2640
2641 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2642 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2643
2644 good_mbuf_cnt = 0;
2645
2646 /* Allocate a bunch of mbufs and save the good ones in an array. */
2726d6e1 2647 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76 2648 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2726d6e1
MC
2649 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2650 BNX2_RBUF_COMMAND_ALLOC_REQ);
b6016b76 2651
2726d6e1 2652 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
b6016b76
MC
2653
2654 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2655
2656 /* The addresses with Bit 9 set are bad memory blocks. */
2657 if (!(val & (1 << 9))) {
2658 good_mbuf[good_mbuf_cnt] = (u16) val;
2659 good_mbuf_cnt++;
2660 }
2661
2726d6e1 2662 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76
MC
2663 }
2664
2665 /* Free the good ones back to the mbuf pool thus discarding
2666 * all the bad ones. */
2667 while (good_mbuf_cnt) {
2668 good_mbuf_cnt--;
2669
2670 val = good_mbuf[good_mbuf_cnt];
2671 val = (val << 9) | val | 1;
2672
2726d6e1 2673 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
b6016b76
MC
2674 }
2675 kfree(good_mbuf);
2676 return 0;
2677}
2678
2679static void
5fcaed01 2680bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
b6016b76
MC
2681{
2682 u32 val;
b6016b76
MC
2683
2684 val = (mac_addr[0] << 8) | mac_addr[1];
2685
5fcaed01 2686 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
b6016b76 2687
6aa20a22 2688 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2689 (mac_addr[4] << 8) | mac_addr[5];
2690
5fcaed01 2691 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
b6016b76
MC
2692}
2693
47bf4246 2694static inline int
a2df00aa 2695bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
47bf4246
MC
2696{
2697 dma_addr_t mapping;
bb4f98ab 2698 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246 2699 struct rx_bd *rxbd =
bb4f98ab 2700 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
a2df00aa 2701 struct page *page = alloc_page(gfp);
47bf4246
MC
2702
2703 if (!page)
2704 return -ENOMEM;
36227e88 2705 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
47bf4246 2706 PCI_DMA_FROMDEVICE);
36227e88 2707 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
3d16af86
BL
2708 __free_page(page);
2709 return -EIO;
2710 }
2711
47bf4246 2712 rx_pg->page = page;
1a4ccc2d 2713 dma_unmap_addr_set(rx_pg, mapping, mapping);
47bf4246
MC
2714 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2715 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2716 return 0;
2717}
2718
2719static void
bb4f98ab 2720bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
47bf4246 2721{
bb4f98ab 2722 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246
MC
2723 struct page *page = rx_pg->page;
2724
2725 if (!page)
2726 return;
2727
36227e88
SG
2728 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2729 PAGE_SIZE, PCI_DMA_FROMDEVICE);
47bf4246
MC
2730
2731 __free_page(page);
2732 rx_pg->page = NULL;
2733}
2734
b6016b76 2735static inline int
a2df00aa 2736bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
b6016b76
MC
2737{
2738 struct sk_buff *skb;
bb4f98ab 2739 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
b6016b76 2740 dma_addr_t mapping;
bb4f98ab 2741 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
b6016b76
MC
2742 unsigned long align;
2743
a2df00aa 2744 skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
b6016b76
MC
2745 if (skb == NULL) {
2746 return -ENOMEM;
2747 }
2748
59b47d8a
MC
2749 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2750 skb_reserve(skb, BNX2_RX_ALIGN - align);
b6016b76 2751
36227e88
SG
2752 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2753 PCI_DMA_FROMDEVICE);
2754 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
3d16af86
BL
2755 dev_kfree_skb(skb);
2756 return -EIO;
2757 }
b6016b76
MC
2758
2759 rx_buf->skb = skb;
a33fa66b 2760 rx_buf->desc = (struct l2_fhdr *) skb->data;
1a4ccc2d 2761 dma_unmap_addr_set(rx_buf, mapping, mapping);
b6016b76
MC
2762
2763 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2764 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2765
bb4f98ab 2766 rxr->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76
MC
2767
2768 return 0;
2769}
2770
da3e4fbe 2771static int
35efa7c1 2772bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2773{
43e80b89 2774 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76 2775 u32 new_link_state, old_link_state;
da3e4fbe 2776 int is_set = 1;
b6016b76 2777
da3e4fbe
MC
2778 new_link_state = sblk->status_attn_bits & event;
2779 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2780 if (new_link_state != old_link_state) {
da3e4fbe
MC
2781 if (new_link_state)
2782 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2783 else
2784 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2785 } else
2786 is_set = 0;
2787
2788 return is_set;
2789}
2790
2791static void
35efa7c1 2792bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
da3e4fbe 2793{
74ecc62d
MC
2794 spin_lock(&bp->phy_lock);
2795
2796 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
b6016b76 2797 bnx2_set_link(bp);
35efa7c1 2798 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
0d8a6571
MC
2799 bnx2_set_remote_link(bp);
2800
74ecc62d
MC
2801 spin_unlock(&bp->phy_lock);
2802
b6016b76
MC
2803}
2804
ead7270b 2805static inline u16
35efa7c1 2806bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
ead7270b
MC
2807{
2808 u16 cons;
2809
43e80b89
MC
2810 /* Tell compiler that status block fields can change. */
2811 barrier();
2812 cons = *bnapi->hw_tx_cons_ptr;
581daf7e 2813 barrier();
ead7270b
MC
2814 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2815 cons++;
2816 return cons;
2817}
2818
57851d84
MC
2819static int
2820bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 2821{
35e9010b 2822 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
b6016b76 2823 u16 hw_cons, sw_cons, sw_ring_cons;
706bf240
BL
2824 int tx_pkt = 0, index;
2825 struct netdev_queue *txq;
2826
2827 index = (bnapi - bp->bnx2_napi);
2828 txq = netdev_get_tx_queue(bp->dev, index);
b6016b76 2829
35efa7c1 2830 hw_cons = bnx2_get_hw_tx_cons(bnapi);
35e9010b 2831 sw_cons = txr->tx_cons;
b6016b76
MC
2832
2833 while (sw_cons != hw_cons) {
3d16af86 2834 struct sw_tx_bd *tx_buf;
b6016b76
MC
2835 struct sk_buff *skb;
2836 int i, last;
2837
2838 sw_ring_cons = TX_RING_IDX(sw_cons);
2839
35e9010b 2840 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
b6016b76 2841 skb = tx_buf->skb;
1d39ed56 2842
d62fda08
ED
2843 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2844 prefetch(&skb->end);
2845
b6016b76 2846 /* partial BD completions possible with TSO packets */
d62fda08 2847 if (tx_buf->is_gso) {
b6016b76
MC
2848 u16 last_idx, last_ring_idx;
2849
d62fda08
ED
2850 last_idx = sw_cons + tx_buf->nr_frags + 1;
2851 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
b6016b76
MC
2852 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2853 last_idx++;
2854 }
2855 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2856 break;
2857 }
2858 }
1d39ed56 2859
36227e88 2860 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
e95524a7 2861 skb_headlen(skb), PCI_DMA_TODEVICE);
b6016b76
MC
2862
2863 tx_buf->skb = NULL;
d62fda08 2864 last = tx_buf->nr_frags;
b6016b76
MC
2865
2866 for (i = 0; i < last; i++) {
2867 sw_cons = NEXT_TX_BD(sw_cons);
e95524a7 2868
36227e88 2869 dma_unmap_page(&bp->pdev->dev,
1a4ccc2d 2870 dma_unmap_addr(
e95524a7
AD
2871 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2872 mapping),
2873 skb_shinfo(skb)->frags[i].size,
2874 PCI_DMA_TODEVICE);
b6016b76
MC
2875 }
2876
2877 sw_cons = NEXT_TX_BD(sw_cons);
2878
745720e5 2879 dev_kfree_skb(skb);
57851d84
MC
2880 tx_pkt++;
2881 if (tx_pkt == budget)
2882 break;
b6016b76 2883
d62fda08
ED
2884 if (hw_cons == sw_cons)
2885 hw_cons = bnx2_get_hw_tx_cons(bnapi);
b6016b76
MC
2886 }
2887
35e9010b
MC
2888 txr->hw_tx_cons = hw_cons;
2889 txr->tx_cons = sw_cons;
706bf240 2890
2f8af120 2891 /* Need to make the tx_cons update visible to bnx2_start_xmit()
706bf240 2892 * before checking for netif_tx_queue_stopped(). Without the
2f8af120
MC
2893 * memory barrier, there is a small possibility that bnx2_start_xmit()
2894 * will miss it and cause the queue to be stopped forever.
2895 */
2896 smp_mb();
b6016b76 2897
706bf240 2898 if (unlikely(netif_tx_queue_stopped(txq)) &&
35e9010b 2899 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
706bf240
BL
2900 __netif_tx_lock(txq, smp_processor_id());
2901 if ((netif_tx_queue_stopped(txq)) &&
35e9010b 2902 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
706bf240
BL
2903 netif_tx_wake_queue(txq);
2904 __netif_tx_unlock(txq);
b6016b76 2905 }
706bf240 2906
57851d84 2907 return tx_pkt;
b6016b76
MC
2908}
2909
1db82f2a 2910static void
bb4f98ab 2911bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
a1f60190 2912 struct sk_buff *skb, int count)
1db82f2a
MC
2913{
2914 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2915 struct rx_bd *cons_bd, *prod_bd;
1db82f2a 2916 int i;
3d16af86 2917 u16 hw_prod, prod;
bb4f98ab 2918 u16 cons = rxr->rx_pg_cons;
1db82f2a 2919
3d16af86
BL
2920 cons_rx_pg = &rxr->rx_pg_ring[cons];
2921
2922 /* The caller was unable to allocate a new page to replace the
2923 * last one in the frags array, so we need to recycle that page
2924 * and then free the skb.
2925 */
2926 if (skb) {
2927 struct page *page;
2928 struct skb_shared_info *shinfo;
2929
2930 shinfo = skb_shinfo(skb);
2931 shinfo->nr_frags--;
2932 page = shinfo->frags[shinfo->nr_frags].page;
2933 shinfo->frags[shinfo->nr_frags].page = NULL;
2934
2935 cons_rx_pg->page = page;
2936 dev_kfree_skb(skb);
2937 }
2938
2939 hw_prod = rxr->rx_pg_prod;
2940
1db82f2a
MC
2941 for (i = 0; i < count; i++) {
2942 prod = RX_PG_RING_IDX(hw_prod);
2943
bb4f98ab
MC
2944 prod_rx_pg = &rxr->rx_pg_ring[prod];
2945 cons_rx_pg = &rxr->rx_pg_ring[cons];
2946 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2947 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1db82f2a 2948
1db82f2a
MC
2949 if (prod != cons) {
2950 prod_rx_pg->page = cons_rx_pg->page;
2951 cons_rx_pg->page = NULL;
1a4ccc2d
FT
2952 dma_unmap_addr_set(prod_rx_pg, mapping,
2953 dma_unmap_addr(cons_rx_pg, mapping));
1db82f2a
MC
2954
2955 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2956 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2957
2958 }
2959 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2960 hw_prod = NEXT_RX_BD(hw_prod);
2961 }
bb4f98ab
MC
2962 rxr->rx_pg_prod = hw_prod;
2963 rxr->rx_pg_cons = cons;
1db82f2a
MC
2964}
2965
b6016b76 2966static inline void
bb4f98ab
MC
2967bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2968 struct sk_buff *skb, u16 cons, u16 prod)
b6016b76 2969{
236b6394
MC
2970 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2971 struct rx_bd *cons_bd, *prod_bd;
2972
bb4f98ab
MC
2973 cons_rx_buf = &rxr->rx_buf_ring[cons];
2974 prod_rx_buf = &rxr->rx_buf_ring[prod];
b6016b76 2975
36227e88 2976 dma_sync_single_for_device(&bp->pdev->dev,
1a4ccc2d 2977 dma_unmap_addr(cons_rx_buf, mapping),
601d3d18 2978 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
b6016b76 2979
bb4f98ab 2980 rxr->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76 2981
236b6394 2982 prod_rx_buf->skb = skb;
a33fa66b 2983 prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
b6016b76 2984
236b6394
MC
2985 if (cons == prod)
2986 return;
b6016b76 2987
1a4ccc2d
FT
2988 dma_unmap_addr_set(prod_rx_buf, mapping,
2989 dma_unmap_addr(cons_rx_buf, mapping));
236b6394 2990
bb4f98ab
MC
2991 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2992 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
236b6394
MC
2993 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2994 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
b6016b76
MC
2995}
2996
85833c62 2997static int
bb4f98ab 2998bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
a1f60190
MC
2999 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3000 u32 ring_idx)
85833c62
MC
3001{
3002 int err;
3003 u16 prod = ring_idx & 0xffff;
3004
a2df00aa 3005 err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
85833c62 3006 if (unlikely(err)) {
bb4f98ab 3007 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
1db82f2a
MC
3008 if (hdr_len) {
3009 unsigned int raw_len = len + 4;
3010 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3011
bb4f98ab 3012 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
1db82f2a 3013 }
85833c62
MC
3014 return err;
3015 }
3016
d89cb6af 3017 skb_reserve(skb, BNX2_RX_OFFSET);
36227e88 3018 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
85833c62
MC
3019 PCI_DMA_FROMDEVICE);
3020
1db82f2a
MC
3021 if (hdr_len == 0) {
3022 skb_put(skb, len);
3023 return 0;
3024 } else {
3025 unsigned int i, frag_len, frag_size, pages;
3026 struct sw_pg *rx_pg;
bb4f98ab
MC
3027 u16 pg_cons = rxr->rx_pg_cons;
3028 u16 pg_prod = rxr->rx_pg_prod;
1db82f2a
MC
3029
3030 frag_size = len + 4 - hdr_len;
3031 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3032 skb_put(skb, hdr_len);
3033
3034 for (i = 0; i < pages; i++) {
3d16af86
BL
3035 dma_addr_t mapping_old;
3036
1db82f2a
MC
3037 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3038 if (unlikely(frag_len <= 4)) {
3039 unsigned int tail = 4 - frag_len;
3040
bb4f98ab
MC
3041 rxr->rx_pg_cons = pg_cons;
3042 rxr->rx_pg_prod = pg_prod;
3043 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
a1f60190 3044 pages - i);
1db82f2a
MC
3045 skb->len -= tail;
3046 if (i == 0) {
3047 skb->tail -= tail;
3048 } else {
3049 skb_frag_t *frag =
3050 &skb_shinfo(skb)->frags[i - 1];
3051 frag->size -= tail;
3052 skb->data_len -= tail;
3053 skb->truesize -= tail;
3054 }
3055 return 0;
3056 }
bb4f98ab 3057 rx_pg = &rxr->rx_pg_ring[pg_cons];
1db82f2a 3058
3d16af86
BL
3059 /* Don't unmap yet. If we're unable to allocate a new
3060 * page, we need to recycle the page and the DMA addr.
3061 */
1a4ccc2d 3062 mapping_old = dma_unmap_addr(rx_pg, mapping);
1db82f2a
MC
3063 if (i == pages - 1)
3064 frag_len -= 4;
3065
3066 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3067 rx_pg->page = NULL;
3068
bb4f98ab 3069 err = bnx2_alloc_rx_page(bp, rxr,
a2df00aa
SG
3070 RX_PG_RING_IDX(pg_prod),
3071 GFP_ATOMIC);
1db82f2a 3072 if (unlikely(err)) {
bb4f98ab
MC
3073 rxr->rx_pg_cons = pg_cons;
3074 rxr->rx_pg_prod = pg_prod;
3075 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
a1f60190 3076 pages - i);
1db82f2a
MC
3077 return err;
3078 }
3079
36227e88 3080 dma_unmap_page(&bp->pdev->dev, mapping_old,
3d16af86
BL
3081 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3082
1db82f2a
MC
3083 frag_size -= frag_len;
3084 skb->data_len += frag_len;
3085 skb->truesize += frag_len;
3086 skb->len += frag_len;
3087
3088 pg_prod = NEXT_RX_BD(pg_prod);
3089 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3090 }
bb4f98ab
MC
3091 rxr->rx_pg_prod = pg_prod;
3092 rxr->rx_pg_cons = pg_cons;
1db82f2a 3093 }
85833c62
MC
3094 return 0;
3095}
3096
c09c2627 3097static inline u16
35efa7c1 3098bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
c09c2627 3099{
bb4f98ab
MC
3100 u16 cons;
3101
43e80b89
MC
3102 /* Tell compiler that status block fields can change. */
3103 barrier();
3104 cons = *bnapi->hw_rx_cons_ptr;
581daf7e 3105 barrier();
c09c2627
MC
3106 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3107 cons++;
3108 return cons;
3109}
3110
b6016b76 3111static int
35efa7c1 3112bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 3113{
bb4f98ab 3114 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76
MC
3115 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3116 struct l2_fhdr *rx_hdr;
1db82f2a 3117 int rx_pkt = 0, pg_ring_used = 0;
b6016b76 3118
35efa7c1 3119 hw_cons = bnx2_get_hw_rx_cons(bnapi);
bb4f98ab
MC
3120 sw_cons = rxr->rx_cons;
3121 sw_prod = rxr->rx_prod;
b6016b76
MC
3122
3123 /* Memory barrier necessary as speculative reads of the rx
3124 * buffer can be ahead of the index in the status block
3125 */
3126 rmb();
3127 while (sw_cons != hw_cons) {
1db82f2a 3128 unsigned int len, hdr_len;
ade2bfe7 3129 u32 status;
a33fa66b 3130 struct sw_bd *rx_buf, *next_rx_buf;
b6016b76 3131 struct sk_buff *skb;
236b6394 3132 dma_addr_t dma_addr;
b6016b76
MC
3133
3134 sw_ring_cons = RX_RING_IDX(sw_cons);
3135 sw_ring_prod = RX_RING_IDX(sw_prod);
3136
bb4f98ab 3137 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
b6016b76 3138 skb = rx_buf->skb;
a33fa66b 3139 prefetchw(skb);
236b6394 3140
aabef8b2
FT
3141 next_rx_buf =
3142 &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3143 prefetch(next_rx_buf->desc);
3144
236b6394
MC
3145 rx_buf->skb = NULL;
3146
1a4ccc2d 3147 dma_addr = dma_unmap_addr(rx_buf, mapping);
236b6394 3148
36227e88 3149 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
601d3d18
BL
3150 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3151 PCI_DMA_FROMDEVICE);
b6016b76 3152
a33fa66b 3153 rx_hdr = rx_buf->desc;
1db82f2a 3154 len = rx_hdr->l2_fhdr_pkt_len;
990ec380 3155 status = rx_hdr->l2_fhdr_status;
b6016b76 3156
1db82f2a
MC
3157 hdr_len = 0;
3158 if (status & L2_FHDR_STATUS_SPLIT) {
3159 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3160 pg_ring_used = 1;
3161 } else if (len > bp->rx_jumbo_thresh) {
3162 hdr_len = bp->rx_jumbo_thresh;
3163 pg_ring_used = 1;
3164 }
3165
990ec380
MC
3166 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3167 L2_FHDR_ERRORS_PHY_DECODE |
3168 L2_FHDR_ERRORS_ALIGNMENT |
3169 L2_FHDR_ERRORS_TOO_SHORT |
3170 L2_FHDR_ERRORS_GIANT_FRAME))) {
3171
3172 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3173 sw_ring_prod);
3174 if (pg_ring_used) {
3175 int pages;
3176
3177 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3178
3179 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3180 }
3181 goto next_rx;
3182 }
3183
1db82f2a 3184 len -= 4;
b6016b76 3185
5d5d0015 3186 if (len <= bp->rx_copy_thresh) {
b6016b76
MC
3187 struct sk_buff *new_skb;
3188
f22828e8 3189 new_skb = netdev_alloc_skb(bp->dev, len + 6);
85833c62 3190 if (new_skb == NULL) {
bb4f98ab 3191 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
85833c62
MC
3192 sw_ring_prod);
3193 goto next_rx;
3194 }
b6016b76
MC
3195
3196 /* aligned copy */
d89cb6af 3197 skb_copy_from_linear_data_offset(skb,
f22828e8
MC
3198 BNX2_RX_OFFSET - 6,
3199 new_skb->data, len + 6);
3200 skb_reserve(new_skb, 6);
b6016b76 3201 skb_put(new_skb, len);
b6016b76 3202
bb4f98ab 3203 bnx2_reuse_rx_skb(bp, rxr, skb,
b6016b76
MC
3204 sw_ring_cons, sw_ring_prod);
3205
3206 skb = new_skb;
bb4f98ab 3207 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
a1f60190 3208 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
b6016b76 3209 goto next_rx;
b6016b76 3210
f22828e8 3211 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
7d0fd211
JG
3212 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3213 __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
f22828e8 3214
b6016b76
MC
3215 skb->protocol = eth_type_trans(skb, bp->dev);
3216
3217 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 3218 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 3219
745720e5 3220 dev_kfree_skb(skb);
b6016b76
MC
3221 goto next_rx;
3222
3223 }
3224
bc8acf2c 3225 skb_checksum_none_assert(skb);
8d7dfc2b 3226 if ((bp->dev->features & NETIF_F_RXCSUM) &&
b6016b76
MC
3227 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3228 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3229
ade2bfe7
MC
3230 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3231 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
3232 skb->ip_summed = CHECKSUM_UNNECESSARY;
3233 }
fdc8541d
MC
3234 if ((bp->dev->features & NETIF_F_RXHASH) &&
3235 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3236 L2_FHDR_STATUS_USE_RXHASH))
3237 skb->rxhash = rx_hdr->l2_fhdr_hash;
b6016b76 3238
0c8dfc83 3239 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
7d0fd211 3240 napi_gro_receive(&bnapi->napi, skb);
b6016b76
MC
3241 rx_pkt++;
3242
3243next_rx:
b6016b76
MC
3244 sw_cons = NEXT_RX_BD(sw_cons);
3245 sw_prod = NEXT_RX_BD(sw_prod);
3246
3247 if ((rx_pkt == budget))
3248 break;
f4e418f7
MC
3249
3250 /* Refresh hw_cons to see if there is new work */
3251 if (sw_cons == hw_cons) {
35efa7c1 3252 hw_cons = bnx2_get_hw_rx_cons(bnapi);
f4e418f7
MC
3253 rmb();
3254 }
b6016b76 3255 }
bb4f98ab
MC
3256 rxr->rx_cons = sw_cons;
3257 rxr->rx_prod = sw_prod;
b6016b76 3258
1db82f2a 3259 if (pg_ring_used)
bb4f98ab 3260 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
1db82f2a 3261
bb4f98ab 3262 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
b6016b76 3263
bb4f98ab 3264 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
3265
3266 mmiowb();
3267
3268 return rx_pkt;
3269
3270}
3271
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 *
 * Writes the interrupt ack/command register with MASK_INT set to mask
 * further interrupts, then schedules NAPI unless interrupts have been
 * disabled via bp->intr_sem.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3294
8e6a72c4
MC
/* One-shot MSI ISR.  Unlike bnx2_msi(), no ack/mask register write is
 * done here; NAPI is scheduled directly unless interrupts are disabled
 * via bp->intr_sem.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3311
b6016b76 3312static irqreturn_t
7d12e780 3313bnx2_interrupt(int irq, void *dev_instance)
b6016b76 3314{
f0ea2e63
MC
3315 struct bnx2_napi *bnapi = dev_instance;
3316 struct bnx2 *bp = bnapi->bp;
43e80b89 3317 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76
MC
3318
3319 /* When using INTx, it is possible for the interrupt to arrive
3320 * at the CPU before the status block posted prior to the
3321 * interrupt. Reading a register will flush the status block.
3322 * When using MSI, the MSI message will always complete after
3323 * the status block write.
3324 */
35efa7c1 3325 if ((sblk->status_idx == bnapi->last_status_idx) &&
b6016b76
MC
3326 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3327 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 3328 return IRQ_NONE;
b6016b76
MC
3329
3330 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3331 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3332 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3333
b8a7ce7b
MC
3334 /* Read back to deassert IRQ immediately to avoid too many
3335 * spurious interrupts.
3336 */
3337 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3338
b6016b76 3339 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
3340 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3341 return IRQ_HANDLED;
b6016b76 3342
288379f0 3343 if (napi_schedule_prep(&bnapi->napi)) {
35efa7c1 3344 bnapi->last_status_idx = sblk->status_idx;
288379f0 3345 __napi_schedule(&bnapi->napi);
b8a7ce7b 3346 }
b6016b76 3347
73eef4cd 3348 return IRQ_HANDLED;
b6016b76
MC
3349}
3350
f4e418f7 3351static inline int
43e80b89 3352bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3353{
35e9010b 3354 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3355 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3356
bb4f98ab 3357 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3358 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3359 return 1;
43e80b89
MC
3360 return 0;
3361}
3362
3363#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3364 STATUS_ATTN_BITS_TIMER_ABORT)
3365
3366static inline int
3367bnx2_has_work(struct bnx2_napi *bnapi)
3368{
3369 struct status_block *sblk = bnapi->status_blk.msi;
3370
3371 if (bnx2_has_fast_work(bnapi))
3372 return 1;
f4e418f7 3373
4edd473f
MC
3374#ifdef BCM_CNIC
3375 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3376 return 1;
3377#endif
3378
da3e4fbe
MC
3379 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3380 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
3381 return 1;
3382
3383 return 0;
3384}
3385
efba0180
MC
/* Workaround for missed MSIs: if work is pending but the status index
 * has not moved since the last idle check, the MSI was presumably lost.
 * Pulse the MSI enable bit off/on and invoke the MSI handler directly
 * to get NAPI running again.  Called periodically (not from here) —
 * NOTE(review): caller context assumed to be the driver timer; confirm.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not currently enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable and service the interrupt
			 * by hand.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3407
4edd473f
MC
#ifdef BCM_CNIC
/* Hand the status block to the CNIC (iSCSI/FCoE offload) driver, if one
 * has registered.  cnic_ops is RCU-protected; the returned tag is used
 * by bnx2_has_work() to detect pending CNIC events.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3424
43e80b89 3425static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
b6016b76 3426{
43e80b89 3427 struct status_block *sblk = bnapi->status_blk.msi;
da3e4fbe
MC
3428 u32 status_attn_bits = sblk->status_attn_bits;
3429 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
b6016b76 3430
da3e4fbe
MC
3431 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3432 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
b6016b76 3433
35efa7c1 3434 bnx2_phy_int(bp, bnapi);
bf5295bb
MC
3435
3436 /* This is needed to take care of transient status
3437 * during link changes.
3438 */
3439 REG_WR(bp, BNX2_HC_COMMAND,
3440 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3441 REG_RD(bp, BNX2_HC_COMMAND);
b6016b76 3442 }
43e80b89
MC
3443}
3444
3445static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3446 int work_done, int budget)
3447{
3448 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3449 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76 3450
35e9010b 3451 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
57851d84 3452 bnx2_tx_int(bp, bnapi, 0);
b6016b76 3453
bb4f98ab 3454 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
35efa7c1 3455 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
6aa20a22 3456
6f535763
DM
3457 return work_done;
3458}
3459
f0ea2e63
MC
/* NAPI poll routine for MSI-X vectors (fast-path work only; link and
 * CNIC events are handled by the base vector's bnx2_poll()).  Completes
 * NAPI and re-enables the vector's interrupt when the budget was not
 * exhausted and no further ring work is pending.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack up to last_status_idx and unmask the vector. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3486
6f535763
DM
/* Main NAPI poll routine (INTx/MSI, and base MSI-X vector).  Handles
 * link attention, fast-path rings, and CNIC events, then completes
 * NAPI and re-enables interrupts when all work is done within budget.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			/* MSI/MSI-X: a single ack write re-enables the
			 * interrupt.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with the line still masked, then
			 * ack again to unmask.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3535
932ff279 3536/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
3537 * from set_multicast.
3538 */
3539static void
3540bnx2_set_rx_mode(struct net_device *dev)
3541{
972ec0d4 3542 struct bnx2 *bp = netdev_priv(dev);
b6016b76 3543 u32 rx_mode, sort_mode;
ccffad25 3544 struct netdev_hw_addr *ha;
b6016b76 3545 int i;
b6016b76 3546
9f52b564
MC
3547 if (!netif_running(dev))
3548 return;
3549
c770a65c 3550 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
3551
3552 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3553 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3554 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
7d0fd211
JG
3555 if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3556 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
b6016b76 3557 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
3558 if (dev->flags & IFF_PROMISC) {
3559 /* Promiscuous mode. */
3560 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
3561 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3562 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
3563 }
3564 else if (dev->flags & IFF_ALLMULTI) {
3565 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3566 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3567 0xffffffff);
3568 }
3569 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3570 }
3571 else {
3572 /* Accept one or more multicast(s). */
b6016b76
MC
3573 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3574 u32 regidx;
3575 u32 bit;
3576 u32 crc;
3577
3578 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3579
22bedad3
JP
3580 netdev_for_each_mc_addr(ha, dev) {
3581 crc = ether_crc_le(ETH_ALEN, ha->addr);
b6016b76
MC
3582 bit = crc & 0xff;
3583 regidx = (bit & 0xe0) >> 5;
3584 bit &= 0x1f;
3585 mc_filter[regidx] |= (1 << bit);
3586 }
3587
3588 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3589 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3590 mc_filter[i]);
3591 }
3592
3593 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3594 }
3595
32e7bfc4 3596 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
5fcaed01
BL
3597 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3598 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3599 BNX2_RPM_SORT_USER0_PROM_VLAN;
3600 } else if (!(dev->flags & IFF_PROMISC)) {
5fcaed01 3601 /* Add all entries into to the match filter list */
ccffad25 3602 i = 0;
32e7bfc4 3603 netdev_for_each_uc_addr(ha, dev) {
ccffad25 3604 bnx2_set_mac_addr(bp, ha->addr,
5fcaed01
BL
3605 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3606 sort_mode |= (1 <<
3607 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
ccffad25 3608 i++;
5fcaed01
BL
3609 }
3610
3611 }
3612
b6016b76
MC
3613 if (rx_mode != bp->rx_mode) {
3614 bp->rx_mode = rx_mode;
3615 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3616 }
3617
3618 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3619 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3620 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3621
c770a65c 3622 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3623}
3624
57579f76
MC
3625static int __devinit
3626check_fw_section(const struct firmware *fw,
3627 const struct bnx2_fw_file_section *section,
3628 u32 alignment, bool non_empty)
3629{
3630 u32 offset = be32_to_cpu(section->offset);
3631 u32 len = be32_to_cpu(section->len);
3632
3633 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3634 return -EINVAL;
3635 if ((non_empty && len == 0) || len > fw->size - offset ||
3636 len & (alignment - 1))
3637 return -EINVAL;
3638 return 0;
3639}
3640
3641static int __devinit
3642check_mips_fw_entry(const struct firmware *fw,
3643 const struct bnx2_mips_fw_file_entry *entry)
3644{
3645 if (check_fw_section(fw, &entry->text, 4, true) ||
3646 check_fw_section(fw, &entry->data, 4, false) ||
3647 check_fw_section(fw, &entry->rodata, 4, false))
3648 return -EINVAL;
3649 return 0;
3650}
3651
/* Load and validate the MIPS and RV2P firmware images for this chip.
 * File names are chosen by chip revision (5709 vs. older, and 5709 Ax
 * needs a different RV2P image).  Returns 0 on success or a negative
 * errno.  NOTE(review): on failure the already-requested firmware is
 * not released here — presumably the probe error path does that;
 * confirm against the caller.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the firmware files for this chip revision. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		return rc;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		return rc;
	}
	/* Sanity-check every section header before the images are used. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		return -EINVAL;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		return -EINVAL;
	}

	return 0;
}
3703
3704static u32
3705rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3706{
3707 switch (idx) {
3708 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3709 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3710 rv2p_code |= RV2P_BD_PAGE_SIZE;
3711 break;
3712 }
3713 return rv2p_code;
3714}
3715
/* Download one RV2P processor's firmware into the chip.
 *
 * The code is written 64 bits at a time through the INSTR_HIGH/LOW
 * register pair, then up to 8 fixup locations from the firmware file
 * are re-written with rv2p_fw_fixup() applied.  The processor is left
 * in reset; it is un-stalled later.  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for the target processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the code image, one 64-bit instruction per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-write the instructions listed in the fixup table, patching
	 * each one via rv2p_fw_fixup() before it is stored.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3775
af3ee519 3776static int
57579f76
MC
3777load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3778 const struct bnx2_mips_fw_file_entry *fw_entry)
b6016b76 3779{
57579f76
MC
3780 u32 addr, len, file_offset;
3781 __be32 *data;
b6016b76
MC
3782 u32 offset;
3783 u32 val;
3784
3785 /* Halt the CPU. */
2726d6e1 3786 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3787 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3788 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3789 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3790
3791 /* Load the Text area. */
57579f76
MC
3792 addr = be32_to_cpu(fw_entry->text.addr);
3793 len = be32_to_cpu(fw_entry->text.len);
3794 file_offset = be32_to_cpu(fw_entry->text.offset);
3795 data = (__be32 *)(bp->mips_firmware->data + file_offset);
ea1f8d5c 3796
57579f76
MC
3797 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3798 if (len) {
b6016b76
MC
3799 int j;
3800
57579f76
MC
3801 for (j = 0; j < (len / 4); j++, offset += 4)
3802 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3803 }
3804
57579f76
MC
3805 /* Load the Data area. */
3806 addr = be32_to_cpu(fw_entry->data.addr);
3807 len = be32_to_cpu(fw_entry->data.len);
3808 file_offset = be32_to_cpu(fw_entry->data.offset);
3809 data = (__be32 *)(bp->mips_firmware->data + file_offset);
b6016b76 3810
57579f76
MC
3811 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3812 if (len) {
b6016b76
MC
3813 int j;
3814
57579f76
MC
3815 for (j = 0; j < (len / 4); j++, offset += 4)
3816 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3817 }
3818
3819 /* Load the Read-Only area. */
57579f76
MC
3820 addr = be32_to_cpu(fw_entry->rodata.addr);
3821 len = be32_to_cpu(fw_entry->rodata.len);
3822 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3823 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3824
3825 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3826 if (len) {
b6016b76
MC
3827 int j;
3828
57579f76
MC
3829 for (j = 0; j < (len / 4); j++, offset += 4)
3830 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3831 }
3832
3833 /* Clear the pre-fetch instruction. */
2726d6e1 3834 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
57579f76
MC
3835
3836 val = be32_to_cpu(fw_entry->start_addr);
3837 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
b6016b76
MC
3838
3839 /* Start the CPU. */
2726d6e1 3840 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3841 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3842 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3843 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3844
3845 return 0;
b6016b76
MC
3846}
3847
fba9fe91 3848static int
b6016b76
MC
3849bnx2_init_cpus(struct bnx2 *bp)
3850{
57579f76
MC
3851 const struct bnx2_mips_fw_file *mips_fw =
3852 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3853 const struct bnx2_rv2p_fw_file *rv2p_fw =
3854 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3855 int rc;
b6016b76
MC
3856
3857 /* Initialize the RV2P processor. */
57579f76
MC
3858 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3859 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
b6016b76
MC
3860
3861 /* Initialize the RX Processor. */
57579f76 3862 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
fba9fe91
MC
3863 if (rc)
3864 goto init_cpu_err;
3865
b6016b76 3866 /* Initialize the TX Processor. */
57579f76 3867 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
fba9fe91
MC
3868 if (rc)
3869 goto init_cpu_err;
3870
b6016b76 3871 /* Initialize the TX Patch-up Processor. */
57579f76 3872 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
fba9fe91
MC
3873 if (rc)
3874 goto init_cpu_err;
3875
b6016b76 3876 /* Initialize the Completion Processor. */
57579f76 3877 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
fba9fe91
MC
3878 if (rc)
3879 goto init_cpu_err;
3880
d43584c8 3881 /* Initialize the Command Processor. */
57579f76 3882 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
b6016b76 3883
fba9fe91 3884init_cpu_err:
fba9fe91 3885 return rc;
b6016b76
MC
3886}
3887
/* Transition the device between PCI power states via the PM capability.
 *
 * PCI_D0: wake the chip (with the mandated delay when leaving D3hot)
 * and clear the wake-on-LAN receive configuration.
 * PCI_D3hot: if WoL is enabled, reprogram the PHY/MAC to receive magic
 * packets, notify the firmware, then enter D3hot.  Any other state
 * returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper for
			 * the low-power link, restoring settings after.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot (state value 3) when WoL
		 * is enabled; later chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4025
4026static int
4027bnx2_acquire_nvram_lock(struct bnx2 *bp)
4028{
4029 u32 val;
4030 int j;
4031
4032 /* Request access to the flash interface. */
4033 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4034 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4035 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4036 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4037 break;
4038
4039 udelay(5);
4040 }
4041
4042 if (j >= NVRAM_TIMEOUT_COUNT)
4043 return -EBUSY;
4044
4045 return 0;
4046}
4047
4048static int
4049bnx2_release_nvram_lock(struct bnx2 *bp)
4050{
4051 int j;
4052 u32 val;
4053
4054 /* Relinquish nvram interface. */
4055 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4056
4057 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4058 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4059 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4060 break;
4061
4062 udelay(5);
4063 }
4064
4065 if (j >= NVRAM_TIMEOUT_COUNT)
4066 return -EBUSY;
4067
4068 return 0;
4069}
4070
4071
4072static int
4073bnx2_enable_nvram_write(struct bnx2 *bp)
4074{
4075 u32 val;
4076
4077 val = REG_RD(bp, BNX2_MISC_CFG);
4078 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4079
e30372c9 4080 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
4081 int j;
4082
4083 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4084 REG_WR(bp, BNX2_NVM_COMMAND,
4085 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4086
4087 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4088 udelay(5);
4089
4090 val = REG_RD(bp, BNX2_NVM_COMMAND);
4091 if (val & BNX2_NVM_COMMAND_DONE)
4092 break;
4093 }
4094
4095 if (j >= NVRAM_TIMEOUT_COUNT)
4096 return -EBUSY;
4097 }
4098 return 0;
4099}
4100
4101static void
4102bnx2_disable_nvram_write(struct bnx2 *bp)
4103{
4104 u32 val;
4105
4106 val = REG_RD(bp, BNX2_MISC_CFG);
4107 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4108}
4109
4110
4111static void
4112bnx2_enable_nvram_access(struct bnx2 *bp)
4113{
4114 u32 val;
4115
4116 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4117 /* Enable both bits, even on read. */
6aa20a22 4118 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4119 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4120}
4121
4122static void
4123bnx2_disable_nvram_access(struct bnx2 *bp)
4124{
4125 u32 val;
4126
4127 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4128 /* Disable both bits, even after read. */
6aa20a22 4129 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4130 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4131 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4132}
4133
4134static int
4135bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4136{
4137 u32 cmd;
4138 int j;
4139
e30372c9 4140 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
b6016b76
MC
4141 /* Buffered flash, no erase needed */
4142 return 0;
4143
4144 /* Build an erase command */
4145 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4146 BNX2_NVM_COMMAND_DOIT;
4147
4148 /* Need to clear DONE bit separately. */
4149 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4150
4151 /* Address of the NVRAM to read from. */
4152 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4153
4154 /* Issue an erase command. */
4155 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4156
4157 /* Wait for completion. */
4158 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4159 u32 val;
4160
4161 udelay(5);
4162
4163 val = REG_RD(bp, BNX2_NVM_COMMAND);
4164 if (val & BNX2_NVM_COMMAND_DONE)
4165 break;
4166 }
4167
4168 if (j >= NVRAM_TIMEOUT_COUNT)
4169 return -EBUSY;
4170
4171 return 0;
4172}
4173
4174static int
4175bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4176{
4177 u32 cmd;
4178 int j;
4179
4180 /* Build the command word. */
4181 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4182
e30372c9
MC
4183 /* Calculate an offset of a buffered flash, not needed for 5709. */
4184 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
4185 offset = ((offset / bp->flash_info->page_size) <<
4186 bp->flash_info->page_bits) +
4187 (offset % bp->flash_info->page_size);
4188 }
4189
4190 /* Need to clear DONE bit separately. */
4191 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4192
4193 /* Address of the NVRAM to read from. */
4194 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4195
4196 /* Issue a read command. */
4197 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4198
4199 /* Wait for completion. */
4200 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4201 u32 val;
4202
4203 udelay(5);
4204
4205 val = REG_RD(bp, BNX2_NVM_COMMAND);
4206 if (val & BNX2_NVM_COMMAND_DONE) {
b491edd5
AV
4207 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4208 memcpy(ret_val, &v, 4);
b6016b76
MC
4209 break;
4210 }
4211 }
4212 if (j >= NVRAM_TIMEOUT_COUNT)
4213 return -EBUSY;
4214
4215 return 0;
4216}
4217
4218
4219static int
4220bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4221{
b491edd5
AV
4222 u32 cmd;
4223 __be32 val32;
b6016b76
MC
4224 int j;
4225
4226 /* Build the command word. */
4227 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4228
e30372c9
MC
4229 /* Calculate an offset of a buffered flash, not needed for 5709. */
4230 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
4231 offset = ((offset / bp->flash_info->page_size) <<
4232 bp->flash_info->page_bits) +
4233 (offset % bp->flash_info->page_size);
4234 }
4235
4236 /* Need to clear DONE bit separately. */
4237 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4238
4239 memcpy(&val32, val, 4);
b6016b76
MC
4240
4241 /* Write the data. */
b491edd5 4242 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
b6016b76
MC
4243
4244 /* Address of the NVRAM to write to. */
4245 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4246
4247 /* Issue the write command. */
4248 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4249
4250 /* Wait for completion. */
4251 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4252 udelay(5);
4253
4254 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4255 break;
4256 }
4257 if (j >= NVRAM_TIMEOUT_COUNT)
4258 return -EBUSY;
4259
4260 return 0;
4261}
4262
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * The 5709 has a fixed flash description (flash_5709).  Older chips
 * are matched against flash_table[] using the strapping bits in
 * NVM_CFG1; if the interface has not yet been reconfigured, the
 * matching entry's config registers are programmed into the chip.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 of NVM_CFG1 indicates the interface was already
	 * reconfigured (e.g. by the boot code). */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared hw config; fall back to the
	 * table entry's total size when the field is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4345
4346static int
4347bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4348 int buf_size)
4349{
4350 int rc = 0;
4351 u32 cmd_flags, offset32, len32, extra;
4352
4353 if (buf_size == 0)
4354 return 0;
4355
4356 /* Request access to the flash interface. */
4357 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4358 return rc;
4359
4360 /* Enable access to flash interface */
4361 bnx2_enable_nvram_access(bp);
4362
4363 len32 = buf_size;
4364 offset32 = offset;
4365 extra = 0;
4366
4367 cmd_flags = 0;
4368
4369 if (offset32 & 3) {
4370 u8 buf[4];
4371 u32 pre_len;
4372
4373 offset32 &= ~3;
4374 pre_len = 4 - (offset & 3);
4375
4376 if (pre_len >= len32) {
4377 pre_len = len32;
4378 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4379 BNX2_NVM_COMMAND_LAST;
4380 }
4381 else {
4382 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4383 }
4384
4385 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4386
4387 if (rc)
4388 return rc;
4389
4390 memcpy(ret_buf, buf + (offset & 3), pre_len);
4391
4392 offset32 += 4;
4393 ret_buf += pre_len;
4394 len32 -= pre_len;
4395 }
4396 if (len32 & 3) {
4397 extra = 4 - (len32 & 3);
4398 len32 = (len32 + 4) & ~3;
4399 }
4400
4401 if (len32 == 4) {
4402 u8 buf[4];
4403
4404 if (cmd_flags)
4405 cmd_flags = BNX2_NVM_COMMAND_LAST;
4406 else
4407 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4408 BNX2_NVM_COMMAND_LAST;
4409
4410 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4411
4412 memcpy(ret_buf, buf, 4 - extra);
4413 }
4414 else if (len32 > 0) {
4415 u8 buf[4];
4416
4417 /* Read the first word. */
4418 if (cmd_flags)
4419 cmd_flags = 0;
4420 else
4421 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4422
4423 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4424
4425 /* Advance to the next dword. */
4426 offset32 += 4;
4427 ret_buf += 4;
4428 len32 -= 4;
4429
4430 while (len32 > 4 && rc == 0) {
4431 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4432
4433 /* Advance to the next dword. */
4434 offset32 += 4;
4435 ret_buf += 4;
4436 len32 -= 4;
4437 }
4438
4439 if (rc)
4440 return rc;
4441
4442 cmd_flags = BNX2_NVM_COMMAND_LAST;
4443 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4444
4445 memcpy(ret_buf, buf, 4 - extra);
4446 }
4447
4448 /* Disable access to flash interface */
4449 bnx2_disable_nvram_access(bp);
4450
4451 bnx2_release_nvram_lock(bp);
4452
4453 return rc;
4454}
4455
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 *
 * Unaligned head/tail bytes are handled by reading the surrounding
 * dwords first and merging the new data into a kmalloc'd bounce
 * buffer (align_buf).  For non-buffered flash, each affected page is
 * read into flash_buffer, erased, and rewritten in full (preserved
 * prefix + new data + preserved suffix); buffered flash is written
 * in place.  The NVRAM lock/access/write-enable state is acquired
 * and released around each page.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, ...).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the dword containing the first bytes
	 * so they can be preserved in the merged buffer. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Ragged tail: fetch the final dword for the same reason. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved head/tail bytes with the caller's data. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch page for read-modify-write.
	 * 264 bytes covers the largest supported page size. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4635
/* Negotiate optional capabilities with the bootcode firmware.
 *
 * Reads the capability mailbox (BNX2_FW_CAP_MB); if the signature
 * matches, records VLAN-keep and remote-PHY capabilities in bp->flags
 * / bp->phy_flags, sets bp->phy_port from the firmware link status
 * for remote-PHY SerDes parts, and acknowledges the accepted
 * capabilities back to firmware when the interface is running.
 * Caller is expected to hold the appropriate lock (called under
 * phy_lock from bnx2_reset_chip()).
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; capabilities are re-derived below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF management firmware, VLANs can always be kept. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	/* Remote PHY is only meaningful on SerDes ports. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Derive the port type from the firmware's link status. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	/* Ack accepted capabilities back to the firmware. */
	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4675
/* Map the MSI-X vector table and PBA into the PCI GRC windows.
 * The window register is first switched to separate-window mode,
 * then windows 2 and 3 are pointed at the table and PBA addresses.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4684
/* Perform a coordinated soft reset of the chip.
 *
 * Sequence: quiesce DMA (chip-revision specific), handshake WAIT0
 * with firmware using @reset_code, deposit the driver reset
 * signature, issue the reset (MISC_COMMAND on 5709, PCICFG on older
 * parts), verify endian configuration, handshake WAIT1, then
 * re-negotiate firmware capabilities and apply per-revision
 * workarounds (5706 A0 voltage/rbuf) and MSI-X window setup.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* Disable DMA and poll (up to 100ms) until no PCI
		 * transactions are pending. */
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the port type may change when
	 * a remote PHY is in use. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
		       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4809
/* Bring the freshly-reset chip to an operational state.
 *
 * Configures DMA byte/word swapping, context memory, on-chip CPUs
 * (firmware load), NVRAM, MAC address, MQ/RV2P/TBDR page sizes, MTU,
 * status/statistics block DMA addresses, host-coalescing parameters
 * (including the per-vector MSI-X status blocks), the RX filter, and
 * finally performs the WAIT2 firmware handshake and enables the
 * chip's default blocks.
 *
 * Returns 0 on success or a negative errno from a sub-step.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF sizing uses at least the standard 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear all status blocks and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Hint the firmware about the latency profile in use. */
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	/* Program coalescing for the additional MSI-X status blocks. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Re-enable DMA on the 5709 (disabled during reset). */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
5039
c76c0475
MC
5040static void
5041bnx2_clear_ring_states(struct bnx2 *bp)
5042{
5043 struct bnx2_napi *bnapi;
35e9010b 5044 struct bnx2_tx_ring_info *txr;
bb4f98ab 5045 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
5046 int i;
5047
5048 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5049 bnapi = &bp->bnx2_napi[i];
35e9010b 5050 txr = &bnapi->tx_ring;
bb4f98ab 5051 rxr = &bnapi->rx_ring;
c76c0475 5052
35e9010b
MC
5053 txr->tx_cons = 0;
5054 txr->hw_tx_cons = 0;
bb4f98ab
MC
5055 rxr->rx_prod_bseq = 0;
5056 rxr->rx_prod = 0;
5057 rxr->rx_cons = 0;
5058 rxr->rx_pg_prod = 0;
5059 rxr->rx_pg_cons = 0;
c76c0475
MC
5060 }
5061}
5062
59b47d8a 5063static void
35e9010b 5064bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
59b47d8a
MC
5065{
5066 u32 val, offset0, offset1, offset2, offset3;
62a8313c 5067 u32 cid_addr = GET_CID_ADDR(cid);
59b47d8a
MC
5068
5069 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5070 offset0 = BNX2_L2CTX_TYPE_XI;
5071 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5072 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5073 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5074 } else {
5075 offset0 = BNX2_L2CTX_TYPE;
5076 offset1 = BNX2_L2CTX_CMD_TYPE;
5077 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5078 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5079 }
5080 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
62a8313c 5081 bnx2_ctx_wr(bp, cid_addr, offset0, val);
59b47d8a
MC
5082
5083 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
62a8313c 5084 bnx2_ctx_wr(bp, cid_addr, offset1, val);
59b47d8a 5085
35e9010b 5086 val = (u64) txr->tx_desc_mapping >> 32;
62a8313c 5087 bnx2_ctx_wr(bp, cid_addr, offset2, val);
59b47d8a 5088
35e9010b 5089 val = (u64) txr->tx_desc_mapping & 0xffffffff;
62a8313c 5090 bnx2_ctx_wr(bp, cid_addr, offset3, val);
59b47d8a 5091}
b6016b76
MC
5092
5093static void
35e9010b 5094bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
b6016b76
MC
5095{
5096 struct tx_bd *txbd;
c76c0475
MC
5097 u32 cid = TX_CID;
5098 struct bnx2_napi *bnapi;
35e9010b 5099 struct bnx2_tx_ring_info *txr;
c76c0475 5100
35e9010b
MC
5101 bnapi = &bp->bnx2_napi[ring_num];
5102 txr = &bnapi->tx_ring;
5103
5104 if (ring_num == 0)
5105 cid = TX_CID;
5106 else
5107 cid = TX_TSS_CID + ring_num - 1;
b6016b76 5108
2f8af120
MC
5109 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5110
35e9010b 5111 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 5112
35e9010b
MC
5113 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5114 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
b6016b76 5115
35e9010b
MC
5116 txr->tx_prod = 0;
5117 txr->tx_prod_bseq = 0;
6aa20a22 5118
35e9010b
MC
5119 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5120 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 5121
35e9010b 5122 bnx2_init_tx_context(bp, cid, txr);
b6016b76
MC
5123}
5124
5125static void
5d5d0015
MC
5126bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5127 int num_rings)
b6016b76 5128{
b6016b76 5129 int i;
5d5d0015 5130 struct rx_bd *rxbd;
6aa20a22 5131
5d5d0015 5132 for (i = 0; i < num_rings; i++) {
13daffa2 5133 int j;
b6016b76 5134
5d5d0015 5135 rxbd = &rx_ring[i][0];
13daffa2 5136 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 5137 rxbd->rx_bd_len = buf_size;
13daffa2
MC
5138 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5139 }
5d5d0015 5140 if (i == (num_rings - 1))
13daffa2
MC
5141 j = 0;
5142 else
5143 j = i + 1;
5d5d0015
MC
5144 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5145 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 5146 }
5d5d0015
MC
5147}
5148
5149static void
bb4f98ab 5150bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5d5d0015
MC
5151{
5152 int i;
5153 u16 prod, ring_prod;
bb4f98ab
MC
5154 u32 cid, rx_cid_addr, val;
5155 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5156 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5157
5158 if (ring_num == 0)
5159 cid = RX_CID;
5160 else
5161 cid = RX_RSS_CID + ring_num - 1;
5162
5163 rx_cid_addr = GET_CID_ADDR(cid);
5d5d0015 5164
bb4f98ab 5165 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5d5d0015
MC
5166 bp->rx_buf_use_size, bp->rx_max_ring);
5167
bb4f98ab 5168 bnx2_init_rx_context(bp, cid);
83e3fc89
MC
5169
5170 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5171 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5172 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5173 }
5174
62a8313c 5175 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
47bf4246 5176 if (bp->rx_pg_ring_size) {
bb4f98ab
MC
5177 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5178 rxr->rx_pg_desc_mapping,
47bf4246
MC
5179 PAGE_SIZE, bp->rx_max_pg_ring);
5180 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
62a8313c
MC
5181 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5182 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5e9ad9e1 5183 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
47bf4246 5184
bb4f98ab 5185 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
62a8313c 5186 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
47bf4246 5187
bb4f98ab 5188 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
62a8313c 5189 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
47bf4246
MC
5190
5191 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5192 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5193 }
b6016b76 5194
bb4f98ab 5195 val = (u64) rxr->rx_desc_mapping[0] >> 32;
62a8313c 5196 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
b6016b76 5197
bb4f98ab 5198 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
62a8313c 5199 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
b6016b76 5200
bb4f98ab 5201 ring_prod = prod = rxr->rx_pg_prod;
47bf4246 5202 for (i = 0; i < bp->rx_pg_ring_size; i++) {
a2df00aa 5203 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
3a9c6a49
JP
5204 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5205 ring_num, i, bp->rx_pg_ring_size);
47bf4246 5206 break;
b929e53c 5207 }
47bf4246
MC
5208 prod = NEXT_RX_BD(prod);
5209 ring_prod = RX_PG_RING_IDX(prod);
5210 }
bb4f98ab 5211 rxr->rx_pg_prod = prod;
47bf4246 5212
bb4f98ab 5213 ring_prod = prod = rxr->rx_prod;
236b6394 5214 for (i = 0; i < bp->rx_ring_size; i++) {
a2df00aa 5215 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
3a9c6a49
JP
5216 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5217 ring_num, i, bp->rx_ring_size);
b6016b76 5218 break;
b929e53c 5219 }
b6016b76
MC
5220 prod = NEXT_RX_BD(prod);
5221 ring_prod = RX_RING_IDX(prod);
5222 }
bb4f98ab 5223 rxr->rx_prod = prod;
b6016b76 5224
bb4f98ab
MC
5225 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5226 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5227 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
b6016b76 5228
bb4f98ab
MC
5229 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5230 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5231
5232 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
5233}
5234
35e9010b
MC
5235static void
5236bnx2_init_all_rings(struct bnx2 *bp)
5237{
5238 int i;
5e9ad9e1 5239 u32 val;
35e9010b
MC
5240
5241 bnx2_clear_ring_states(bp);
5242
5243 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5244 for (i = 0; i < bp->num_tx_rings; i++)
5245 bnx2_init_tx_ring(bp, i);
5246
5247 if (bp->num_tx_rings > 1)
5248 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5249 (TX_TSS_CID << 7));
5250
5e9ad9e1
MC
5251 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5252 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5253
bb4f98ab
MC
5254 for (i = 0; i < bp->num_rx_rings; i++)
5255 bnx2_init_rx_ring(bp, i);
5e9ad9e1
MC
5256
5257 if (bp->num_rx_rings > 1) {
22fa159d 5258 u32 tbl_32 = 0;
5e9ad9e1
MC
5259
5260 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
22fa159d
MC
5261 int shift = (i % 8) << 2;
5262
5263 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5264 if ((i % 8) == 7) {
5265 REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5266 REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5267 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5268 BNX2_RLUP_RSS_COMMAND_WRITE |
5269 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5270 tbl_32 = 0;
5271 }
5e9ad9e1
MC
5272 }
5273
5274 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5275 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5276
5277 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5278
5279 }
35e9010b
MC
5280}
5281
5d5d0015 5282static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 5283{
5d5d0015 5284 u32 max, num_rings = 1;
13daffa2 5285
5d5d0015
MC
5286 while (ring_size > MAX_RX_DESC_CNT) {
5287 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
5288 num_rings++;
5289 }
5290 /* round to next power of 2 */
5d5d0015 5291 max = max_size;
13daffa2
MC
5292 while ((max & num_rings) == 0)
5293 max >>= 1;
5294
5295 if (num_rings != max)
5296 max <<= 1;
5297
5d5d0015
MC
5298 return max;
5299}
5300
5301static void
5302bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5303{
84eaa187 5304 u32 rx_size, rx_space, jumbo_size;
5d5d0015
MC
5305
5306 /* 8 for CRC and VLAN */
d89cb6af 5307 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5d5d0015 5308
84eaa187
MC
5309 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5310 sizeof(struct skb_shared_info);
5311
601d3d18 5312 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
47bf4246
MC
5313 bp->rx_pg_ring_size = 0;
5314 bp->rx_max_pg_ring = 0;
5315 bp->rx_max_pg_ring_idx = 0;
f86e82fb 5316 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
84eaa187
MC
5317 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5318
5319 jumbo_size = size * pages;
5320 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5321 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5322
5323 bp->rx_pg_ring_size = jumbo_size;
5324 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5325 MAX_RX_PG_RINGS);
5326 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
601d3d18 5327 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
84eaa187
MC
5328 bp->rx_copy_thresh = 0;
5329 }
5d5d0015
MC
5330
5331 bp->rx_buf_use_size = rx_size;
5332 /* hw alignment */
5333 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
d89cb6af 5334 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5d5d0015
MC
5335 bp->rx_ring_size = size;
5336 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
13daffa2
MC
5337 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5338}
5339
b6016b76
MC
5340static void
5341bnx2_free_tx_skbs(struct bnx2 *bp)
5342{
5343 int i;
5344
35e9010b
MC
5345 for (i = 0; i < bp->num_tx_rings; i++) {
5346 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5347 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5348 int j;
b6016b76 5349
35e9010b 5350 if (txr->tx_buf_ring == NULL)
b6016b76 5351 continue;
b6016b76 5352
35e9010b 5353 for (j = 0; j < TX_DESC_CNT; ) {
3d16af86 5354 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
35e9010b 5355 struct sk_buff *skb = tx_buf->skb;
e95524a7 5356 int k, last;
35e9010b
MC
5357
5358 if (skb == NULL) {
5359 j++;
5360 continue;
5361 }
5362
36227e88 5363 dma_unmap_single(&bp->pdev->dev,
1a4ccc2d 5364 dma_unmap_addr(tx_buf, mapping),
e95524a7
AD
5365 skb_headlen(skb),
5366 PCI_DMA_TODEVICE);
b6016b76 5367
35e9010b 5368 tx_buf->skb = NULL;
b6016b76 5369
e95524a7
AD
5370 last = tx_buf->nr_frags;
5371 j++;
5372 for (k = 0; k < last; k++, j++) {
5373 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
36227e88 5374 dma_unmap_page(&bp->pdev->dev,
1a4ccc2d 5375 dma_unmap_addr(tx_buf, mapping),
e95524a7
AD
5376 skb_shinfo(skb)->frags[k].size,
5377 PCI_DMA_TODEVICE);
5378 }
35e9010b 5379 dev_kfree_skb(skb);
b6016b76 5380 }
b6016b76 5381 }
b6016b76
MC
5382}
5383
5384static void
5385bnx2_free_rx_skbs(struct bnx2 *bp)
5386{
5387 int i;
5388
bb4f98ab
MC
5389 for (i = 0; i < bp->num_rx_rings; i++) {
5390 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5391 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5392 int j;
b6016b76 5393
bb4f98ab
MC
5394 if (rxr->rx_buf_ring == NULL)
5395 return;
b6016b76 5396
bb4f98ab
MC
5397 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5398 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5399 struct sk_buff *skb = rx_buf->skb;
b6016b76 5400
bb4f98ab
MC
5401 if (skb == NULL)
5402 continue;
b6016b76 5403
36227e88 5404 dma_unmap_single(&bp->pdev->dev,
1a4ccc2d 5405 dma_unmap_addr(rx_buf, mapping),
bb4f98ab
MC
5406 bp->rx_buf_use_size,
5407 PCI_DMA_FROMDEVICE);
b6016b76 5408
bb4f98ab
MC
5409 rx_buf->skb = NULL;
5410
5411 dev_kfree_skb(skb);
5412 }
5413 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5414 bnx2_free_rx_page(bp, rxr, j);
b6016b76
MC
5415 }
5416}
5417
/* Release every TX and RX buffer still owned by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5424
5425static int
5426bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5427{
5428 int rc;
5429
5430 rc = bnx2_reset_chip(bp, reset_code);
5431 bnx2_free_skbs(bp);
5432 if (rc)
5433 return rc;
5434
fba9fe91
MC
5435 if ((rc = bnx2_init_chip(bp)) != 0)
5436 return rc;
5437
35e9010b 5438 bnx2_init_all_rings(bp);
b6016b76
MC
5439 return 0;
5440}
5441
5442static int
9a120bc5 5443bnx2_init_nic(struct bnx2 *bp, int reset_phy)
b6016b76
MC
5444{
5445 int rc;
5446
5447 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5448 return rc;
5449
80be4434 5450 spin_lock_bh(&bp->phy_lock);
9a120bc5 5451 bnx2_init_phy(bp, reset_phy);
b6016b76 5452 bnx2_set_link(bp);
543a827d
MC
5453 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5454 bnx2_remote_phy_event(bp);
0d8a6571 5455 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5456 return 0;
5457}
5458
74bf4ba3
MC
5459static int
5460bnx2_shutdown_chip(struct bnx2 *bp)
5461{
5462 u32 reset_code;
5463
5464 if (bp->flags & BNX2_FLAG_NO_WOL)
5465 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5466 else if (bp->wol)
5467 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5468 else
5469 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5470
5471 return bnx2_reset_chip(bp, reset_code);
5472}
5473
b6016b76
MC
5474static int
5475bnx2_test_registers(struct bnx2 *bp)
5476{
5477 int ret;
5bae30c9 5478 int i, is_5709;
f71e1309 5479 static const struct {
b6016b76
MC
5480 u16 offset;
5481 u16 flags;
5bae30c9 5482#define BNX2_FL_NOT_5709 1
b6016b76
MC
5483 u32 rw_mask;
5484 u32 ro_mask;
5485 } reg_tbl[] = {
5486 { 0x006c, 0, 0x00000000, 0x0000003f },
5487 { 0x0090, 0, 0xffffffff, 0x00000000 },
5488 { 0x0094, 0, 0x00000000, 0x00000000 },
5489
5bae30c9
MC
5490 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5491 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5492 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5493 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5494 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5495 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5496 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5497 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5498 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5499
5500 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5501 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5502 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5503 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5504 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5505 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5506
5507 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5508 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5509 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
5510
5511 { 0x1000, 0, 0x00000000, 0x00000001 },
15b169cc 5512 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
b6016b76
MC
5513
5514 { 0x1408, 0, 0x01c00800, 0x00000000 },
5515 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5516 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 5517 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
5518 { 0x14b0, 0, 0x00000002, 0x00000001 },
5519 { 0x14b8, 0, 0x00000000, 0x00000000 },
5520 { 0x14c0, 0, 0x00000000, 0x00000009 },
5521 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5522 { 0x14cc, 0, 0x00000000, 0x00000001 },
5523 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
5524
5525 { 0x1800, 0, 0x00000000, 0x00000001 },
5526 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
5527
5528 { 0x2800, 0, 0x00000000, 0x00000001 },
5529 { 0x2804, 0, 0x00000000, 0x00003f01 },
5530 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5531 { 0x2810, 0, 0xffff0000, 0x00000000 },
5532 { 0x2814, 0, 0xffff0000, 0x00000000 },
5533 { 0x2818, 0, 0xffff0000, 0x00000000 },
5534 { 0x281c, 0, 0xffff0000, 0x00000000 },
5535 { 0x2834, 0, 0xffffffff, 0x00000000 },
5536 { 0x2840, 0, 0x00000000, 0xffffffff },
5537 { 0x2844, 0, 0x00000000, 0xffffffff },
5538 { 0x2848, 0, 0xffffffff, 0x00000000 },
5539 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5540
5541 { 0x2c00, 0, 0x00000000, 0x00000011 },
5542 { 0x2c04, 0, 0x00000000, 0x00030007 },
5543
b6016b76
MC
5544 { 0x3c00, 0, 0x00000000, 0x00000001 },
5545 { 0x3c04, 0, 0x00000000, 0x00070000 },
5546 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5547 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5548 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5549 { 0x3c14, 0, 0x00000000, 0xffffffff },
5550 { 0x3c18, 0, 0x00000000, 0xffffffff },
5551 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5552 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
5553
5554 { 0x5004, 0, 0x00000000, 0x0000007f },
5555 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 5556
b6016b76
MC
5557 { 0x5c00, 0, 0x00000000, 0x00000001 },
5558 { 0x5c04, 0, 0x00000000, 0x0003000f },
5559 { 0x5c08, 0, 0x00000003, 0x00000000 },
5560 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5561 { 0x5c10, 0, 0x00000000, 0xffffffff },
5562 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5563 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5564 { 0x5c88, 0, 0x00000000, 0x00077373 },
5565 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5566
5567 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5568 { 0x680c, 0, 0xffffffff, 0x00000000 },
5569 { 0x6810, 0, 0xffffffff, 0x00000000 },
5570 { 0x6814, 0, 0xffffffff, 0x00000000 },
5571 { 0x6818, 0, 0xffffffff, 0x00000000 },
5572 { 0x681c, 0, 0xffffffff, 0x00000000 },
5573 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5574 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5575 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5576 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5577 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5578 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5579 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5580 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5581 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5582 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5583 { 0x684c, 0, 0xffffffff, 0x00000000 },
5584 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5585 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5586 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5587 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5588 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5589 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5590
5591 { 0xffff, 0, 0x00000000, 0x00000000 },
5592 };
5593
5594 ret = 0;
5bae30c9
MC
5595 is_5709 = 0;
5596 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5597 is_5709 = 1;
5598
b6016b76
MC
5599 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5600 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
5601 u16 flags = reg_tbl[i].flags;
5602
5603 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5604 continue;
b6016b76
MC
5605
5606 offset = (u32) reg_tbl[i].offset;
5607 rw_mask = reg_tbl[i].rw_mask;
5608 ro_mask = reg_tbl[i].ro_mask;
5609
14ab9b86 5610 save_val = readl(bp->regview + offset);
b6016b76 5611
14ab9b86 5612 writel(0, bp->regview + offset);
b6016b76 5613
14ab9b86 5614 val = readl(bp->regview + offset);
b6016b76
MC
5615 if ((val & rw_mask) != 0) {
5616 goto reg_test_err;
5617 }
5618
5619 if ((val & ro_mask) != (save_val & ro_mask)) {
5620 goto reg_test_err;
5621 }
5622
14ab9b86 5623 writel(0xffffffff, bp->regview + offset);
b6016b76 5624
14ab9b86 5625 val = readl(bp->regview + offset);
b6016b76
MC
5626 if ((val & rw_mask) != rw_mask) {
5627 goto reg_test_err;
5628 }
5629
5630 if ((val & ro_mask) != (save_val & ro_mask)) {
5631 goto reg_test_err;
5632 }
5633
14ab9b86 5634 writel(save_val, bp->regview + offset);
b6016b76
MC
5635 continue;
5636
5637reg_test_err:
14ab9b86 5638 writel(save_val, bp->regview + offset);
b6016b76
MC
5639 ret = -ENODEV;
5640 break;
5641 }
5642 return ret;
5643}
5644
5645static int
5646bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5647{
f71e1309 5648 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5649 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5650 int i;
5651
5652 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5653 u32 offset;
5654
5655 for (offset = 0; offset < size; offset += 4) {
5656
2726d6e1 5657 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5658
2726d6e1 5659 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5660 test_pattern[i]) {
5661 return -ENODEV;
5662 }
5663 }
5664 }
5665 return 0;
5666}
5667
5668static int
5669bnx2_test_memory(struct bnx2 *bp)
5670{
5671 int ret = 0;
5672 int i;
5bae30c9 5673 static struct mem_entry {
b6016b76
MC
5674 u32 offset;
5675 u32 len;
5bae30c9 5676 } mem_tbl_5706[] = {
b6016b76 5677 { 0x60000, 0x4000 },
5b0c76ad 5678 { 0xa0000, 0x3000 },
b6016b76
MC
5679 { 0xe0000, 0x4000 },
5680 { 0x120000, 0x4000 },
5681 { 0x1a0000, 0x4000 },
5682 { 0x160000, 0x4000 },
5683 { 0xffffffff, 0 },
5bae30c9
MC
5684 },
5685 mem_tbl_5709[] = {
5686 { 0x60000, 0x4000 },
5687 { 0xa0000, 0x3000 },
5688 { 0xe0000, 0x4000 },
5689 { 0x120000, 0x4000 },
5690 { 0x1a0000, 0x4000 },
5691 { 0xffffffff, 0 },
b6016b76 5692 };
5bae30c9
MC
5693 struct mem_entry *mem_tbl;
5694
5695 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5696 mem_tbl = mem_tbl_5709;
5697 else
5698 mem_tbl = mem_tbl_5706;
b6016b76
MC
5699
5700 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5701 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5702 mem_tbl[i].len)) != 0) {
5703 return ret;
5704 }
5705 }
6aa20a22 5706
b6016b76
MC
5707 return ret;
5708}
5709
bc5a0690
MC
5710#define BNX2_MAC_LOOPBACK 0
5711#define BNX2_PHY_LOOPBACK 1
5712
b6016b76 5713static int
bc5a0690 5714bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
5715{
5716 unsigned int pkt_size, num_pkts, i;
5717 struct sk_buff *skb, *rx_skb;
5718 unsigned char *packet;
bc5a0690 5719 u16 rx_start_idx, rx_idx;
b6016b76
MC
5720 dma_addr_t map;
5721 struct tx_bd *txbd;
5722 struct sw_bd *rx_buf;
5723 struct l2_fhdr *rx_hdr;
5724 int ret = -ENODEV;
c76c0475 5725 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
35e9010b 5726 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 5727 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
c76c0475
MC
5728
5729 tx_napi = bnapi;
b6016b76 5730
35e9010b 5731 txr = &tx_napi->tx_ring;
bb4f98ab 5732 rxr = &bnapi->rx_ring;
bc5a0690
MC
5733 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5734 bp->loopback = MAC_LOOPBACK;
5735 bnx2_set_mac_loopback(bp);
5736 }
5737 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
583c28e5 5738 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
489310a4
MC
5739 return 0;
5740
80be4434 5741 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
5742 bnx2_set_phy_loopback(bp);
5743 }
5744 else
5745 return -EINVAL;
b6016b76 5746
84eaa187 5747 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 5748 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
5749 if (!skb)
5750 return -ENOMEM;
b6016b76 5751 packet = skb_put(skb, pkt_size);
6634292b 5752 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
5753 memset(packet + 6, 0x0, 8);
5754 for (i = 14; i < pkt_size; i++)
5755 packet[i] = (unsigned char) (i & 0xff);
5756
36227e88
SG
5757 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5758 PCI_DMA_TODEVICE);
5759 if (dma_mapping_error(&bp->pdev->dev, map)) {
3d16af86
BL
5760 dev_kfree_skb(skb);
5761 return -EIO;
5762 }
b6016b76 5763
bf5295bb
MC
5764 REG_WR(bp, BNX2_HC_COMMAND,
5765 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5766
b6016b76
MC
5767 REG_RD(bp, BNX2_HC_COMMAND);
5768
5769 udelay(5);
35efa7c1 5770 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 5771
b6016b76
MC
5772 num_pkts = 0;
5773
35e9010b 5774 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
b6016b76
MC
5775
5776 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5777 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5778 txbd->tx_bd_mss_nbytes = pkt_size;
5779 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5780
5781 num_pkts++;
35e9010b
MC
5782 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5783 txr->tx_prod_bseq += pkt_size;
b6016b76 5784
35e9010b
MC
5785 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5786 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
5787
5788 udelay(100);
5789
bf5295bb
MC
5790 REG_WR(bp, BNX2_HC_COMMAND,
5791 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5792
b6016b76
MC
5793 REG_RD(bp, BNX2_HC_COMMAND);
5794
5795 udelay(5);
5796
36227e88 5797 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 5798 dev_kfree_skb(skb);
b6016b76 5799
35e9010b 5800 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
b6016b76 5801 goto loopback_test_done;
b6016b76 5802
35efa7c1 5803 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
5804 if (rx_idx != rx_start_idx + num_pkts) {
5805 goto loopback_test_done;
5806 }
5807
bb4f98ab 5808 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
b6016b76
MC
5809 rx_skb = rx_buf->skb;
5810
a33fa66b 5811 rx_hdr = rx_buf->desc;
d89cb6af 5812 skb_reserve(rx_skb, BNX2_RX_OFFSET);
b6016b76 5813
36227e88 5814 dma_sync_single_for_cpu(&bp->pdev->dev,
1a4ccc2d 5815 dma_unmap_addr(rx_buf, mapping),
b6016b76
MC
5816 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5817
ade2bfe7 5818 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
5819 (L2_FHDR_ERRORS_BAD_CRC |
5820 L2_FHDR_ERRORS_PHY_DECODE |
5821 L2_FHDR_ERRORS_ALIGNMENT |
5822 L2_FHDR_ERRORS_TOO_SHORT |
5823 L2_FHDR_ERRORS_GIANT_FRAME)) {
5824
5825 goto loopback_test_done;
5826 }
5827
5828 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5829 goto loopback_test_done;
5830 }
5831
5832 for (i = 14; i < pkt_size; i++) {
5833 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5834 goto loopback_test_done;
5835 }
5836 }
5837
5838 ret = 0;
5839
5840loopback_test_done:
5841 bp->loopback = 0;
5842 return ret;
5843}
5844
bc5a0690
MC
5845#define BNX2_MAC_LOOPBACK_FAILED 1
5846#define BNX2_PHY_LOOPBACK_FAILED 2
5847#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5848 BNX2_PHY_LOOPBACK_FAILED)
5849
5850static int
5851bnx2_test_loopback(struct bnx2 *bp)
5852{
5853 int rc = 0;
5854
5855 if (!netif_running(bp->dev))
5856 return BNX2_LOOPBACK_FAILED;
5857
5858 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5859 spin_lock_bh(&bp->phy_lock);
9a120bc5 5860 bnx2_init_phy(bp, 1);
bc5a0690
MC
5861 spin_unlock_bh(&bp->phy_lock);
5862 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5863 rc |= BNX2_MAC_LOOPBACK_FAILED;
5864 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5865 rc |= BNX2_PHY_LOOPBACK_FAILED;
5866 return rc;
5867}
5868
b6016b76
MC
5869#define NVRAM_SIZE 0x200
5870#define CRC32_RESIDUAL 0xdebb20e3
5871
5872static int
5873bnx2_test_nvram(struct bnx2 *bp)
5874{
b491edd5 5875 __be32 buf[NVRAM_SIZE / 4];
b6016b76
MC
5876 u8 *data = (u8 *) buf;
5877 int rc = 0;
5878 u32 magic, csum;
5879
5880 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5881 goto test_nvram_done;
5882
5883 magic = be32_to_cpu(buf[0]);
5884 if (magic != 0x669955aa) {
5885 rc = -ENODEV;
5886 goto test_nvram_done;
5887 }
5888
5889 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5890 goto test_nvram_done;
5891
5892 csum = ether_crc_le(0x100, data);
5893 if (csum != CRC32_RESIDUAL) {
5894 rc = -ENODEV;
5895 goto test_nvram_done;
5896 }
5897
5898 csum = ether_crc_le(0x100, data + 0x100);
5899 if (csum != CRC32_RESIDUAL) {
5900 rc = -ENODEV;
5901 }
5902
5903test_nvram_done:
5904 return rc;
5905}
5906
5907static int
5908bnx2_test_link(struct bnx2 *bp)
5909{
5910 u32 bmsr;
5911
9f52b564
MC
5912 if (!netif_running(bp->dev))
5913 return -ENODEV;
5914
583c28e5 5915 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
489310a4
MC
5916 if (bp->link_up)
5917 return 0;
5918 return -ENODEV;
5919 }
c770a65c 5920 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5921 bnx2_enable_bmsr1(bp);
5922 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5923 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5924 bnx2_disable_bmsr1(bp);
c770a65c 5925 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5926
b6016b76
MC
5927 if (bmsr & BMSR_LSTATUS) {
5928 return 0;
5929 }
5930 return -ENODEV;
5931}
5932
5933static int
5934bnx2_test_intr(struct bnx2 *bp)
5935{
5936 int i;
b6016b76
MC
5937 u16 status_idx;
5938
5939 if (!netif_running(bp->dev))
5940 return -ENODEV;
5941
5942 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5943
5944 /* This register is not touched during run-time. */
bf5295bb 5945 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5946 REG_RD(bp, BNX2_HC_COMMAND);
5947
5948 for (i = 0; i < 10; i++) {
5949 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5950 status_idx) {
5951
5952 break;
5953 }
5954
5955 msleep_interruptible(10);
5956 }
5957 if (i < 10)
5958 return 0;
5959
5960 return -ENODEV;
5961}
5962
38ea3686 5963/* Determining link for parallel detection. */
b2fadeae
MC
5964static int
5965bnx2_5706_serdes_has_link(struct bnx2 *bp)
5966{
5967 u32 mode_ctl, an_dbg, exp;
5968
38ea3686
MC
5969 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5970 return 0;
5971
b2fadeae
MC
5972 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5973 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5974
5975 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5976 return 0;
5977
5978 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5979 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5980 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5981
f3014c0c 5982 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
b2fadeae
MC
5983 return 0;
5984
5985 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5986 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5987 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5988
5989 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5990 return 0;
5991
5992 return 1;
5993}
5994
b6016b76 5995static void
48b01e2d 5996bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 5997{
b2fadeae
MC
5998 int check_link = 1;
5999
48b01e2d 6000 spin_lock(&bp->phy_lock);
b2fadeae 6001 if (bp->serdes_an_pending) {
48b01e2d 6002 bp->serdes_an_pending--;
b2fadeae
MC
6003 check_link = 0;
6004 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
48b01e2d 6005 u32 bmcr;
b6016b76 6006
ac392abc 6007 bp->current_interval = BNX2_TIMER_INTERVAL;
cd339a0e 6008
ca58c3af 6009 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 6010
48b01e2d 6011 if (bmcr & BMCR_ANENABLE) {
b2fadeae 6012 if (bnx2_5706_serdes_has_link(bp)) {
48b01e2d
MC
6013 bmcr &= ~BMCR_ANENABLE;
6014 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 6015 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
583c28e5 6016 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d 6017 }
b6016b76 6018 }
48b01e2d
MC
6019 }
6020 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
583c28e5 6021 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
48b01e2d 6022 u32 phy2;
b6016b76 6023
48b01e2d
MC
6024 bnx2_write_phy(bp, 0x17, 0x0f01);
6025 bnx2_read_phy(bp, 0x15, &phy2);
6026 if (phy2 & 0x20) {
6027 u32 bmcr;
cd339a0e 6028
ca58c3af 6029 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 6030 bmcr |= BMCR_ANENABLE;
ca58c3af 6031 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 6032
583c28e5 6033 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d
MC
6034 }
6035 } else
ac392abc 6036 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 6037
a2724e25 6038 if (check_link) {
b2fadeae
MC
6039 u32 val;
6040
6041 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6042 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6043 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6044
a2724e25
MC
6045 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6046 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6047 bnx2_5706s_force_link_dn(bp, 1);
6048 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6049 } else
6050 bnx2_set_link(bp);
6051 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6052 bnx2_set_link(bp);
b2fadeae 6053 }
48b01e2d
MC
6054 spin_unlock(&bp->phy_lock);
6055}
b6016b76 6056
f8dd064e
MC
6057static void
6058bnx2_5708_serdes_timer(struct bnx2 *bp)
6059{
583c28e5 6060 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
6061 return;
6062
583c28e5 6063 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
f8dd064e
MC
6064 bp->serdes_an_pending = 0;
6065 return;
6066 }
b6016b76 6067
f8dd064e
MC
6068 spin_lock(&bp->phy_lock);
6069 if (bp->serdes_an_pending)
6070 bp->serdes_an_pending--;
6071 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6072 u32 bmcr;
b6016b76 6073
ca58c3af 6074 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 6075 if (bmcr & BMCR_ANENABLE) {
605a9e20 6076 bnx2_enable_forced_2g5(bp);
40105c0b 6077 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
f8dd064e 6078 } else {
605a9e20 6079 bnx2_disable_forced_2g5(bp);
f8dd064e 6080 bp->serdes_an_pending = 2;
ac392abc 6081 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 6082 }
b6016b76 6083
f8dd064e 6084 } else
ac392abc 6085 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 6086
f8dd064e
MC
6087 spin_unlock(&bp->phy_lock);
6088}
6089
48b01e2d
MC
6090static void
6091bnx2_timer(unsigned long data)
6092{
6093 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 6094
48b01e2d
MC
6095 if (!netif_running(bp->dev))
6096 return;
b6016b76 6097
48b01e2d
MC
6098 if (atomic_read(&bp->intr_sem) != 0)
6099 goto bnx2_restart_timer;
b6016b76 6100
efba0180
MC
6101 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6102 BNX2_FLAG_USING_MSI)
6103 bnx2_chk_missed_msi(bp);
6104
df149d70 6105 bnx2_send_heart_beat(bp);
b6016b76 6106
2726d6e1
MC
6107 bp->stats_blk->stat_FwRxDrop =
6108 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 6109
02537b06 6110 /* workaround occasional corrupted counters */
61d9e3fa 6111 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
02537b06
MC
6112 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6113 BNX2_HC_COMMAND_STATS_NOW);
6114
583c28e5 6115 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
f8dd064e
MC
6116 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6117 bnx2_5706_serdes_timer(bp);
27a005b8 6118 else
f8dd064e 6119 bnx2_5708_serdes_timer(bp);
b6016b76
MC
6120 }
6121
6122bnx2_restart_timer:
cd339a0e 6123 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
6124}
6125
8e6a72c4
MC
6126static int
6127bnx2_request_irq(struct bnx2 *bp)
6128{
6d866ffc 6129 unsigned long flags;
b4b36042
MC
6130 struct bnx2_irq *irq;
6131 int rc = 0, i;
8e6a72c4 6132
f86e82fb 6133 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6d866ffc
MC
6134 flags = 0;
6135 else
6136 flags = IRQF_SHARED;
b4b36042
MC
6137
6138 for (i = 0; i < bp->irq_nvecs; i++) {
6139 irq = &bp->irq_tbl[i];
c76c0475 6140 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
f0ea2e63 6141 &bp->bnx2_napi[i]);
b4b36042
MC
6142 if (rc)
6143 break;
6144 irq->requested = 1;
6145 }
8e6a72c4
MC
6146 return rc;
6147}
6148
6149static void
a29ba9d2 6150__bnx2_free_irq(struct bnx2 *bp)
8e6a72c4 6151{
b4b36042
MC
6152 struct bnx2_irq *irq;
6153 int i;
8e6a72c4 6154
b4b36042
MC
6155 for (i = 0; i < bp->irq_nvecs; i++) {
6156 irq = &bp->irq_tbl[i];
6157 if (irq->requested)
f0ea2e63 6158 free_irq(irq->vector, &bp->bnx2_napi[i]);
b4b36042 6159 irq->requested = 0;
6d866ffc 6160 }
a29ba9d2
MC
6161}
6162
6163static void
6164bnx2_free_irq(struct bnx2 *bp)
6165{
6166
6167 __bnx2_free_irq(bp);
f86e82fb 6168 if (bp->flags & BNX2_FLAG_USING_MSI)
b4b36042 6169 pci_disable_msi(bp->pdev);
f86e82fb 6170 else if (bp->flags & BNX2_FLAG_USING_MSIX)
b4b36042
MC
6171 pci_disable_msix(bp->pdev);
6172
f86e82fb 6173 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
b4b36042
MC
6174}
6175
6176static void
5e9ad9e1 6177bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
b4b36042 6178{
379b39a2 6179 int i, total_vecs, rc;
57851d84 6180 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
4e1d0de9
MC
6181 struct net_device *dev = bp->dev;
6182 const int len = sizeof(bp->irq_tbl[0].name);
57851d84 6183
b4b36042
MC
6184 bnx2_setup_msix_tbl(bp);
6185 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6186 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6187 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
57851d84 6188
e2eb8e38
BL
6189 /* Need to flush the previous three writes to ensure MSI-X
6190 * is setup properly */
6191 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6192
57851d84
MC
6193 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6194 msix_ent[i].entry = i;
6195 msix_ent[i].vector = 0;
6196 }
6197
379b39a2
MC
6198 total_vecs = msix_vecs;
6199#ifdef BCM_CNIC
6200 total_vecs++;
6201#endif
6202 rc = -ENOSPC;
6203 while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6204 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6205 if (rc <= 0)
6206 break;
6207 if (rc > 0)
6208 total_vecs = rc;
6209 }
6210
57851d84
MC
6211 if (rc != 0)
6212 return;
6213
379b39a2
MC
6214 msix_vecs = total_vecs;
6215#ifdef BCM_CNIC
6216 msix_vecs--;
6217#endif
5e9ad9e1 6218 bp->irq_nvecs = msix_vecs;
f86e82fb 6219 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
379b39a2 6220 for (i = 0; i < total_vecs; i++) {
57851d84 6221 bp->irq_tbl[i].vector = msix_ent[i].vector;
69010313
MC
6222 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6223 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6224 }
6d866ffc
MC
6225}
6226
/* Choose the interrupt mode for the device: MSI-X if capable (and not
 * disabled by @dis_msi), then MSI, otherwise legacy INTx.  Also sizes
 * the tx/rx queue counts to the number of vectors obtained.
 * Returns the result of netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the ring count */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default: single legacy INTx vector */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to MSI only if MSI-X was not enabled above */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Tx ring count must be a power of two */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	bp->num_rx_rings = bp->irq_nvecs;
	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6261
/* Called with rtnl_lock */
/* ndo_open: power up the chip, pick an interrupt mode, allocate rings,
 * initialize the NIC and start the queues.  If MSI is active but fails
 * the interrupt self-test, fall back to INTx and re-init.  On any error
 * all acquired resources are released via the open_err path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	/* Fresh open starts with a clean software stats accumulator */
	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces legacy INTx */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything acquired above; safe even if only partially set up */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	return rc;
}
6340
/* Deferred reset worker (scheduled from bnx2_tx_timeout()).  Takes the
 * rtnl lock itself; bails out if the device was closed in the meantime.
 * Stops the netif, re-initializes the NIC, and restarts; closes the
 * device if the re-init fails.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* napi must be re-enabled before dev_close() can stop it */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	/* Block interrupt handling until bnx2_netif_start() re-arms it */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6368
/* Dump key PCI config and chip status registers to the kernel log as a
 * debugging aid (used from the tx timeout path).
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* PBA window is only mapped when MSI-X is in use */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6392
b6016b76
MC
6393static void
6394bnx2_tx_timeout(struct net_device *dev)
6395{
972ec0d4 6396 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6397
20175c57 6398 bnx2_dump_state(bp);
ecdbf6e0 6399 bnx2_dump_mcp_state(bp);
20175c57 6400
b6016b76
MC
6401 /* This allows the netif to be shutdown gracefully before resetting */
6402 schedule_work(&bp->reset_task);
6403}
6404
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* ndo_start_xmit: map the skb (head + fragments) for DMA, build the tx
 * buffer descriptors (checksum/VLAN/LSO flags), and ring the doorbell.
 * On a fragment mapping failure, every mapping made so far is unwound
 * and the skb is dropped (NETDEV_TX_OK so the stack does not retry).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/* Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Need one BD for the head plus one per fragment */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}

	/* LSO: encode TCP/IP option lengths (and for IPv6, the transport
	 * header offset) into the BD flags for the hardware
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* tcp_off is spread across several BD fields */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		/* Nothing mapped yet; just drop the packet */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
				       len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Last BD written gets the END flag */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Doorbell: tell the chip about the new producer index */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6577
/* Called with rtnl_lock */
/* ndo_stop: quiesce interrupts and NAPI, stop the chip, and release all
 * resources acquired by bnx2_open(), finishing by dropping to D3hot.
 * Order matters: interrupts and timer must be stopped before freeing
 * IRQs and memory.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6597
/* Accumulate the current hardware statistics block into temp_stats_blk
 * before a chip reset wipes the hardware counters.  The first ten
 * counters are 64-bit (stored as hi/lo u32 pairs) and need manual carry
 * propagation; the rest are plain 32-bit adds.
 */
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

		hi = temp_stats[i] + hw_stats[i];
		/* Sum low words in 64 bits to detect carry into hi */
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
		if (lo > 0xffffffff)
			hi++;
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
		temp_stats[i] += hw_stats[i];
}
6621
/* Combine a hi/lo u32 counter pair into one u64 value. */
#define GET_64BIT_NET_STATS64(ctr)				\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* 64-bit counter: live hardware value plus the saved accumulator
 * (temp_stats_blk holds totals from before the last chip reset).
 * Expects a local "bp" in scope.
 */
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* 32-bit counter: live hardware value plus saved accumulator. */
#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
/* ndo_get_stats64: translate the chip's hardware statistics block (plus
 * the software accumulator preserved across resets) into the generic
 * rtnl_link_stats64 structure.  Returns @net_stats unmodified if the
 * stats block has not been allocated yet.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier-sense counter is not usable on 5706 and 5708 A0 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6705
6706/* All ethtool functions called with rtnl_lock */
6707
/* ethtool get_settings: report supported/advertised modes, current port,
 * autoneg state, and (when the link is up) the negotiated speed/duplex.
 * Link-dependent fields are read under phy_lock for consistency.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		/* Remote PHY can be either media type */
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, bp->line_speed);
		cmd->duplex = bp->duplex;
	}
	else {
		/* No link: speed/duplex are unknown */
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 6765
/* ethtool set_settings: validate and store the requested port, autoneg,
 * speed and duplex, then (if the interface is up) re-run PHY setup.
 * All validation and state updates are done under phy_lock; any invalid
 * combination returns -EINVAL without touching saved state.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports requires remote-PHY capability */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1G/2.5G full duplex */
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			/* Forced gigabit speeds are not valid on copper */
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6843
/* ethtool get_drvinfo: fill in driver name/version, PCI bus address, and
 * the bootcode/firmware version string cached at probe time.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
6854
244ac4f4
MC
6855#define BNX2_REGDUMP_LEN (32 * 1024)
6856
6857static int
6858bnx2_get_regs_len(struct net_device *dev)
6859{
6860 return BNX2_REGDUMP_LEN;
6861}
6862
/* ethtool get_regs: copy readable register ranges into the caller's
 * buffer.  reg_boundaries lists alternating [start, end) offsets of the
 * readable windows; bytes between windows are left zeroed.  The dump is
 * all-zero when the interface is down.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of this window: jump to the next window's start */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
6914
b6016b76
MC
6915static void
6916bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6917{
972ec0d4 6918 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6919
f86e82fb 6920 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6921 wol->supported = 0;
6922 wol->wolopts = 0;
6923 }
6924 else {
6925 wol->supported = WAKE_MAGIC;
6926 if (bp->wol)
6927 wol->wolopts = WAKE_MAGIC;
6928 else
6929 wol->wolopts = 0;
6930 }
6931 memset(&wol->sopass, 0, sizeof(wol->sopass));
6932}
6933
6934static int
6935bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6936{
972ec0d4 6937 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6938
6939 if (wol->wolopts & ~WAKE_MAGIC)
6940 return -EINVAL;
6941
6942 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6943 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6944 return -EINVAL;
6945
6946 bp->wol = 1;
6947 }
6948 else {
6949 bp->wol = 0;
6950 }
6951 return 0;
6952}
6953
/* ethtool nway_reset: restart autonegotiation.  Only valid when the
 * interface is up and autoneg is enabled.  Remote-PHY devices delegate
 * to the firmware; SerDes PHYs first force a visible link drop (via
 * loopback) so the peer renegotiates.  phy_lock protects PHY access and
 * is briefly dropped around the msleep().
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout in the periodic timer */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6999
7959ea25
ON
7000static u32
7001bnx2_get_link(struct net_device *dev)
7002{
7003 struct bnx2 *bp = netdev_priv(dev);
7004
7005 return bp->link_up;
7006}
7007
b6016b76
MC
7008static int
7009bnx2_get_eeprom_len(struct net_device *dev)
7010{
972ec0d4 7011 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7012
1122db71 7013 if (bp->flash_info == NULL)
b6016b76
MC
7014 return 0;
7015
1122db71 7016 return (int) bp->flash_size;
b6016b76
MC
7017}
7018
7019static int
7020bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7021 u8 *eebuf)
7022{
972ec0d4 7023 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7024 int rc;
7025
9f52b564
MC
7026 if (!netif_running(dev))
7027 return -EAGAIN;
7028
1064e944 7029 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
7030
7031 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7032
7033 return rc;
7034}
7035
7036static int
7037bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7038 u8 *eebuf)
7039{
972ec0d4 7040 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7041 int rc;
7042
9f52b564
MC
7043 if (!netif_running(dev))
7044 return -EAGAIN;
7045
1064e944 7046 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
7047
7048 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7049
7050 return rc;
7051}
7052
7053static int
7054bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7055{
972ec0d4 7056 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7057
7058 memset(coal, 0, sizeof(struct ethtool_coalesce));
7059
7060 coal->rx_coalesce_usecs = bp->rx_ticks;
7061 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7062 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7063 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7064
7065 coal->tx_coalesce_usecs = bp->tx_ticks;
7066 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7067 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7068 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7069
7070 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7071
7072 return 0;
7073}
7074
/* ethtool set_coalesce: clamp and store the requested coalescing
 * parameters (tick values max 0x3ff, frame counts max 0xff), apply the
 * broken-stats workaround for the stats interval, and re-init the NIC
 * if it is running so the new values take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		/* Chips with broken stats only support off or 1-second */
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		/* Restart the NIC so the HC picks up the new values */
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}
7123
7124static void
7125bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7126{
972ec0d4 7127 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7128
13daffa2 7129 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76 7130 ering->rx_mini_max_pending = 0;
47bf4246 7131 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
7132
7133 ering->rx_pending = bp->rx_ring_size;
7134 ering->rx_mini_pending = 0;
47bf4246 7135 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
7136
7137 ering->tx_max_pending = MAX_TX_DESC_CNT;
7138 ering->tx_pending = bp->tx_ring_size;
7139}
7140
/* Resize the rx/tx rings to @rx/@tx descriptors.  If the interface is
 * running, the hardware counters are saved first (the reset clears
 * them), the NIC is stopped and its memory freed, then everything is
 * reallocated and restarted.  Closes the device on reallocation failure.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* napi must be re-enabled before dev_close() */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7184
5d5d0015
MC
7185static int
7186bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7187{
7188 struct bnx2 *bp = netdev_priv(dev);
7189 int rc;
7190
7191 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7192 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7193 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7194
7195 return -EINVAL;
7196 }
7197 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7198 return rc;
7199}
7200
b6016b76
MC
7201static void
7202bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7203{
972ec0d4 7204 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7205
7206 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7207 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7208 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7209}
7210
7211static int
7212bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7213{
972ec0d4 7214 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7215
7216 bp->req_flow_ctrl = 0;
7217 if (epause->rx_pause)
7218 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7219 if (epause->tx_pause)
7220 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7221
7222 if (epause->autoneg) {
7223 bp->autoneg |= AUTONEG_FLOW_CTRL;
7224 }
7225 else {
7226 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7227 }
7228
9f52b564
MC
7229 if (netif_running(dev)) {
7230 spin_lock_bh(&bp->phy_lock);
7231 bnx2_setup_phy(bp, bp->phy_port);
7232 spin_unlock_bh(&bp->phy_lock);
7233 }
b6016b76
MC
7234
7235 return 0;
7236}
7237
/* ethtool statistics names.  Index order MUST match
 * bnx2_stats_offset_arr and the bnx2_57xx_stats_len_arr tables below —
 * all three are indexed in lock-step by bnx2_get_ethtool_stats().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7289
790dab2f
MC
7290#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7291 sizeof(bnx2_stats_str_arr[0]))
7292
b6016b76
MC
7293#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7294
/* 32-bit word offsets of each counter within struct statistics_block.
 * Index order MUST match bnx2_stats_str_arr above; for 64-bit counters
 * the offset points at the _hi word and the _lo word follows it.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7344
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8 = 64-bit, 4 = 32-bit, 0 = skip) for
 * 5706 A0-A2 and 5708 A0 silicon; indexed in lock-step with
 * bnx2_stats_str_arr / bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7355
5b0c76ad
MC
/* Counter widths for 5708 B0 and later chips — same layout as the 5706
 * table, but stat_Dot3StatsCarrierSenseErrors (index 11) is valid here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7363
b6016b76
MC
/* Number of entries reported for ETH_SS_TEST; must equal the array
 * below and the number of buf[] slots filled by bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; index order matches buf[] in bnx2_self_test(). */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7376
7377static int
b9f2c044 7378bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 7379{
b9f2c044
JG
7380 switch (sset) {
7381 case ETH_SS_TEST:
7382 return BNX2_NUM_TESTS;
7383 case ETH_SS_STATS:
7384 return BNX2_NUM_STATS;
7385 default:
7386 return -EOPNOTSUPP;
7387 }
b6016b76
MC
7388}
7389
/* ethtool .self_test: run the diagnostic suite.  Results go into
 * buf[0..5] in the order of bnx2_tests_str_arr (0 = pass); any failure
 * also sets ETH_TEST_FL_FAILED in etest->flags.  Offline tests reset
 * the chip into diagnostic mode first, so the sequencing here
 * (stop -> reset -> test -> re-init/start) must not be reordered.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Chip may be in D3 if the interface is down. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] records the loopback sub-test bitmask directly. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			/* Bring the NIC back up after diagnostic reset. */
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up (up to 7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7448
7449static void
7450bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7451{
7452 switch (stringset) {
7453 case ETH_SS_STATS:
7454 memcpy(buf, bnx2_stats_str_arr,
7455 sizeof(bnx2_stats_str_arr));
7456 break;
7457 case ETH_SS_TEST:
7458 memcpy(buf, bnx2_tests_str_arr,
7459 sizeof(bnx2_tests_str_arr));
7460 break;
7461 }
7462}
7463
b6016b76
MC
/* ethtool .get_ethtool_stats: fill buf[] with BNX2_NUM_STATS counters.
 * Each value is the sum of the live chip stats block and the saved copy
 * in temp_stats_blk (counters accumulated across chip resets).  The
 * per-counter width table selects 32- vs 64-bit handling and skips
 * counters that are broken on early silicon (width 0).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* Stats block not allocated yet — report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early 5706/5708 silicon has errata; use the table that skips
	 * the affected counters.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: offset points at the high word,
		 * the low word immediately follows it.
		 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7510
/* ethtool .set_phys_id: blink the port LED so the operator can locate
 * the NIC.  ACTIVE saves the current LED mode and returns 1 to request
 * a 1 Hz on/off callback cycle; ON/OFF drive the LED via register
 * overrides; INACTIVE restores the saved mode and drops back to D3 if
 * the interface is down.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Chip registers are inaccessible in D3. */
		bnx2_set_power_state(bp, PCI_D0);

		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
		       BNX2_EMAC_LED_1000MB_OVERRIDE |
		       BNX2_EMAC_LED_100MB_OVERRIDE |
		       BNX2_EMAC_LED_10MB_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore normal LED behavior. */
		REG_WR(bp, BNX2_EMAC_LED, 0);
		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);

		if (!netif_running(dev))
			bnx2_set_power_state(bp, PCI_D3hot);
		break;
	}

	return 0;
}
7548
8d7dfc2b
MM
7549static u32
7550bnx2_fix_features(struct net_device *dev, u32 features)
4666f87a
MC
7551{
7552 struct bnx2 *bp = netdev_priv(dev);
7553
8d7dfc2b
MM
7554 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7555 features |= NETIF_F_HW_VLAN_RX;
7556
7557 return features;
4666f87a
MC
7558}
7559
/* ndo_set_features: apply offload feature changes.  Toggling HW VLAN RX
 * stripping requires reprogramming the MAC RX mode and notifying the
 * firmware, which must happen with traffic stopped; the stop/start
 * ordering below is therefore significant.  Returns 1 when dev->features
 * was already committed here, 0 to let the core commit it.
 */
static int
bnx2_set_features(struct net_device *dev, u32 features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* Reconfigure only if the VLAN-strip setting actually changes. */
	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		/* Tell the firmware about the new keep-VLAN setting. */
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
7584
/* ethtool operations table registered via dev->ethtool_ops. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7610
/* Called with rtnl_lock */
/* ndo_do_ioctl: MII register access (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Register reads/writes are refused when the PHY is managed remotely by
 * the management firmware, and require the interface to be up so the
 * MDIO bus is powered.  phy_lock serializes PHY access.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7661
7662/* Called with rtnl_lock */
7663static int
7664bnx2_change_mac_addr(struct net_device *dev, void *p)
7665{
7666 struct sockaddr *addr = p;
972ec0d4 7667 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7668
73eef4cd
MC
7669 if (!is_valid_ether_addr(addr->sa_data))
7670 return -EINVAL;
7671
b6016b76
MC
7672 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7673 if (netif_running(dev))
5fcaed01 7674 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7675
7676 return 0;
7677}
7678
7679/* Called with rtnl_lock */
7680static int
7681bnx2_change_mtu(struct net_device *dev, int new_mtu)
7682{
972ec0d4 7683 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7684
7685 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7686 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7687 return -EINVAL;
7688
7689 dev->mtu = new_mtu;
807540ba 7690 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
b6016b76
MC
7691}
7692
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller (netconsole/netpoll): service every interrupt
 * vector synchronously.  Each handler is invoked with its IRQ disabled
 * to emulate normal hard-irq context without racing the real interrupt.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7709
253c8b75
MC
/* Probe-time helper: decide whether a 5709 port is copper or SerDes.
 * The dual-media control register encodes it via the bond id or, when
 * ambiguous, via strap pins whose meaning differs per PCI function.
 * Sets BNX2_PHY_FLAG_SERDES when fiber/SerDes media is detected.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		/* Copper-bonded part: nothing to flag. */
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Bond id inconclusive: fall back to the strap pins (possibly
	 * overridden in software).
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* Strap-value-to-media mapping differs between function 0 and 1. */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7747
883e5151
MC
/* Probe-time helper: decode bus type (PCI vs PCI-X), clock speed and
 * bus width from the chip's misc status / clock control registers.
 * Sets BNX2_FLAG_PCIX / BNX2_FLAG_PCI_32BIT and fills bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7799
76d99061
MC
/* Probe-time helper: read the VPD area out of NVRAM and, if it carries
 * a Dell ("1028") manufacturer id, append the OEM firmware version
 * string (V0 keyword) to bp->fw_version.  Best-effort: any parse
 * failure just leaves fw_version untouched.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* Buffer holds the raw NVRAM image (second half) and the
	 * byte-swapped copy (first half).
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores the VPD 32-bit big-endian; swap each word into
	 * the byte order the pci_vpd_* helpers expect.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only append the version for Dell-branded boards ("1028"). */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7867
b6016b76
MC
7868static int __devinit
7869bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7870{
7871 struct bnx2 *bp;
7872 unsigned long mem_len;
58fc2ea4 7873 int rc, i, j;
b6016b76 7874 u32 reg;
40453c83 7875 u64 dma_mask, persist_dma_mask;
cd709aa9 7876 int err;
b6016b76 7877
b6016b76 7878 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 7879 bp = netdev_priv(dev);
b6016b76
MC
7880
7881 bp->flags = 0;
7882 bp->phy_flags = 0;
7883
354fcd77
MC
7884 bp->temp_stats_blk =
7885 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7886
7887 if (bp->temp_stats_blk == NULL) {
7888 rc = -ENOMEM;
7889 goto err_out;
7890 }
7891
b6016b76
MC
7892 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7893 rc = pci_enable_device(pdev);
7894 if (rc) {
3a9c6a49 7895 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
b6016b76
MC
7896 goto err_out;
7897 }
7898
7899 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 7900 dev_err(&pdev->dev,
3a9c6a49 7901 "Cannot find PCI device base address, aborting\n");
b6016b76
MC
7902 rc = -ENODEV;
7903 goto err_out_disable;
7904 }
7905
7906 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7907 if (rc) {
3a9c6a49 7908 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
b6016b76
MC
7909 goto err_out_disable;
7910 }
7911
7912 pci_set_master(pdev);
7913
7914 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7915 if (bp->pm_cap == 0) {
9b91cf9d 7916 dev_err(&pdev->dev,
3a9c6a49 7917 "Cannot find power management capability, aborting\n");
b6016b76
MC
7918 rc = -EIO;
7919 goto err_out_release;
7920 }
7921
b6016b76
MC
7922 bp->dev = dev;
7923 bp->pdev = pdev;
7924
7925 spin_lock_init(&bp->phy_lock);
1b8227c4 7926 spin_lock_init(&bp->indirect_lock);
c5a88950
MC
7927#ifdef BCM_CNIC
7928 mutex_init(&bp->cnic_lock);
7929#endif
c4028958 7930 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
7931
7932 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
4edd473f 7933 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
b6016b76
MC
7934 dev->mem_end = dev->mem_start + mem_len;
7935 dev->irq = pdev->irq;
7936
7937 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7938
7939 if (!bp->regview) {
3a9c6a49 7940 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
b6016b76
MC
7941 rc = -ENOMEM;
7942 goto err_out_release;
7943 }
7944
be7ff1af
MC
7945 bnx2_set_power_state(bp, PCI_D0);
7946
b6016b76
MC
7947 /* Configure byte swap and enable write to the reg_window registers.
7948 * Rely on CPU to do target byte swapping on big endian systems
7949 * The chip's target access swapping will not swap all accesses
7950 */
be7ff1af
MC
7951 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7952 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7953 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
b6016b76
MC
7954
7955 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7956
883e5151 7957 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
e82760e7
JM
7958 if (!pci_is_pcie(pdev)) {
7959 dev_err(&pdev->dev, "Not PCIE, aborting\n");
883e5151
MC
7960 rc = -EIO;
7961 goto err_out_unmap;
7962 }
f86e82fb 7963 bp->flags |= BNX2_FLAG_PCIE;
2dd201d7 7964 if (CHIP_REV(bp) == CHIP_REV_Ax)
f86e82fb 7965 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
c239f279
MC
7966
7967 /* AER (Advanced Error Reporting) hooks */
7968 err = pci_enable_pcie_error_reporting(pdev);
4bb9ebc7
MC
7969 if (!err)
7970 bp->flags |= BNX2_FLAG_AER_ENABLED;
c239f279 7971
883e5151 7972 } else {
59b47d8a
MC
7973 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7974 if (bp->pcix_cap == 0) {
7975 dev_err(&pdev->dev,
3a9c6a49 7976 "Cannot find PCIX capability, aborting\n");
59b47d8a
MC
7977 rc = -EIO;
7978 goto err_out_unmap;
7979 }
61d9e3fa 7980 bp->flags |= BNX2_FLAG_BROKEN_STATS;
59b47d8a
MC
7981 }
7982
b4b36042
MC
7983 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7984 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
f86e82fb 7985 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
7986 }
7987
8e6a72c4
MC
7988 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7989 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
f86e82fb 7990 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
7991 }
7992
40453c83
MC
7993 /* 5708 cannot support DMA addresses > 40-bit. */
7994 if (CHIP_NUM(bp) == CHIP_NUM_5708)
50cf156a 7995 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
40453c83 7996 else
6a35528a 7997 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
40453c83
MC
7998
7999 /* Configure DMA attributes. */
8000 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8001 dev->features |= NETIF_F_HIGHDMA;
8002 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8003 if (rc) {
8004 dev_err(&pdev->dev,
3a9c6a49 8005 "pci_set_consistent_dma_mask failed, aborting\n");
40453c83
MC
8006 goto err_out_unmap;
8007 }
284901a9 8008 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3a9c6a49 8009 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
40453c83
MC
8010 goto err_out_unmap;
8011 }
8012
f86e82fb 8013 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 8014 bnx2_get_pci_speed(bp);
b6016b76
MC
8015
8016 /* 5706A0 may falsely detect SERR and PERR. */
8017 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8018 reg = REG_RD(bp, PCI_COMMAND);
8019 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8020 REG_WR(bp, PCI_COMMAND, reg);
8021 }
8022 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
f86e82fb 8023 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 8024
9b91cf9d 8025 dev_err(&pdev->dev,
3a9c6a49 8026 "5706 A1 can only be used in a PCIX bus, aborting\n");
b6016b76
MC
8027 goto err_out_unmap;
8028 }
8029
8030 bnx2_init_nvram(bp);
8031
2726d6e1 8032 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d
MC
8033
8034 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
8035 BNX2_SHM_HDR_SIGNATURE_SIG) {
8036 u32 off = PCI_FUNC(pdev->devfn) << 2;
8037
2726d6e1 8038 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 8039 } else
e3648b3d
MC
8040 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8041
b6016b76
MC
8042 /* Get the permanent MAC address. First we need to make sure the
8043 * firmware is actually running.
8044 */
2726d6e1 8045 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
8046
8047 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8048 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
3a9c6a49 8049 dev_err(&pdev->dev, "Firmware not running, aborting\n");
b6016b76
MC
8050 rc = -ENODEV;
8051 goto err_out_unmap;
8052 }
8053
76d99061
MC
8054 bnx2_read_vpd_fw_ver(bp);
8055
8056 j = strlen(bp->fw_version);
2726d6e1 8057 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
76d99061 8058 for (i = 0; i < 3 && j < 24; i++) {
58fc2ea4
MC
8059 u8 num, k, skip0;
8060
76d99061
MC
8061 if (i == 0) {
8062 bp->fw_version[j++] = 'b';
8063 bp->fw_version[j++] = 'c';
8064 bp->fw_version[j++] = ' ';
8065 }
58fc2ea4
MC
8066 num = (u8) (reg >> (24 - (i * 8)));
8067 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8068 if (num >= k || !skip0 || k == 1) {
8069 bp->fw_version[j++] = (num / k) + '0';
8070 skip0 = 0;
8071 }
8072 }
8073 if (i != 2)
8074 bp->fw_version[j++] = '.';
8075 }
2726d6e1 8076 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
8077 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8078 bp->wol = 1;
8079
8080 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 8081 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
8082
8083 for (i = 0; i < 30; i++) {
2726d6e1 8084 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
8085 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8086 break;
8087 msleep(10);
8088 }
8089 }
2726d6e1 8090 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
8091 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8092 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8093 reg != BNX2_CONDITION_MFW_RUN_NONE) {
2726d6e1 8094 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4 8095
76d99061
MC
8096 if (j < 32)
8097 bp->fw_version[j++] = ' ';
8098 for (i = 0; i < 3 && j < 28; i++) {
2726d6e1 8099 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
3aeb7d22 8100 reg = be32_to_cpu(reg);
58fc2ea4
MC
8101 memcpy(&bp->fw_version[j], &reg, 4);
8102 j += 4;
8103 }
8104 }
b6016b76 8105
2726d6e1 8106 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
8107 bp->mac_addr[0] = (u8) (reg >> 8);
8108 bp->mac_addr[1] = (u8) reg;
8109
2726d6e1 8110 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
8111 bp->mac_addr[2] = (u8) (reg >> 24);
8112 bp->mac_addr[3] = (u8) (reg >> 16);
8113 bp->mac_addr[4] = (u8) (reg >> 8);
8114 bp->mac_addr[5] = (u8) reg;
8115
8116 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 8117 bnx2_set_rx_ring_size(bp, 255);
b6016b76 8118
cf7474a6 8119 bp->tx_quick_cons_trip_int = 2;
b6016b76 8120 bp->tx_quick_cons_trip = 20;
cf7474a6 8121 bp->tx_ticks_int = 18;
b6016b76 8122 bp->tx_ticks = 80;
6aa20a22 8123
cf7474a6
MC
8124 bp->rx_quick_cons_trip_int = 2;
8125 bp->rx_quick_cons_trip = 12;
b6016b76
MC
8126 bp->rx_ticks_int = 18;
8127 bp->rx_ticks = 18;
8128
7ea6920e 8129 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76 8130
ac392abc 8131 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 8132
5b0c76ad
MC
8133 bp->phy_addr = 1;
8134
b6016b76 8135 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
8136 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8137 bnx2_get_5709_media(bp);
8138 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
583c28e5 8139 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 8140
0d8a6571 8141 bp->phy_port = PORT_TP;
583c28e5 8142 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 8143 bp->phy_port = PORT_FIBRE;
2726d6e1 8144 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 8145 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 8146 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8147 bp->wol = 0;
8148 }
38ea3686
MC
8149 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8150 /* Don't do parallel detect on this board because of
8151 * some board problems. The link will not go down
8152 * if we do parallel detect.
8153 */
8154 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8155 pdev->subsystem_device == 0x310c)
8156 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8157 } else {
5b0c76ad 8158 bp->phy_addr = 2;
5b0c76ad 8159 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 8160 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 8161 }
261dd5ca
MC
8162 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8163 CHIP_NUM(bp) == CHIP_NUM_5708)
583c28e5 8164 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
fb0c18bd
MC
8165 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8166 (CHIP_REV(bp) == CHIP_REV_Ax ||
8167 CHIP_REV(bp) == CHIP_REV_Bx))
583c28e5 8168 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 8169
7c62e83b
MC
8170 bnx2_init_fw_cap(bp);
8171
16088272
MC
8172 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8173 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5ec6d7bf
MC
8174 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8175 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
f86e82fb 8176 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8177 bp->wol = 0;
8178 }
dda1e390 8179
b6016b76
MC
8180 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8181 bp->tx_quick_cons_trip_int =
8182 bp->tx_quick_cons_trip;
8183 bp->tx_ticks_int = bp->tx_ticks;
8184 bp->rx_quick_cons_trip_int =
8185 bp->rx_quick_cons_trip;
8186 bp->rx_ticks_int = bp->rx_ticks;
8187 bp->comp_prod_trip_int = bp->comp_prod_trip;
8188 bp->com_ticks_int = bp->com_ticks;
8189 bp->cmd_ticks_int = bp->cmd_ticks;
8190 }
8191
f9317a40
MC
8192 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8193 *
8194 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8195 * with byte enables disabled on the unused 32-bit word. This is legal
8196 * but causes problems on the AMD 8132 which will eventually stop
8197 * responding after a while.
8198 *
8199 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 8200 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
8201 */
8202 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8203 struct pci_dev *amd_8132 = NULL;
8204
8205 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8206 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8207 amd_8132))) {
f9317a40 8208
44c10138
AK
8209 if (amd_8132->revision >= 0x10 &&
8210 amd_8132->revision <= 0x13) {
f9317a40
MC
8211 disable_msi = 1;
8212 pci_dev_put(amd_8132);
8213 break;
8214 }
8215 }
8216 }
8217
deaf391b 8218 bnx2_set_default_link(bp);
b6016b76
MC
8219 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8220
cd339a0e 8221 init_timer(&bp->timer);
ac392abc 8222 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
cd339a0e
MC
8223 bp->timer.data = (unsigned long) bp;
8224 bp->timer.function = bnx2_timer;
8225
7625eb2f 8226#ifdef BCM_CNIC
41c2178a
MC
8227 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8228 bp->cnic_eth_dev.max_iscsi_conn =
8229 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8230 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
7625eb2f 8231#endif
c239f279
MC
8232 pci_save_state(pdev);
8233
b6016b76
MC
8234 return 0;
8235
8236err_out_unmap:
4bb9ebc7 8237 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
c239f279 8238 pci_disable_pcie_error_reporting(pdev);
4bb9ebc7
MC
8239 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8240 }
c239f279 8241
b6016b76
MC
8242 if (bp->regview) {
8243 iounmap(bp->regview);
73eef4cd 8244 bp->regview = NULL;
b6016b76
MC
8245 }
8246
8247err_out_release:
8248 pci_release_regions(pdev);
8249
8250err_out_disable:
8251 pci_disable_device(pdev);
8252 pci_set_drvdata(pdev, NULL);
8253
8254err_out:
8255 return rc;
8256}
8257
883e5151
MC
8258static char * __devinit
8259bnx2_bus_string(struct bnx2 *bp, char *str)
8260{
8261 char *s = str;
8262
f86e82fb 8263 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
8264 s += sprintf(s, "PCI Express");
8265 } else {
8266 s += sprintf(s, "PCI");
f86e82fb 8267 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 8268 s += sprintf(s, "-X");
f86e82fb 8269 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
8270 s += sprintf(s, " 32-bit");
8271 else
8272 s += sprintf(s, " 64-bit");
8273 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8274 }
8275 return str;
8276}
8277
f048fa9c
MC
8278static void
8279bnx2_del_napi(struct bnx2 *bp)
8280{
8281 int i;
8282
8283 for (i = 0; i < bp->irq_nvecs; i++)
8284 netif_napi_del(&bp->bnx2_napi[i].napi);
8285}
8286
8287static void
35efa7c1
MC
8288bnx2_init_napi(struct bnx2 *bp)
8289{
b4b36042 8290 int i;
35efa7c1 8291
4327ba43 8292 for (i = 0; i < bp->irq_nvecs; i++) {
35e9010b
MC
8293 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8294 int (*poll)(struct napi_struct *, int);
8295
8296 if (i == 0)
8297 poll = bnx2_poll;
8298 else
f0ea2e63 8299 poll = bnx2_poll_msix;
35e9010b
MC
8300
8301 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
8302 bnapi->bp = bp;
8303 }
35efa7c1
MC
8304}
8305
0421eae6
SH
8306static const struct net_device_ops bnx2_netdev_ops = {
8307 .ndo_open = bnx2_open,
8308 .ndo_start_xmit = bnx2_start_xmit,
8309 .ndo_stop = bnx2_close,
5d07bf26 8310 .ndo_get_stats64 = bnx2_get_stats64,
0421eae6
SH
8311 .ndo_set_rx_mode = bnx2_set_rx_mode,
8312 .ndo_do_ioctl = bnx2_ioctl,
8313 .ndo_validate_addr = eth_validate_addr,
8314 .ndo_set_mac_address = bnx2_change_mac_addr,
8315 .ndo_change_mtu = bnx2_change_mtu,
8d7dfc2b
MM
8316 .ndo_fix_features = bnx2_fix_features,
8317 .ndo_set_features = bnx2_set_features,
0421eae6 8318 .ndo_tx_timeout = bnx2_tx_timeout,
257ddbda 8319#ifdef CONFIG_NET_POLL_CONTROLLER
0421eae6
SH
8320 .ndo_poll_controller = poll_bnx2,
8321#endif
8322};
8323
b6016b76
MC
8324static int __devinit
8325bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8326{
8327 static int version_printed = 0;
8328 struct net_device *dev = NULL;
8329 struct bnx2 *bp;
0795af57 8330 int rc;
883e5151 8331 char str[40];
b6016b76
MC
8332
8333 if (version_printed++ == 0)
3a9c6a49 8334 pr_info("%s", version);
b6016b76
MC
8335
8336 /* dev zeroed in init_etherdev */
706bf240 8337 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
b6016b76
MC
8338
8339 if (!dev)
8340 return -ENOMEM;
8341
8342 rc = bnx2_init_board(pdev, dev);
8343 if (rc < 0) {
8344 free_netdev(dev);
8345 return rc;
8346 }
8347
0421eae6 8348 dev->netdev_ops = &bnx2_netdev_ops;
b6016b76 8349 dev->watchdog_timeo = TX_TIMEOUT;
b6016b76 8350 dev->ethtool_ops = &bnx2_ethtool_ops;
b6016b76 8351
972ec0d4 8352 bp = netdev_priv(dev);
b6016b76 8353
1b2f922f
MC
8354 pci_set_drvdata(pdev, dev);
8355
57579f76
MC
8356 rc = bnx2_request_firmware(bp);
8357 if (rc)
8358 goto error;
8359
1b2f922f
MC
8360 memcpy(dev->dev_addr, bp->mac_addr, 6);
8361 memcpy(dev->perm_addr, bp->mac_addr, 6);
1b2f922f 8362
8d7dfc2b
MM
8363 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8364 NETIF_F_TSO | NETIF_F_TSO_ECN |
8365 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8366
8367 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8368 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8369
8370 dev->vlan_features = dev->hw_features;
8371 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8372 dev->features |= dev->hw_features;
8373
b6016b76 8374 if ((rc = register_netdev(dev))) {
9b91cf9d 8375 dev_err(&pdev->dev, "Cannot register net device\n");
57579f76 8376 goto error;
b6016b76
MC
8377 }
8378
3a9c6a49
JP
8379 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8380 board_info[ent->driver_data].name,
8381 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8382 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8383 bnx2_bus_string(bp, str),
8384 dev->base_addr,
8385 bp->pdev->irq, dev->dev_addr);
b6016b76 8386
b6016b76 8387 return 0;
57579f76
MC
8388
8389error:
8390 if (bp->mips_firmware)
8391 release_firmware(bp->mips_firmware);
8392 if (bp->rv2p_firmware)
8393 release_firmware(bp->rv2p_firmware);
8394
8395 if (bp->regview)
8396 iounmap(bp->regview);
8397 pci_release_regions(pdev);
8398 pci_disable_device(pdev);
8399 pci_set_drvdata(pdev, NULL);
8400 free_netdev(dev);
8401 return rc;
b6016b76
MC
8402}
8403
8404static void __devexit
8405bnx2_remove_one(struct pci_dev *pdev)
8406{
8407 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 8408 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
8409
8410 unregister_netdev(dev);
8411
8333a46a 8412 del_timer_sync(&bp->timer);
cd634019 8413 cancel_work_sync(&bp->reset_task);
8333a46a 8414
57579f76
MC
8415 if (bp->mips_firmware)
8416 release_firmware(bp->mips_firmware);
8417 if (bp->rv2p_firmware)
8418 release_firmware(bp->rv2p_firmware);
8419
b6016b76
MC
8420 if (bp->regview)
8421 iounmap(bp->regview);
8422
354fcd77
MC
8423 kfree(bp->temp_stats_blk);
8424
4bb9ebc7 8425 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
c239f279 8426 pci_disable_pcie_error_reporting(pdev);
4bb9ebc7
MC
8427 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8428 }
cd709aa9 8429
c239f279 8430 free_netdev(dev);
cd709aa9 8431
b6016b76
MC
8432 pci_release_regions(pdev);
8433 pci_disable_device(pdev);
8434 pci_set_drvdata(pdev, NULL);
8435}
8436
8437static int
829ca9a3 8438bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
b6016b76
MC
8439{
8440 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 8441 struct bnx2 *bp = netdev_priv(dev);
b6016b76 8442
6caebb02
MC
8443 /* PCI register 4 needs to be saved whether netif_running() or not.
8444 * MSI address and data need to be saved if using MSI and
8445 * netif_running().
8446 */
8447 pci_save_state(pdev);
b6016b76
MC
8448 if (!netif_running(dev))
8449 return 0;
8450
23f333a2 8451 cancel_work_sync(&bp->reset_task);
212f9934 8452 bnx2_netif_stop(bp, true);
b6016b76
MC
8453 netif_device_detach(dev);
8454 del_timer_sync(&bp->timer);
74bf4ba3 8455 bnx2_shutdown_chip(bp);
b6016b76 8456 bnx2_free_skbs(bp);
829ca9a3 8457 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
b6016b76
MC
8458 return 0;
8459}
8460
8461static int
8462bnx2_resume(struct pci_dev *pdev)
8463{
8464 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 8465 struct bnx2 *bp = netdev_priv(dev);
b6016b76 8466
6caebb02 8467 pci_restore_state(pdev);
b6016b76
MC
8468 if (!netif_running(dev))
8469 return 0;
8470
829ca9a3 8471 bnx2_set_power_state(bp, PCI_D0);
b6016b76 8472 netif_device_attach(dev);
9a120bc5 8473 bnx2_init_nic(bp, 1);
212f9934 8474 bnx2_netif_start(bp, true);
b6016b76
MC
8475 return 0;
8476}
8477
6ff2da49
WX
8478/**
8479 * bnx2_io_error_detected - called when PCI error is detected
8480 * @pdev: Pointer to PCI device
8481 * @state: The current pci connection state
8482 *
8483 * This function is called after a PCI bus error affecting
8484 * this device has been detected.
8485 */
8486static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8487 pci_channel_state_t state)
8488{
8489 struct net_device *dev = pci_get_drvdata(pdev);
8490 struct bnx2 *bp = netdev_priv(dev);
8491
8492 rtnl_lock();
8493 netif_device_detach(dev);
8494
2ec3de26
DN
8495 if (state == pci_channel_io_perm_failure) {
8496 rtnl_unlock();
8497 return PCI_ERS_RESULT_DISCONNECT;
8498 }
8499
6ff2da49 8500 if (netif_running(dev)) {
212f9934 8501 bnx2_netif_stop(bp, true);
6ff2da49
WX
8502 del_timer_sync(&bp->timer);
8503 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8504 }
8505
8506 pci_disable_device(pdev);
8507 rtnl_unlock();
8508
8509 /* Request a slot slot reset. */
8510 return PCI_ERS_RESULT_NEED_RESET;
8511}
8512
8513/**
8514 * bnx2_io_slot_reset - called after the pci bus has been reset.
8515 * @pdev: Pointer to PCI device
8516 *
8517 * Restart the card from scratch, as if from a cold-boot.
8518 */
8519static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8520{
8521 struct net_device *dev = pci_get_drvdata(pdev);
8522 struct bnx2 *bp = netdev_priv(dev);
cd709aa9
JF
8523 pci_ers_result_t result;
8524 int err;
6ff2da49
WX
8525
8526 rtnl_lock();
8527 if (pci_enable_device(pdev)) {
8528 dev_err(&pdev->dev,
3a9c6a49 8529 "Cannot re-enable PCI device after reset\n");
cd709aa9
JF
8530 result = PCI_ERS_RESULT_DISCONNECT;
8531 } else {
8532 pci_set_master(pdev);
8533 pci_restore_state(pdev);
8534 pci_save_state(pdev);
8535
8536 if (netif_running(dev)) {
8537 bnx2_set_power_state(bp, PCI_D0);
8538 bnx2_init_nic(bp, 1);
8539 }
8540 result = PCI_ERS_RESULT_RECOVERED;
6ff2da49 8541 }
cd709aa9 8542 rtnl_unlock();
6ff2da49 8543
4bb9ebc7 8544 if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
c239f279
MC
8545 return result;
8546
cd709aa9
JF
8547 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8548 if (err) {
8549 dev_err(&pdev->dev,
8550 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8551 err); /* non-fatal, continue */
6ff2da49
WX
8552 }
8553
cd709aa9 8554 return result;
6ff2da49
WX
8555}
8556
8557/**
8558 * bnx2_io_resume - called when traffic can start flowing again.
8559 * @pdev: Pointer to PCI device
8560 *
8561 * This callback is called when the error recovery driver tells us that
8562 * its OK to resume normal operation.
8563 */
8564static void bnx2_io_resume(struct pci_dev *pdev)
8565{
8566 struct net_device *dev = pci_get_drvdata(pdev);
8567 struct bnx2 *bp = netdev_priv(dev);
8568
8569 rtnl_lock();
8570 if (netif_running(dev))
212f9934 8571 bnx2_netif_start(bp, true);
6ff2da49
WX
8572
8573 netif_device_attach(dev);
8574 rtnl_unlock();
8575}
8576
8577static struct pci_error_handlers bnx2_err_handler = {
8578 .error_detected = bnx2_io_error_detected,
8579 .slot_reset = bnx2_io_slot_reset,
8580 .resume = bnx2_io_resume,
8581};
8582
b6016b76 8583static struct pci_driver bnx2_pci_driver = {
14ab9b86
PH
8584 .name = DRV_MODULE_NAME,
8585 .id_table = bnx2_pci_tbl,
8586 .probe = bnx2_init_one,
8587 .remove = __devexit_p(bnx2_remove_one),
8588 .suspend = bnx2_suspend,
8589 .resume = bnx2_resume,
6ff2da49 8590 .err_handler = &bnx2_err_handler,
b6016b76
MC
8591};
8592
8593static int __init bnx2_init(void)
8594{
29917620 8595 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
8596}
8597
8598static void __exit bnx2_cleanup(void)
8599{
8600 pci_unregister_driver(&bnx2_pci_driver);
8601}
8602
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8605
8606
8607
This page took 2.602574 seconds and 5 git commands to generate.