bnx2: Add PCI ID for 5716.
[deliverable/linux.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
feebb331 3 * Copyright (c) 2004-2008 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76 54
110d0ef9 55#define FW_BUF_SIZE 0x10000
b3448b0b 56
b6016b76
MC
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
8427f136
MC
59#define DRV_MODULE_VERSION "1.7.7"
60#define DRV_MODULE_RELDATE "June 17, 2008"
b6016b76
MC
61
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
fefa8645 67static char version[] __devinitdata =
b6016b76
MC
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
8427f136 71MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
b6016b76
MC
72MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board index; used both for board_info[] below and as the driver_data
 * value in bnx2_pci_tbl[], so the two must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
} board_t;
92
/* indexed by board_t, above */
static struct {
	char *name;	/* marketing name printed at probe time */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	};
108
/* PCI IDs handled by this driver.  HP OEM variants of the 5706/5706S are
 * matched first by subsystem vendor/device so they get their own names.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b: BCM5716 — no PCI_DEVICE_ID_NX2_* constant exists for it
	 * yet, so the raw device ID is used here.
	 */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ 0, }
};
132
/* NVRAM device table, selected at run time by matching the strapping value
 * read from the chip.  Per-entry fields (see struct flash_spec in bnx2.h):
 * strapping, config1, config2, config3, write1, flags, page_bits,
 * page_size, addr_mask, total_size, name.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS	(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
221
e30372c9
MC
/* The 5709 has a fixed on-chip buffered NVRAM interface, so it uses this
 * single spec instead of strap-matching against flash_table[].
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
230
b6016b76
MC
231MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
232
/* Return the number of free TX descriptors in @txr.  The smp_mb() pairs
 * with the barriers in the TX path so that tx_prod/tx_cons are read
 * consistently with respect to the producer/consumer updates.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		/* Indices are 16-bit; mask off wrap-around overflow. */
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
250
b6016b76
MC
/* Indirect register read through the PCICFG window.  The address/data
 * register pair is shared, so indirect_lock serializes accessors.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
262
/* Indirect register write through the PCICFG window; see bnx2_reg_rd_ind()
 * for the locking rationale.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
271
2726d6e1
MC
/* Write a word into the firmware shared-memory area (offset is relative
 * to bp->shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
277
/* Read a word from the firmware shared-memory area (offset is relative
 * to bp->shmem_base).
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
283
b6016b76
MC
284static void
285bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
286{
287 offset += cid_addr;
1b8227c4 288 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
289 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
290 int i;
291
292 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
293 REG_WR(bp, BNX2_CTX_CTX_CTRL,
294 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
295 for (i = 0; i < 5; i++) {
296 u32 val;
297 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
298 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
299 break;
300 udelay(5);
301 }
302 } else {
303 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
304 REG_WR(bp, BNX2_CTX_DATA, val);
305 }
1b8227c4 306 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
307}
308
/* Read PHY register @reg into *@val over MDIO.
 *
 * If hardware auto-polling of the PHY is enabled, it must be turned off
 * for the duration of the manual access and restored afterwards.  Polls
 * up to 50 * 10us for the transaction to complete.
 *
 * Returns 0 on success or -EBUSY on MDIO timeout (with *val set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the MDIO read transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
365
/* Write @val to PHY register @reg over MDIO.  Mirrors bnx2_read_phy():
 * auto-polling is suspended around the manual access, and the transaction
 * is polled for up to 50 * 10us.  Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the MDIO write transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
414
/* Mask interrupts on every IRQ vector.  The final read flushes the
 * posted writes to the chip.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
428
/* Re-enable interrupts on every vector.  Each vector gets two writes:
 * the first acknowledges up to last_status_idx with interrupts still
 * masked, the second unmasks.  COAL_NOW then forces an immediate
 * interrupt so no event that arrived while masked is lost.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
449
/* Disable interrupts and wait for any in-flight handlers on all vectors
 * to finish.  intr_sem is bumped first so bnx2_netif_start() will not
 * re-enable until the matching decrement.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
460
35efa7c1
MC
/* Disable NAPI polling on every vector (blocks until polls finish). */
static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}
469
/* Re-enable NAPI polling on every vector. */
static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}
478
b6016b76
MC
/* Quiesce the device: mask and synchronize interrupts, then stop NAPI
 * and the TX queue if the interface is up.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
489
/* Undo bnx2_netif_stop().  Only the call that drops intr_sem to zero
 * actually restarts the interface, so nested stop/start pairs balance.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
501
35e9010b
MC
/* Free the DMA descriptor ring and the software buffer-tracking array of
 * every TX ring.  Safe to call on partially-allocated state.
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
521
bb4f98ab
MC
522static void
523bnx2_free_rx_mem(struct bnx2 *bp)
524{
525 int i;
526
527 for (i = 0; i < bp->num_rx_rings; i++) {
528 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
529 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
530 int j;
531
532 for (j = 0; j < bp->rx_max_ring; j++) {
533 if (rxr->rx_desc_ring[j])
534 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
535 rxr->rx_desc_ring[j],
536 rxr->rx_desc_mapping[j]);
537 rxr->rx_desc_ring[j] = NULL;
538 }
539 if (rxr->rx_buf_ring)
540 vfree(rxr->rx_buf_ring);
541 rxr->rx_buf_ring = NULL;
542
543 for (j = 0; j < bp->rx_max_pg_ring; j++) {
544 if (rxr->rx_pg_desc_ring[j])
545 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
546 rxr->rx_pg_desc_ring[i],
547 rxr->rx_pg_desc_mapping[i]);
548 rxr->rx_pg_desc_ring[i] = NULL;
549 }
550 if (rxr->rx_pg_ring)
551 vfree(rxr->rx_pg_ring);
552 rxr->rx_pg_ring = NULL;
553 }
554}
555
35e9010b
MC
/* Allocate the software buffer-tracking array and DMA descriptor ring
 * for every TX ring.  Returns 0 or -ENOMEM; on failure the caller
 * (bnx2_alloc_mem) frees everything via bnx2_free_mem().
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
577
bb4f98ab
MC
/* Allocate software buffer arrays and DMA descriptor rings for every RX
 * ring; the page rings are only allocated when rx_pg_ring_size is
 * nonzero (jumbo frames).  Returns 0 or -ENOMEM; on failure the caller
 * frees everything via bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		/* rx_max_pg_ring is 0 when page rings are unused, so this
		 * loop is a no-op in that case.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
626
b6016b76
MC
/* Free all device memory: TX/RX rings, 5709 context pages, and the
 * combined status + statistics block.  Safe after a partial
 * bnx2_alloc_mem() failure.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status and statistics blocks share one allocation; freeing the
	 * status block frees both (see bnx2_alloc_mem).
	 */
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
652
/* Allocate all device memory: the combined status + statistics block
 * (with per-vector MSI-X status sub-blocks when available), the 5709
 * context pages, and the RX/TX rings.  Returns 0 or -ENOMEM; on any
 * failure everything allocated so far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base (MSI/INTx) status block layout. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors get aligned MSI-X sub-blocks carved
		 * out of the same allocation.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block follows the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
729
e3648b3d
MC
/* Report the current link state (speed/duplex/autoneg status) to the
 * bootcode through the BNX2_LINK_STATUS shared-memory word.  Skipped
 * when a remote PHY manages the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice to get the current
			 * state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
788
9b1084b8
MC
789static char *
790bnx2_xceiver_str(struct bnx2 *bp)
791{
792 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 793 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
9b1084b8
MC
794 "Copper"));
795}
796
b6016b76
MC
/* Log the link state, update the carrier flag, and forward the state to
 * the bootcode via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
833
/* Resolve the negotiated flow-control setting into bp->flow_ctrl.
 *
 * Without full speed+flow-control autoneg the requested setting is used
 * directly (full duplex only).  The 5708 SerDes reports the result in a
 * status register; otherwise the local and partner advertisements are
 * compared per the IEEE 802.3 pause-resolution rules (Table 28B-3),
 * with 1000Base-X pause bits first remapped to their copper positions.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		/* Remap 1000Base-X pause bits to the copper bit positions
		 * so the resolution logic below works for both media.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
909
27a005b8
MC
/* Record link-up speed/duplex for the 5709 SerDes PHY by reading the
 * GP_STATUS block.  Forced-speed configs take bp->req_* directly.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* GP_STATUS lives in a separate register block; select it, read,
	 * then restore the default block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
948
/* Record link-up speed/duplex for the 5708 SerDes PHY from its 1000X
 * status register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
977
/* Record link-up speed/duplex for the 5706 SerDes PHY.  Speed is always
 * 1000; duplex comes from BMCR when forced, otherwise from the common
 * subset of local and partner 1000X advertisements.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1014
/* Determine speed/duplex for a copper PHY.  With autoneg the 1000Base-T
 * registers are checked first (partner abilities are shifted down 2 bits
 * to line up with local advertisement bits), then the 10/100
 * advertisement registers; no common ability means link down.  Without
 * autoneg the values are decoded from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* STAT1000 ability bits sit 2 positions above the
		 * corresponding CTRL1000 advertisement bits.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1080
/* Program the L2 context for one RX ring (@cid).  On the 5709 this also
 * sets the buffer low/high water marks used for flow-control pause
 * generation, scaled to 4-bit register fields.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* Clamp to the 4-bit register field. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1116
bb4f98ab
MC
1117static void
1118bnx2_init_all_rx_contexts(struct bnx2 *bp)
1119{
1120 int i;
1121 u32 cid;
1122
1123 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1124 if (i == 1)
1125 cid = RX_RSS_CID;
1126 bnx2_init_rx_context(bp, cid);
1127 }
1128}
1129
b6016b76
MC
1130static int
1131bnx2_set_mac_link(struct bnx2 *bp)
1132{
1133 u32 val;
1134
1135 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1136 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1137 (bp->duplex == DUPLEX_HALF)) {
1138 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1139 }
1140
1141 /* Configure the EMAC mode register. */
1142 val = REG_RD(bp, BNX2_EMAC_MODE);
1143
1144 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad 1145 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 1146 BNX2_EMAC_MODE_25G_MODE);
b6016b76
MC
1147
1148 if (bp->link_up) {
5b0c76ad
MC
1149 switch (bp->line_speed) {
1150 case SPEED_10:
59b47d8a
MC
1151 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1152 val |= BNX2_EMAC_MODE_PORT_MII_10M;
5b0c76ad
MC
1153 break;
1154 }
1155 /* fall through */
1156 case SPEED_100:
1157 val |= BNX2_EMAC_MODE_PORT_MII;
1158 break;
1159 case SPEED_2500:
59b47d8a 1160 val |= BNX2_EMAC_MODE_25G_MODE;
5b0c76ad
MC
1161 /* fall through */
1162 case SPEED_1000:
1163 val |= BNX2_EMAC_MODE_PORT_GMII;
1164 break;
1165 }
b6016b76
MC
1166 }
1167 else {
1168 val |= BNX2_EMAC_MODE_PORT_GMII;
1169 }
1170
1171 /* Set the MAC to operate in the appropriate duplex mode. */
1172 if (bp->duplex == DUPLEX_HALF)
1173 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1174 REG_WR(bp, BNX2_EMAC_MODE, val);
1175
1176 /* Enable/disable rx PAUSE. */
1177 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1178
1179 if (bp->flow_ctrl & FLOW_CTRL_RX)
1180 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1181 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1182
1183 /* Enable/disable tx PAUSE. */
1184 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1185 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1186
1187 if (bp->flow_ctrl & FLOW_CTRL_TX)
1188 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1189 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1190
1191 /* Acknowledge the interrupt. */
1192 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1193
83e3fc89 1194 if (CHIP_NUM(bp) == CHIP_NUM_5709)
bb4f98ab 1195 bnx2_init_all_rx_contexts(bp);
83e3fc89 1196
b6016b76
MC
1197 return 0;
1198}
1199
27a005b8
MC
1200static void
1201bnx2_enable_bmsr1(struct bnx2 *bp)
1202{
583c28e5 1203 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1204 (CHIP_NUM(bp) == CHIP_NUM_5709))
1205 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1206 MII_BNX2_BLK_ADDR_GP_STATUS);
1207}
1208
1209static void
1210bnx2_disable_bmsr1(struct bnx2 *bp)
1211{
583c28e5 1212 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1213 (CHIP_NUM(bp) == CHIP_NUM_5709))
1214 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1215 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1216}
1217
605a9e20
MC
1218static int
1219bnx2_test_and_enable_2g5(struct bnx2 *bp)
1220{
1221 u32 up1;
1222 int ret = 1;
1223
583c28e5 1224 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1225 return 0;
1226
1227 if (bp->autoneg & AUTONEG_SPEED)
1228 bp->advertising |= ADVERTISED_2500baseX_Full;
1229
27a005b8
MC
1230 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1231 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1232
605a9e20
MC
1233 bnx2_read_phy(bp, bp->mii_up1, &up1);
1234 if (!(up1 & BCM5708S_UP1_2G5)) {
1235 up1 |= BCM5708S_UP1_2G5;
1236 bnx2_write_phy(bp, bp->mii_up1, up1);
1237 ret = 0;
1238 }
1239
27a005b8
MC
1240 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1241 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1242 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1243
605a9e20
MC
1244 return ret;
1245}
1246
1247static int
1248bnx2_test_and_disable_2g5(struct bnx2 *bp)
1249{
1250 u32 up1;
1251 int ret = 0;
1252
583c28e5 1253 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1254 return 0;
1255
27a005b8
MC
1256 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1257 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1258
605a9e20
MC
1259 bnx2_read_phy(bp, bp->mii_up1, &up1);
1260 if (up1 & BCM5708S_UP1_2G5) {
1261 up1 &= ~BCM5708S_UP1_2G5;
1262 bnx2_write_phy(bp, bp->mii_up1, up1);
1263 ret = 1;
1264 }
1265
27a005b8
MC
1266 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1267 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1268 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1269
605a9e20
MC
1270 return ret;
1271}
1272
1273static void
1274bnx2_enable_forced_2g5(struct bnx2 *bp)
1275{
1276 u32 bmcr;
1277
583c28e5 1278 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1279 return;
1280
27a005b8
MC
1281 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1282 u32 val;
1283
1284 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1285 MII_BNX2_BLK_ADDR_SERDES_DIG);
1286 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1287 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1288 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1289 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1290
1291 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1292 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1293 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1294
1295 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1296 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1297 bmcr |= BCM5708S_BMCR_FORCE_2500;
1298 }
1299
1300 if (bp->autoneg & AUTONEG_SPEED) {
1301 bmcr &= ~BMCR_ANENABLE;
1302 if (bp->req_duplex == DUPLEX_FULL)
1303 bmcr |= BMCR_FULLDPLX;
1304 }
1305 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1306}
1307
1308static void
1309bnx2_disable_forced_2g5(struct bnx2 *bp)
1310{
1311 u32 bmcr;
1312
583c28e5 1313 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1314 return;
1315
27a005b8
MC
1316 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1317 u32 val;
1318
1319 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1320 MII_BNX2_BLK_ADDR_SERDES_DIG);
1321 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1322 val &= ~MII_BNX2_SD_MISC1_FORCE;
1323 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1324
1325 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1326 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1327 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1328
1329 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1330 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1331 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1332 }
1333
1334 if (bp->autoneg & AUTONEG_SPEED)
1335 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1336 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1337}
1338
b2fadeae
MC
1339static void
1340bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1341{
1342 u32 val;
1343
1344 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1345 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1346 if (start)
1347 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1348 else
1349 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1350}
1351
/* Re-evaluate the PHY link state and bring the MAC in sync with it.
 * Called with phy_lock held.  Returns 0 (no failure paths).
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode, pretend the link is always up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware owns the PHY; link events arrive via the mailbox. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read the status register twice: the link-down indication is
	 * latched, so the second read reflects the current state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced-down 5706S link. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Autoneg debug shadow register; double read to clear
		 * latched bits. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* On 5706 SerDes, trust the EMAC link indication plus
		 * sync status over the PHY's own BMSR_LSTATUS. */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link went down: drop any forced 2.5G mode so the next
		 * autoneg can start clean. */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* If we had fallen back to parallel detection, re-enable
		 * autonegotiation now. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log on an actual state transition. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1435
1436static int
1437bnx2_reset_phy(struct bnx2 *bp)
1438{
1439 int i;
1440 u32 reg;
1441
ca58c3af 1442 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1443
1444#define PHY_RESET_MAX_WAIT 100
1445 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1446 udelay(10);
1447
ca58c3af 1448 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1449 if (!(reg & BMCR_RESET)) {
1450 udelay(20);
1451 break;
1452 }
1453 }
1454 if (i == PHY_RESET_MAX_WAIT) {
1455 return -EBUSY;
1456 }
1457 return 0;
1458}
1459
1460static u32
1461bnx2_phy_get_pause_adv(struct bnx2 *bp)
1462{
1463 u32 adv = 0;
1464
1465 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1466 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1467
583c28e5 1468 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1469 adv = ADVERTISE_1000XPAUSE;
1470 }
1471 else {
1472 adv = ADVERTISE_PAUSE_CAP;
1473 }
1474 }
1475 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1476 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1477 adv = ADVERTISE_1000XPSE_ASYM;
1478 }
1479 else {
1480 adv = ADVERTISE_PAUSE_ASYM;
1481 }
1482 }
1483 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1484 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1485 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1486 }
1487 else {
1488 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1489 }
1490 }
1491 return adv;
1492}
1493
0d8a6571
MC
1494static int bnx2_fw_sync(struct bnx2 *, u32, int);
1495
b6016b76 1496static int
0d8a6571
MC
1497bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1498{
1499 u32 speed_arg = 0, pause_adv;
1500
1501 pause_adv = bnx2_phy_get_pause_adv(bp);
1502
1503 if (bp->autoneg & AUTONEG_SPEED) {
1504 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1505 if (bp->advertising & ADVERTISED_10baseT_Half)
1506 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1507 if (bp->advertising & ADVERTISED_10baseT_Full)
1508 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1509 if (bp->advertising & ADVERTISED_100baseT_Half)
1510 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1511 if (bp->advertising & ADVERTISED_100baseT_Full)
1512 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1513 if (bp->advertising & ADVERTISED_1000baseT_Full)
1514 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1515 if (bp->advertising & ADVERTISED_2500baseX_Full)
1516 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1517 } else {
1518 if (bp->req_line_speed == SPEED_2500)
1519 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1520 else if (bp->req_line_speed == SPEED_1000)
1521 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1522 else if (bp->req_line_speed == SPEED_100) {
1523 if (bp->req_duplex == DUPLEX_FULL)
1524 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1525 else
1526 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1527 } else if (bp->req_line_speed == SPEED_10) {
1528 if (bp->req_duplex == DUPLEX_FULL)
1529 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1530 else
1531 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1532 }
1533 }
1534
1535 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1536 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
c26736ec 1537 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
0d8a6571
MC
1538 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1539
1540 if (port == PORT_TP)
1541 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1542 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1543
2726d6e1 1544 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
0d8a6571
MC
1545
1546 spin_unlock_bh(&bp->phy_lock);
1547 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1548 spin_lock_bh(&bp->phy_lock);
1549
1550 return 0;
1551}
1552
/* Configure a SerDes PHY for the requested link settings (forced or
 * autonegotiated).  Called with phy_lock held; the lock is dropped
 * around sleeps.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed PHY: delegate to the remote-phy path. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability bit requires bouncing
		 * the link so the partner re-trains. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear the 5709's forced-speed bit. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* No register change; just refresh the MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may schedule; drop the PHY lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1667
/* Advertisement-mask helpers for the ethtool paths.
 * NOTE: ETHTOOL_ALL_FIBRE_SPEED expands to an expression that reads a
 * local/parameter named 'bp' — it can only be used where a
 * 'struct bnx2 *bp' is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII-register (ADVERTISE_*) encodings of the same speed sets. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1682
0d8a6571
MC
1683static void
1684bnx2_set_default_remote_link(struct bnx2 *bp)
1685{
1686 u32 link;
1687
1688 if (bp->phy_port == PORT_TP)
2726d6e1 1689 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
0d8a6571 1690 else
2726d6e1 1691 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
0d8a6571
MC
1692
1693 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1694 bp->req_line_speed = 0;
1695 bp->autoneg |= AUTONEG_SPEED;
1696 bp->advertising = ADVERTISED_Autoneg;
1697 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1698 bp->advertising |= ADVERTISED_10baseT_Half;
1699 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1700 bp->advertising |= ADVERTISED_10baseT_Full;
1701 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1702 bp->advertising |= ADVERTISED_100baseT_Half;
1703 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1704 bp->advertising |= ADVERTISED_100baseT_Full;
1705 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1706 bp->advertising |= ADVERTISED_1000baseT_Full;
1707 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1708 bp->advertising |= ADVERTISED_2500baseX_Full;
1709 } else {
1710 bp->autoneg = 0;
1711 bp->advertising = 0;
1712 bp->req_duplex = DUPLEX_FULL;
1713 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1714 bp->req_line_speed = SPEED_10;
1715 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1716 bp->req_duplex = DUPLEX_HALF;
1717 }
1718 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1719 bp->req_line_speed = SPEED_100;
1720 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1721 bp->req_duplex = DUPLEX_HALF;
1722 }
1723 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1724 bp->req_line_speed = SPEED_1000;
1725 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1726 bp->req_line_speed = SPEED_2500;
1727 }
1728}
1729
deaf391b
MC
1730static void
1731bnx2_set_default_link(struct bnx2 *bp)
1732{
ab59859d
HH
1733 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1734 bnx2_set_default_remote_link(bp);
1735 return;
1736 }
0d8a6571 1737
deaf391b
MC
1738 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1739 bp->req_line_speed = 0;
583c28e5 1740 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
deaf391b
MC
1741 u32 reg;
1742
1743 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1744
2726d6e1 1745 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
deaf391b
MC
1746 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1747 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1748 bp->autoneg = 0;
1749 bp->req_line_speed = bp->line_speed = SPEED_1000;
1750 bp->req_duplex = DUPLEX_FULL;
1751 }
1752 } else
1753 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1754}
1755
df149d70
MC
1756static void
1757bnx2_send_heart_beat(struct bnx2 *bp)
1758{
1759 u32 msg;
1760 u32 addr;
1761
1762 spin_lock(&bp->indirect_lock);
1763 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1764 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1765 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1766 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1767 spin_unlock(&bp->indirect_lock);
1768}
1769
/* Handle a link-status event reported by the management firmware:
 * decode the shared-memory status word into link state, speed,
 * duplex, flow control and media type, then update the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware asks for a heartbeat via this status bit. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets duplex then falls into the matching
		 * FULL case to set the line speed. */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		/* Flow control: honor the forced setting unless both
		 * speed and flow-control autoneg are enabled. */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media type may have changed (dual-media ports); pick
		 * up the new defaults if it did. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1846
1847static int
1848bnx2_set_remote_link(struct bnx2 *bp)
1849{
1850 u32 evt_code;
1851
2726d6e1 1852 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
1853 switch (evt_code) {
1854 case BNX2_FW_EVT_CODE_LINK_EVENT:
1855 bnx2_remote_phy_event(bp);
1856 break;
1857 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1858 default:
df149d70 1859 bnx2_send_heart_beat(bp);
0d8a6571
MC
1860 break;
1861 }
1862 return 0;
1863}
1864
/* Configure a copper PHY for the requested link settings, either by
 * (re)starting autonegotiation or by forcing speed/duplex.  Called
 * with phy_lock held; the lock is dropped around sleeps.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisements, masked to the bits we manage. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisements from ethtool flags. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg if something actually changed or
		 * autoneg was disabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Double read: link status is latched low. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may schedule; drop the PHY lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1961
1962static int
0d8a6571 1963bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1964{
1965 if (bp->loopback == MAC_LOOPBACK)
1966 return 0;
1967
583c28e5 1968 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 1969 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1970 }
1971 else {
1972 return (bnx2_setup_copper_phy(bp));
1973 }
1974}
1975
/* One-time init of the 5709 SerDes PHY.  The 5709 places the standard
 * MII registers at offset +0x10 and uses a block-address scheme for
 * the extended registers.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers are shifted by 0x10 on this chip. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Map the autoneg MMD via the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Fiber mode, no media auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the PHY supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable next-page / Broadcom autoneg mode extensions. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Clause 73 BAM autoneg configuration. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Restore the default block selection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2025
/* One-time init of the 5708 SerDes PHY: fiber mode, auto-detect,
 * optional 2.5G advertisement, plus NVRAM-driven TX amplitude
 * adjustments.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a TX amplitude workaround. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from NVRAM, applied only on
	 * backplane designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2083
/* One-time init of the 5706 SerDes PHY, including jumbo-frame
 * (mtu > 1500) packet-length configuration via shadow registers
 * 0x18/0x1c.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): undocumented GP_HW_CTL0 value for the 5706 —
	 * kept as-is from vendor code. */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length configuration. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2121
/* Initialize a copper (twisted-pair) PHY.
 *
 * @bp:        driver private state
 * @reset_phy: nonzero to soft-reset the PHY first
 *
 * Applies errata workarounds selected by phy_flags, configures the
 * extended packet length bit according to the MTU, and enables the
 * ethernet@wirespeed feature.  The raw 0x15/0x17/0x18 writes are vendor
 * shadow/expansion register sequences; their order must not change.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* CRC errata workaround: fixed sequence of expansion register
         * writes (values are opaque vendor magic).
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Disable early DAC (clear bit 8 of DSP expand register 8). */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear extended packet length for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2173
2174
/* Top-level PHY initialization dispatcher.
 *
 * @bp:        driver private state
 * @reset_phy: nonzero to reset the PHY during init
 *
 * Sets the default interrupt mode, MII register offsets, and link
 * attention enable, reads the PHY ID, then delegates to the chip-specific
 * SerDes or copper init routine.  Remote-PHY capable devices skip local
 * PHY programming entirely.  Finishes by calling bnx2_setup_phy().
 *
 * Returns 0 on success or the error from the chip-specific init /
 * bnx2_setup_phy().  Caller is expected to hold phy_lock (not visible
 * here — NOTE(review): confirm against callers).
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;
        int rc = 0;

        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

        /* Default MII register offsets; 5709 SerDes init may override. */
        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* When the PHY is managed by remote firmware, skip local setup. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                goto setup_phy;

        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp, reset_phy);
        }
        else {
                rc = bnx2_init_copper_phy(bp, reset_phy);
        }

setup_phy:
        if (!rc)
                rc = bnx2_setup_phy(bp, bp->phy_port);

        return rc;
}
2218
/* Put the EMAC into internal MAC loopback mode (used by self-test).
 * Forces the link state so transmitted frames loop back internally.
 * Always returns 0.
 */
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
        u32 mac_mode;

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~BNX2_EMAC_MODE_PORT;
        mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
2231
bc5a0690
MC
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback mode at 1000/full (used by self-test).
 *
 * Writes BMCR under phy_lock, waits up to ~1s for the loopback link to
 * settle, then forces the EMAC into GMII mode with the loopback-related
 * mode bits cleared.  Returns 0 on success or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        /* Poll for the loopback link to come up (10 x 100ms max). */
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
2263
/* Send a message to the bootcode firmware via shared memory and wait
 * for its acknowledgement.
 *
 * @bp:       driver private state
 * @msg_data: message code/data (a sequence number is OR'ed in here)
 * @silent:   suppress the timeout error printk when nonzero
 *
 * Returns 0 on success (or immediately for WAIT0 messages, which do not
 * require an ack), -EBUSY on ack timeout (after notifying the firmware
 * of the timeout), or -EIO if the firmware reported a non-OK status.
 * Sleeps (msleep), so it must not be called from atomic context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
        int i;
        u32 val;

        /* Tag the message with a new sequence number so the ack can be
         * matched against this particular request.
         */
        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        /* wait for an acknowledgement. */
        for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require an acknowledgement. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2306
/* Initialize the 5709's host-memory-backed context: enable the context
 * engine, wait for its memory init to complete, then register each
 * pre-allocated context page (bp->ctx_blk[]) with the chip's host page
 * table, polling for each write request to be accepted.
 *
 * Returns 0 on success, -EBUSY if the hardware does not respond within
 * the polling budget, or -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* Wait for the context memory initialization to finish. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Program the DMA address of page i into the chip's host
                 * page table, then poll until the write is accepted.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2354
/* Zero out the on-chip context memory for all 96 connection IDs on
 * chips that keep context on-chip (pre-5709 parts).
 *
 * On 5706 A0 a remapping of certain VCIDs to alternate physical CIDs is
 * applied (hardware erratum); other revisions map VCID to PCID 1:1.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        vcid_addr = GET_PCID_ADDR(vcid);
                        /* A0 erratum: VCIDs with bit 3 set map to a
                         * relocated physical CID.
                         */
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* Each context may span several physical pages. */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
2397
/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * from the chip's internal pool, keep the good ones (bit 9 clear in the
 * returned value marks good memory), and free only those back — the bad
 * mbufs remain allocated and are thus never used for real traffic.
 *
 * Returns 0 on success or -ENOMEM if the temporary tracking array
 * cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        /* Up to 512 mbuf handles may need to be tracked. */
        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                printk(KERN_ERR PFX "Failed to allocate memory in "
                                    "bnx2_alloc_bad_rbuf\n");
                return -ENOMEM;
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
                                BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];
                val = (val << 9) | val | 1;

                bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}
2449
2450static void
6aa20a22 2451bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2452{
2453 u32 val;
2454 u8 *mac_addr = bp->dev->dev_addr;
2455
2456 val = (mac_addr[0] << 8) | mac_addr[1];
2457
2458 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2459
6aa20a22 2460 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2461 (mac_addr[4] << 8) | mac_addr[5];
2462
2463 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2464}
2465
/* Allocate one page for the RX page ring slot @index, DMA-map it, and
 * write its bus address into the corresponding page-ring descriptor.
 *
 * Returns 0 on success or -ENOMEM if the page allocation fails.
 * Called from the RX fast path, hence GFP_ATOMIC.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        dma_addr_t mapping;
        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
        struct rx_bd *rxbd =
                &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return -ENOMEM;
        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
        /* Record the page and its DMA mapping for later unmap/free. */
        rx_pg->page = page;
        pci_unmap_addr_set(rx_pg, mapping, mapping);
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        return 0;
}
2485
/* Unmap and free the page held in RX page-ring slot @index, if any.
 * Clears the slot so a double free is impossible.
 */
static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
        struct page *page = rx_pg->page;

        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
                       PCI_DMA_FROMDEVICE);

        __free_page(page);
        rx_pg->page = NULL;
}
2501
/* Allocate and DMA-map a new receive skb for RX ring slot @index, and
 * publish its bus address in the corresponding descriptor.
 *
 * The skb data pointer is aligned to BNX2_RX_ALIGN before mapping.
 * On success the ring's producer byte-sequence counter is advanced.
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }

        /* Align skb->data to the hardware's required RX alignment. */
        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        return 0;
}
2532
/* Check whether the attention bit(s) in @event have changed state in
 * the status block, and acknowledge the change to the chip by setting
 * or clearing the corresponding bit via PCICFG.
 *
 * Returns 1 if the event state changed (i.e. the event fired), 0
 * otherwise.
 */
static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 new_link_state, old_link_state;
        int is_set = 1;

        /* The event has fired when the raw attention bit differs from
         * its acknowledged copy.
         */
        new_link_state = sblk->status_attn_bits & event;
        old_link_state = sblk->status_attn_bits_ack & event;
        if (new_link_state != old_link_state) {
                if (new_link_state)
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
                else
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
        } else
                is_set = 0;

        return is_set;
}
2552
/* Handle PHY-related attention events from the status block: link state
 * changes and (for remote-PHY setups) firmware timer-abort events.
 * Takes phy_lock for the duration since both handlers touch PHY state.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        spin_lock(&bp->phy_lock);

        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
                bnx2_set_link(bp);
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);

        spin_unlock(&bp->phy_lock);

}
2566
ead7270b 2567static inline u16
35efa7c1 2568bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
ead7270b
MC
2569{
2570 u16 cons;
2571
43e80b89
MC
2572 /* Tell compiler that status block fields can change. */
2573 barrier();
2574 cons = *bnapi->hw_tx_cons_ptr;
ead7270b
MC
2575 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2576 cons++;
2577 return cons;
2578}
2579
/* Reclaim completed TX descriptors, up to @budget packets.
 *
 * Walks the ring from the software consumer to the hardware consumer,
 * unmapping the head buffer and all fragment pages of each completed
 * skb before freeing it.  TSO packets can complete partially, so the
 * walk stops early if the last BD of a GSO skb is not yet consumed.
 * Wakes the TX queue when enough space has been reclaimed.
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0;

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        /* Account for the ring's next-page entry. */
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Signed 16-bit compare handles index wrap. */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(bp->dev)) &&
            (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                netif_tx_lock(bp->dev);
                /* Re-check under the tx lock to avoid a wake/stop race. */
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
        return tx_pkt;
}
2662
/* Recycle @count RX page-ring entries from the consumer back to the
 * producer side without allocating new pages.
 *
 * If @skb is non-NULL, its last fragment page is detached, re-mapped
 * for DMA, and placed back in the ring on the first iteration, after
 * which the skb is freed.  For the remaining entries the page, its DMA
 * mapping, and the descriptor address are copied from the consumer slot
 * to the producer slot.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        dma_addr_t mapping;
        int i;
        u16 hw_prod = rxr->rx_pg_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                if (i == 0 && skb) {
                        struct page *page;
                        struct skb_shared_info *shinfo;

                        /* Steal the skb's last fragment page back into
                         * the ring, then drop the skb.
                         */
                        shinfo = skb_shinfo(skb);
                        shinfo->nr_frags--;
                        page = shinfo->frags[shinfo->nr_frags].page;
                        shinfo->frags[shinfo->nr_frags].page = NULL;
                        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                        cons_rx_pg->page = page;
                        pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
                        dev_kfree_skb(skb);
                }
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2712
/* Recycle an RX skb from ring slot @cons to producer slot @prod
 * without re-allocating: syncs the buffer header back to the device,
 * moves the skb pointer, and (when the slots differ) copies the DMA
 * mapping and descriptor address from the consumer to the producer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Only the header area was pulled to the CPU; hand it back. */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2742
/* Finish assembling a received packet into @skb.
 *
 * @len:      packet length as reported by the hardware (without the
 *            4-byte FCS adjustment already applied by the caller)
 * @hdr_len:  0 for a linear packet; otherwise the number of header
 *            bytes in the skb with the remainder in page-ring pages
 * @dma_addr: DMA mapping of the skb's data buffer
 * @ring_idx: packed (cons << 16) | prod ring indices
 *
 * First replenishes the ring slot; if that fails the skb (and any
 * pages) are recycled and the packet is dropped (returns the error).
 * For split packets, attaches the page-ring pages as skb fragments,
 * trimming the 4 FCS bytes from the tail and replenishing each page
 * slot as it goes.  Returns 0 on success or a -errno on failure.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        err = bnx2_alloc_rx_skb(bp, rxr, prod);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        /* raw_len includes the 4-byte FCS. */
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Fully linear packet: just set the length. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        /* If only FCS bytes (or less) remain in this
                         * page, trim them off the already-attached data
                         * and recycle the remaining pages.
                         */
                        if (unlikely(frag_len <= 4)) {
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        /* The last page carries the 4-byte FCS; drop it. */
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
2835
c09c2627 2836static inline u16
35efa7c1 2837bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
c09c2627 2838{
bb4f98ab
MC
2839 u16 cons;
2840
43e80b89
MC
2841 /* Tell compiler that status block fields can change. */
2842 barrier();
2843 cons = *bnapi->hw_rx_cons_ptr;
c09c2627
MC
2844 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2845 cons++;
2846 return cons;
2847}
2848
/* RX fast path: process up to @budget received packets.
 *
 * For each completed descriptor: sync the header to the CPU, read the
 * frame header (l2_fhdr) that the chip prepends, drop errored frames by
 * recycling their buffers, copy small packets into a fresh skb, and
 * hand larger ones to bnx2_rx_skb() for (possibly paged) assembly.
 * Checksum offload results and VLAN tags are applied before passing the
 * skb up the stack.  Finally the new producer indices are written back
 * to the chip.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Only the header area is needed to inspect the frame. */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len;

                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        /* Errored frame: recycle the buffer and drop. */
                        bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                          sw_ring_prod);
                        goto next_rx;
                }
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        /* For split frames the chip stores the header
                         * length in the ip_xsum field.
                         */
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* Strip the 4-byte FCS. */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                                         BNX2_RX_OFFSET - 2,
                                                         new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, rxr, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless they are VLAN tagged
                 * (0x8100 == ETH_P_8021Q).
                 */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                                   L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        rxr->rx_cons = sw_cons;
        rxr->rx_prod = sw_prod;

        if (pg_ring_used)
                REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

        REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
2996
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct net_device *dev = bp->dev;

        prefetch(bnapi->status_blk.msi);
        /* Mask further interrupts until NAPI polling completes. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev, &bnapi->napi);

        return IRQ_HANDLED;
}
3020
/* MSI ISR for one-shot mode: the hardware auto-masks the interrupt, so
 * unlike bnx2_msi() no explicit mask write is needed — just schedule
 * NAPI polling.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct net_device *dev = bp->dev;

        prefetch(bnapi->status_blk.msi);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev, &bnapi->napi);

        return IRQ_HANDLED;
}
3038
/* Legacy INTx interrupt handler (possibly shared line).
 *
 * Returns IRQ_NONE when the interrupt was not ours (status index
 * unchanged and INTA deasserted), otherwise masks the interrupt and
 * schedules NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct net_device *dev = bp->dev;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bnapi->napi);
        }

        return IRQ_HANDLED;
}
3078
f4e418f7 3079static inline int
43e80b89 3080bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3081{
35e9010b 3082 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3083 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3084
bb4f98ab 3085 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3086 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3087 return 1;
43e80b89
MC
3088 return 0;
3089}
3090
/* Attention events serviced by the slow (link) path. */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return nonzero if there is any work for the NAPI poll loop: either
 * ring completions (fast work) or an unacknowledged attention event.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;

        if (bnx2_has_fast_work(bnapi))
                return 1;

        /* An attention event is pending when the raw bits differ from
         * the acknowledged bits.
         */
        if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
            (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
                return 1;

        return 0;
}
3108
/* Service pending link/attention events from the NAPI poll loop and
 * nudge the host coalescing block afterwards so transient status during
 * link changes is picked up.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }
}
3128
/* Do one round of fast-path work for the NAPI poll loop: reclaim TX
 * completions (unbounded) and receive up to the remaining budget.
 *
 * @work_done: RX packets already processed this poll
 * @budget:    total RX budget for this poll
 *
 * Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                          int work_done, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
                bnx2_tx_int(bp, bnapi, 0);

        if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
                work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

        return work_done;
}
3143
f0ea2e63
MC
/* NAPI poll handler for the extra MSI-X vectors (rx/tx fast path only;
 * link events are handled by the base vector's bnx2_poll()).
 *
 * Loops until either the budget is exhausted or no fast-path work
 * remains, then completes NAPI and re-enables this vector's interrupt
 * by writing the latest status index to the INT_ACK register.
 * Returns the number of rx packets processed.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			netif_rx_complete(bp->dev, napi);
			/* Ack up to last_status_idx and unmask this vector. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3170
6f535763
DM
/* Main NAPI poll handler (INTA, MSI, and the base MSI-X vector).
 *
 * Each iteration services link/attention events, then rx/tx work.  When
 * no work remains, NAPI is completed and interrupts are re-enabled via
 * the INT_ACK register.  For legacy INTA a two-step write (mask, then
 * unmask) is required; MSI/MSI-X needs only the single unmasking write.
 * Returns the number of rx packets processed (at most @budget).
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* Legacy INTA: first ack with the line masked ... */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			/* ... then unmask. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3214
932ff279 3215/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
3216 * from set_multicast.
3217 */
3218static void
3219bnx2_set_rx_mode(struct net_device *dev)
3220{
972ec0d4 3221 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
3222 u32 rx_mode, sort_mode;
3223 int i;
b6016b76 3224
c770a65c 3225 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
3226
3227 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3228 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3229 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3230#ifdef BCM_VLAN
f86e82fb 3231 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
b6016b76 3232 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76 3233#else
f86e82fb 3234 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
e29054f9 3235 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
3236#endif
3237 if (dev->flags & IFF_PROMISC) {
3238 /* Promiscuous mode. */
3239 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
3240 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3241 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
3242 }
3243 else if (dev->flags & IFF_ALLMULTI) {
3244 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3245 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3246 0xffffffff);
3247 }
3248 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3249 }
3250 else {
3251 /* Accept one or more multicast(s). */
3252 struct dev_mc_list *mclist;
3253 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3254 u32 regidx;
3255 u32 bit;
3256 u32 crc;
3257
3258 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3259
3260 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3261 i++, mclist = mclist->next) {
3262
3263 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3264 bit = crc & 0xff;
3265 regidx = (bit & 0xe0) >> 5;
3266 bit &= 0x1f;
3267 mc_filter[regidx] |= (1 << bit);
3268 }
3269
3270 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3271 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3272 mc_filter[i]);
3273 }
3274
3275 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3276 }
3277
3278 if (rx_mode != bp->rx_mode) {
3279 bp->rx_mode = rx_mode;
3280 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3281 }
3282
3283 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3284 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3285 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3286
c770a65c 3287 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3288}
3289
/* Download firmware into one of the two RV2P (rx v2 processor) engines.
 *
 * @rv2p_code: little-endian instruction stream (pairs of 32-bit words)
 * @rv2p_code_len: length in bytes; consumed 8 bytes per instruction
 * @rv2p_proc: RV2P_PROC1 or RV2P_PROC2, selecting the target engine
 *
 * For PROC2 on a 5709, the BD page size field inside the code image is
 * patched first to match the host page size.  The processor is left in
 * reset; it is un-stalled later by the caller's init sequence.
 */
static void
load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
	     u32 rv2p_proc)
{
	int i;
	u32 val;

	/* Patch the max-BD-page-size word for 5709 (Xinan) PROC2 images. */
	if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
		val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
		val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
		rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
	}

	/* Write each 64-bit instruction, then commit it at index i/8. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3328
af3ee519 3329static int
10343cca 3330load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
b6016b76
MC
3331{
3332 u32 offset;
3333 u32 val;
af3ee519 3334 int rc;
b6016b76
MC
3335
3336 /* Halt the CPU. */
2726d6e1 3337 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3338 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3339 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3340 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3341
3342 /* Load the Text area. */
3343 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519 3344 if (fw->gz_text) {
b6016b76
MC
3345 int j;
3346
ea1f8d5c
MC
3347 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3348 fw->gz_text_len);
3349 if (rc < 0)
b3448b0b 3350 return rc;
ea1f8d5c 3351
b6016b76 3352 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2726d6e1 3353 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
b6016b76
MC
3354 }
3355 }
3356
3357 /* Load the Data area. */
3358 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3359 if (fw->data) {
3360 int j;
3361
3362 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2726d6e1 3363 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
b6016b76
MC
3364 }
3365 }
3366
3367 /* Load the SBSS area. */
3368 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
ea1f8d5c 3369 if (fw->sbss_len) {
b6016b76
MC
3370 int j;
3371
3372 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2726d6e1 3373 bnx2_reg_wr_ind(bp, offset, 0);
b6016b76
MC
3374 }
3375 }
3376
3377 /* Load the BSS area. */
3378 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
ea1f8d5c 3379 if (fw->bss_len) {
b6016b76
MC
3380 int j;
3381
3382 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2726d6e1 3383 bnx2_reg_wr_ind(bp, offset, 0);
b6016b76
MC
3384 }
3385 }
3386
3387 /* Load the Read-Only area. */
3388 offset = cpu_reg->spad_base +
3389 (fw->rodata_addr - cpu_reg->mips_view_base);
3390 if (fw->rodata) {
3391 int j;
3392
3393 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2726d6e1 3394 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
b6016b76
MC
3395 }
3396 }
3397
3398 /* Clear the pre-fetch instruction. */
2726d6e1
MC
3399 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3400 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
b6016b76
MC
3401
3402 /* Start the CPU. */
2726d6e1 3403 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3404 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3405 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3406 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3407
3408 return 0;
b6016b76
MC
3409}
3410
fba9fe91 3411static int
b6016b76
MC
3412bnx2_init_cpus(struct bnx2 *bp)
3413{
af3ee519 3414 struct fw_info *fw;
110d0ef9
MC
3415 int rc, rv2p_len;
3416 void *text, *rv2p;
b6016b76
MC
3417
3418 /* Initialize the RV2P processor. */
b3448b0b
DV
3419 text = vmalloc(FW_BUF_SIZE);
3420 if (!text)
3421 return -ENOMEM;
110d0ef9
MC
3422 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3423 rv2p = bnx2_xi_rv2p_proc1;
3424 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3425 } else {
3426 rv2p = bnx2_rv2p_proc1;
3427 rv2p_len = sizeof(bnx2_rv2p_proc1);
3428 }
3429 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3430 if (rc < 0)
fba9fe91 3431 goto init_cpu_err;
ea1f8d5c 3432
b3448b0b 3433 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
fba9fe91 3434
110d0ef9
MC
3435 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3436 rv2p = bnx2_xi_rv2p_proc2;
3437 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3438 } else {
3439 rv2p = bnx2_rv2p_proc2;
3440 rv2p_len = sizeof(bnx2_rv2p_proc2);
3441 }
3442 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3443 if (rc < 0)
fba9fe91 3444 goto init_cpu_err;
ea1f8d5c 3445
b3448b0b 3446 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
b6016b76
MC
3447
3448 /* Initialize the RX Processor. */
d43584c8
MC
3449 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3450 fw = &bnx2_rxp_fw_09;
3451 else
3452 fw = &bnx2_rxp_fw_06;
fba9fe91 3453
ea1f8d5c 3454 fw->text = text;
10343cca 3455 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
fba9fe91
MC
3456 if (rc)
3457 goto init_cpu_err;
3458
b6016b76 3459 /* Initialize the TX Processor. */
d43584c8
MC
3460 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3461 fw = &bnx2_txp_fw_09;
3462 else
3463 fw = &bnx2_txp_fw_06;
fba9fe91 3464
ea1f8d5c 3465 fw->text = text;
10343cca 3466 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
fba9fe91
MC
3467 if (rc)
3468 goto init_cpu_err;
3469
b6016b76 3470 /* Initialize the TX Patch-up Processor. */
d43584c8
MC
3471 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3472 fw = &bnx2_tpat_fw_09;
3473 else
3474 fw = &bnx2_tpat_fw_06;
fba9fe91 3475
ea1f8d5c 3476 fw->text = text;
10343cca 3477 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
fba9fe91
MC
3478 if (rc)
3479 goto init_cpu_err;
3480
b6016b76 3481 /* Initialize the Completion Processor. */
d43584c8
MC
3482 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3483 fw = &bnx2_com_fw_09;
3484 else
3485 fw = &bnx2_com_fw_06;
fba9fe91 3486
ea1f8d5c 3487 fw->text = text;
10343cca 3488 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
fba9fe91
MC
3489 if (rc)
3490 goto init_cpu_err;
3491
d43584c8 3492 /* Initialize the Command Processor. */
110d0ef9 3493 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d43584c8 3494 fw = &bnx2_cp_fw_09;
110d0ef9
MC
3495 else
3496 fw = &bnx2_cp_fw_06;
3497
3498 fw->text = text;
10343cca 3499 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
b6016b76 3500
fba9fe91 3501init_cpu_err:
ea1f8d5c 3502 vfree(text);
fba9fe91 3503 return rc;
b6016b76
MC
3504}
3505
/* Transition the device between PCI power states.
 *
 * PCI_D0: wake the chip (with the mandatory delay when leaving D3hot)
 * and clear the magic/ACPI packet receive configuration.
 *
 * PCI_D3hot: if Wake-on-LAN is enabled, reprogram the PHY for low-speed
 * advertisement (copper only), configure the EMAC for magic/ACPI packet
 * reception, open the multicast filters, keep EMAC/RPM enabled, and
 * notify firmware; then write the D3hot state (with PME enable when WoL
 * is on) into PM_CTRL.  After that write the chip must not be accessed
 * until it is brought back to D0.
 *
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so the
			 * link stays up at low power; restore afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot when WoL is on. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3642
3643static int
3644bnx2_acquire_nvram_lock(struct bnx2 *bp)
3645{
3646 u32 val;
3647 int j;
3648
3649 /* Request access to the flash interface. */
3650 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3651 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3652 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3653 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3654 break;
3655
3656 udelay(5);
3657 }
3658
3659 if (j >= NVRAM_TIMEOUT_COUNT)
3660 return -EBUSY;
3661
3662 return 0;
3663}
3664
3665static int
3666bnx2_release_nvram_lock(struct bnx2 *bp)
3667{
3668 int j;
3669 u32 val;
3670
3671 /* Relinquish nvram interface. */
3672 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3673
3674 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3675 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3676 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3677 break;
3678
3679 udelay(5);
3680 }
3681
3682 if (j >= NVRAM_TIMEOUT_COUNT)
3683 return -EBUSY;
3684
3685 return 0;
3686}
3687
3688
/* Unlock NVRAM writes: set the PCI write-enable bit in MISC_CFG, and for
 * flash parts that require it (BNX2_NV_WREN flag) issue a WREN command
 * and poll for completion.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
3717
3718static void
3719bnx2_disable_nvram_write(struct bnx2 *bp)
3720{
3721 u32 val;
3722
3723 val = REG_RD(bp, BNX2_MISC_CFG);
3724 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3725}
3726
3727
3728static void
3729bnx2_enable_nvram_access(struct bnx2 *bp)
3730{
3731 u32 val;
3732
3733 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3734 /* Enable both bits, even on read. */
6aa20a22 3735 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3736 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3737}
3738
3739static void
3740bnx2_disable_nvram_access(struct bnx2 *bp)
3741{
3742 u32 val;
3743
3744 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3745 /* Disable both bits, even after read. */
6aa20a22 3746 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3747 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3748 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3749}
3750
/* Erase one flash page at @offset.  A no-op (returns 0) for buffered
 * flash parts, which need no explicit erase.  Returns 0 on success,
 * -EBUSY if the erase does not complete within the timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3790
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored
 * big-endian, i.e. in flash byte order).  @cmd_flags carries the
 * FIRST/LAST framing bits for multi-word transactions.  For flash parts
 * with the TRANSLATE flag, the linear offset is converted to the
 * page/byte addressing the part expects.  Returns 0 or -EBUSY.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3834
3835
/* Write one 32-bit word (@val, in flash byte order / big-endian) to
 * NVRAM at @offset.  @cmd_flags carries the FIRST/LAST framing bits.
 * Offset translation mirrors bnx2_nvram_read_dword().
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3879
/* Detect the attached flash/EEPROM part and record it in bp->flash_info,
 * then determine the usable flash size.
 *
 * 5709 always uses the fixed flash_5709 descriptor.  Other chips match
 * NVM_CFG1 strapping against flash_table[], reconfiguring the flash
 * interface when the straps indicate it has not been set up yet.
 * Returns 0 on success, -ENODEV if no table entry matches, or an error
 * from NVRAM lock acquisition.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap set. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared hw config; fall back to the
	 * table entry's total size.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3962
/* Read @buf_size bytes from NVRAM at byte @offset into @ret_buf.
 *
 * The NVRAM interface transfers aligned 32-bit words framed with
 * FIRST/LAST command flags, so unaligned head/tail bytes are handled by
 * reading whole words into a scratch buffer and copying the relevant
 * portion.  Acquires and releases the NVRAM lock around the transfer.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned start: read the containing word and copy the tail. */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Round the remaining length up to a whole word; 'extra' bytes of
	 * the final word must not be copied out.
	 */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle words go straight into the caller's buffer. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Last word: read into scratch and drop the extra bytes. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
4072
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 *
 * Unaligned head/tail bytes are merged with existing flash contents by
 * pre-reading the surrounding words into an aligned bounce buffer.  The
 * data is then written page by page; for non-buffered flash each page is
 * first read into a scratch buffer, erased, and the untouched head/tail
 * of the page is written back around the new data.  The NVRAM lock and
 * write-enable are acquired/released per page.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range and fetch the first word. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen the range and fetch the last word. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned bounce buffer merging old edge bytes with the
	 * caller's data.
	 */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch page for read-modify-write. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4252
/* Probe firmware support for remote PHY management (SerDes only).
 *
 * If the firmware capability mailbox carries the expected signature and
 * advertises remote-PHY capability, set BNX2_PHY_FLAG_REMOTE_PHY_CAP,
 * derive the port type (fibre vs. TP) from the firmware link status,
 * and — if the interface is up — acknowledge the capability back to the
 * firmware via the driver-ack mailbox.
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
		return;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (netif_running(bp->dev)) {
			u32 sig;

			sig = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
		}
	}
}
4284
/* Map the MSI-X table and PBA through the PCI GRC windows.
 *
 * Switches the GRC window scheme to separate windows, then points
 * window 2 at the MSI-X vector table and window 3 at the pending-bit
 * array so the host can access them through the register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4293
/* Perform a coordinated soft reset of the chip.
 *
 * Sequence: quiesce DMA/coalescing, handshake with the bootcode
 * (WAIT0), deposit a driver reset signature, issue the chip-specific
 * reset (5709 uses MISC_COMMAND; older chips use the PCICFG core-reset
 * request and poll for completion), verify endian configuration, then
 * handshake again (WAIT1) and re-probe remote-PHY state.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware.
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * if byte swapping comes up wrong, or a bnx2_fw_sync() error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);	/* flush posted write */
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: dedicated software-reset command register. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* The reset may have changed the firmware's remote-PHY state;
	 * re-probe it and re-derive the default link if the media
	 * reported by the firmware changed. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* Reset wipes the GRC windows; re-map the MSI-X table. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4399
/* Program the freshly-reset chip into an operational state.
 *
 * Configures DMA byte/word swapping and channel counts, quirks for
 * 5706 A0 and PCI-X, context memory, on-chip CPUs and NVRAM, the MAC
 * address and MTU, the host-coalescing block (status/statistics block
 * addresses, trip counts and tick timers for the default and any extra
 * MSI-X status blocks), the receive filter, and finally tells the
 * firmware initialization is complete (WAIT2) before enabling the rest
 * of the chip blocks.
 *
 * Returns 0 on success or a negative errno from context/CPU init or
 * the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* 5706 A0 workaround: restrict TDMA to a single DMA. */
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load and start the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;	/* early-5709 MQ quirk */

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing: high 16 bits are the "during interrupt"
	 * values, low 16 bits the normal values. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status-block configuration for the extra MSI-X
	 * vectors (vector 0 was configured above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);	/* flush posted write */

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4610
c76c0475
MC
4611static void
4612bnx2_clear_ring_states(struct bnx2 *bp)
4613{
4614 struct bnx2_napi *bnapi;
35e9010b 4615 struct bnx2_tx_ring_info *txr;
bb4f98ab 4616 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
4617 int i;
4618
4619 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4620 bnapi = &bp->bnx2_napi[i];
35e9010b 4621 txr = &bnapi->tx_ring;
bb4f98ab 4622 rxr = &bnapi->rx_ring;
c76c0475 4623
35e9010b
MC
4624 txr->tx_cons = 0;
4625 txr->hw_tx_cons = 0;
bb4f98ab
MC
4626 rxr->rx_prod_bseq = 0;
4627 rxr->rx_prod = 0;
4628 rxr->rx_cons = 0;
4629 rxr->rx_pg_prod = 0;
4630 rxr->rx_pg_cons = 0;
c76c0475
MC
4631 }
4632}
4633
/* Program the L2 TX context for one connection ID.
 *
 * Selects the 5709 (Xinan) or 5706/5708 context field offsets, then
 * writes the context type, command type (BD length field of 8 in the
 * upper half), and the 64-bit TX descriptor ring base address split
 * into high/low 32-bit halves.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* TX BD ring base address, split into 32-bit halves. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
/* Initialize one TX ring and its chip context.
 *
 * Ring 0 uses TX_CID; additional (TSS) rings use consecutive CIDs
 * starting at TX_TSS_CID.  The last BD of the ring is turned into a
 * chain pointer back to the ring base so the hardware sees a circular
 * BD chain.  Also resets the software producer state and caches the
 * mailbox doorbell addresses for the fast path.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Last entry chains back to the start of the ring. */
	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	/* Doorbell mailbox addresses used when posting new BDs. */
	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
4695
4696static void
5d5d0015
MC
4697bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4698 int num_rings)
b6016b76 4699{
b6016b76 4700 int i;
5d5d0015 4701 struct rx_bd *rxbd;
6aa20a22 4702
5d5d0015 4703 for (i = 0; i < num_rings; i++) {
13daffa2 4704 int j;
b6016b76 4705
5d5d0015 4706 rxbd = &rx_ring[i][0];
13daffa2 4707 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 4708 rxbd->rx_bd_len = buf_size;
13daffa2
MC
4709 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4710 }
5d5d0015 4711 if (i == (num_rings - 1))
13daffa2
MC
4712 j = 0;
4713 else
4714 j = i + 1;
5d5d0015
MC
4715 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4716 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 4717 }
5d5d0015
MC
4718}
4719
/* Initialize one RX ring: BD chains, chip context, and initial buffers.
 *
 * Ring 0 uses RX_CID; additional (RSS) rings use consecutive CIDs from
 * RX_RSS_CID.  Sets up the normal BD ring and, when jumbo page rings
 * are enabled, the page BD ring; programs the ring base addresses and
 * page-buffer parameters into the connection context; pre-fills both
 * rings with buffers; and finally posts the initial producer indices
 * to the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page-buffer (jumbo) ring. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Page BD ring base address. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Normal BD ring base address. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with skbs. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4799
/* Initialize every TX and RX ring and, when multiple RX rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table four one-byte entries at a
		 * time inside tbl_32, flushing each completed word in
		 * big-endian order.  Entries cycle over the non-default
		 * rings (num_rx_rings - 1).  NOTE(review): the flush
		 * address BNX2_RXP_SCRATCH_RSS_TBL + i uses the index of
		 * the *last* byte of the word — assumed to match the
		 * RXP scratch layout; confirm against firmware docs. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
4844
5d5d0015 4845static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 4846{
5d5d0015 4847 u32 max, num_rings = 1;
13daffa2 4848
5d5d0015
MC
4849 while (ring_size > MAX_RX_DESC_CNT) {
4850 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
4851 num_rings++;
4852 }
4853 /* round to next power of 2 */
5d5d0015 4854 max = max_size;
13daffa2
MC
4855 while ((max & num_rings) == 0)
4856 max >>= 1;
4857
4858 if (num_rings != max)
4859 max <<= 1;
4860
5d5d0015
MC
4861 return max;
4862}
4863
/* Compute all RX buffer and ring sizing parameters for the current MTU.
 *
 * If a full frame (plus skb overhead) no longer fits in one page and
 * the chip supports it, the driver switches to split header/page mode:
 * the skb buffer shrinks to the copy-threshold header size and the
 * remainder of each frame lands in a page ("jumbo") ring sized at
 * @size BDs per page of payload.  Otherwise a single buffer per frame
 * is used and the page ring is disabled.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* True memory footprint of one receive buffer including skb
	 * padding and the shared info block. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages of payload per frame; the 40 appears to account
		 * for header bytes kept in the skb buffer —
		 * NOTE(review): confirm the exact derivation. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Split mode: skb buffer only holds the header portion. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4902
b6016b76
MC
4903static void
4904bnx2_free_tx_skbs(struct bnx2 *bp)
4905{
4906 int i;
4907
35e9010b
MC
4908 for (i = 0; i < bp->num_tx_rings; i++) {
4909 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4910 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4911 int j;
b6016b76 4912
35e9010b 4913 if (txr->tx_buf_ring == NULL)
b6016b76 4914 continue;
b6016b76 4915
35e9010b
MC
4916 for (j = 0; j < TX_DESC_CNT; ) {
4917 struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4918 struct sk_buff *skb = tx_buf->skb;
4919 int k, last;
4920
4921 if (skb == NULL) {
4922 j++;
4923 continue;
4924 }
4925
4926 pci_unmap_single(bp->pdev,
4927 pci_unmap_addr(tx_buf, mapping),
b6016b76
MC
4928 skb_headlen(skb), PCI_DMA_TODEVICE);
4929
35e9010b 4930 tx_buf->skb = NULL;
b6016b76 4931
35e9010b
MC
4932 last = skb_shinfo(skb)->nr_frags;
4933 for (k = 0; k < last; k++) {
4934 tx_buf = &txr->tx_buf_ring[j + k + 1];
4935 pci_unmap_page(bp->pdev,
4936 pci_unmap_addr(tx_buf, mapping),
4937 skb_shinfo(skb)->frags[j].size,
4938 PCI_DMA_TODEVICE);
4939 }
4940 dev_kfree_skb(skb);
4941 j += k + 1;
b6016b76 4942 }
b6016b76 4943 }
b6016b76
MC
4944}
4945
4946static void
4947bnx2_free_rx_skbs(struct bnx2 *bp)
4948{
4949 int i;
4950
bb4f98ab
MC
4951 for (i = 0; i < bp->num_rx_rings; i++) {
4952 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4953 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4954 int j;
b6016b76 4955
bb4f98ab
MC
4956 if (rxr->rx_buf_ring == NULL)
4957 return;
b6016b76 4958
bb4f98ab
MC
4959 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4960 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4961 struct sk_buff *skb = rx_buf->skb;
b6016b76 4962
bb4f98ab
MC
4963 if (skb == NULL)
4964 continue;
b6016b76 4965
bb4f98ab
MC
4966 pci_unmap_single(bp->pdev,
4967 pci_unmap_addr(rx_buf, mapping),
4968 bp->rx_buf_use_size,
4969 PCI_DMA_FROMDEVICE);
b6016b76 4970
bb4f98ab
MC
4971 rx_buf->skb = NULL;
4972
4973 dev_kfree_skb(skb);
4974 }
4975 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
4976 bnx2_free_rx_page(bp, rxr, j);
b6016b76
MC
4977 }
4978}
4979
/* Release all TX and RX buffers; used when resetting or closing. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4986
4987static int
4988bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4989{
4990 int rc;
4991
4992 rc = bnx2_reset_chip(bp, reset_code);
4993 bnx2_free_skbs(bp);
4994 if (rc)
4995 return rc;
4996
fba9fe91
MC
4997 if ((rc = bnx2_init_chip(bp)) != 0)
4998 return rc;
4999
35e9010b 5000 bnx2_init_all_rings(bp);
b6016b76
MC
5001 return 0;
5002}
5003
/* Full NIC (re)initialization: reset the chip and rings, then bring
 * up the PHY and link under phy_lock.
 *
 * @reset_phy: forwarded to bnx2_init_phy(); nonzero requests a PHY
 * reset as part of the bring-up.
 * Returns 0 on success or the bnx2_reset_nic() error.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* PHY/link state is shared with the irq and timer paths;
	 * serialize with phy_lock (bh-safe). */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5020
/* Self-test: verify read/write and read-only register bit behavior.
 *
 * For each table entry: save the register, write 0 and check that all
 * rw_mask bits read back 0 while ro_mask bits are unchanged; write
 * all-ones and check that rw_mask bits read back 1 while ro_mask bits
 * are still unchanged; then restore the saved value.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.
 *
 * Returns 0 if every register passes, -ENODEV on the first failure
 * (the register is restored before returning).
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, writable-bit mask, read-only-bit mask } */
	static const struct {
		u16 offset;
		u16 flags;
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must clear, ro bits must not move. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must set, ro bits unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5191
5192static int
5193bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5194{
f71e1309 5195 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5196 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5197 int i;
5198
5199 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5200 u32 offset;
5201
5202 for (offset = 0; offset < size; offset += 4) {
5203
2726d6e1 5204 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5205
2726d6e1 5206 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5207 test_pattern[i]) {
5208 return -ENODEV;
5209 }
5210 }
5211 }
5212 return 0;
5213}
5214
5215static int
5216bnx2_test_memory(struct bnx2 *bp)
5217{
5218 int ret = 0;
5219 int i;
5bae30c9 5220 static struct mem_entry {
b6016b76
MC
5221 u32 offset;
5222 u32 len;
5bae30c9 5223 } mem_tbl_5706[] = {
b6016b76 5224 { 0x60000, 0x4000 },
5b0c76ad 5225 { 0xa0000, 0x3000 },
b6016b76
MC
5226 { 0xe0000, 0x4000 },
5227 { 0x120000, 0x4000 },
5228 { 0x1a0000, 0x4000 },
5229 { 0x160000, 0x4000 },
5230 { 0xffffffff, 0 },
5bae30c9
MC
5231 },
5232 mem_tbl_5709[] = {
5233 { 0x60000, 0x4000 },
5234 { 0xa0000, 0x3000 },
5235 { 0xe0000, 0x4000 },
5236 { 0x120000, 0x4000 },
5237 { 0x1a0000, 0x4000 },
5238 { 0xffffffff, 0 },
b6016b76 5239 };
5bae30c9
MC
5240 struct mem_entry *mem_tbl;
5241
5242 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5243 mem_tbl = mem_tbl_5709;
5244 else
5245 mem_tbl = mem_tbl_5706;
b6016b76
MC
5246
5247 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5248 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5249 mem_tbl[i].len)) != 0) {
5250 return ret;
5251 }
5252 }
6aa20a22 5253
b6016b76
MC
5254 return ret;
5255}
5256
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

/* Send one self-addressed test frame through the requested loopback
 * path (MAC-internal or PHY) and verify it comes back intact on the
 * RX ring.  Returns 0 on success, -EINVAL for an unknown mode,
 * -ENOMEM if the skb cannot be allocated, and -ENODEV if the frame
 * is not completed, not received, or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	/* NOTE(review): txr and rxr are re-assigned to the same values
	 * they were initialized with above — harmless redundancy.
	 */
	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* No direct PHY access when it is owned remotely. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Frame destined to our own MAC; bytes 14.. carry an
	 * index-derived pattern that is checked on receive.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so the pre-TX RX consumer index
	 * read below is current.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single-descriptor TX BD and ring the doorbell. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second status block refresh after the frame had time to loop. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must be fully consumed ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr the chip DMAed sits in front of the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check excludes the 4-byte CRC appended by the MAC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5387
bc5a0690
MC
5388#define BNX2_MAC_LOOPBACK_FAILED 1
5389#define BNX2_PHY_LOOPBACK_FAILED 2
5390#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5391 BNX2_PHY_LOOPBACK_FAILED)
5392
5393static int
5394bnx2_test_loopback(struct bnx2 *bp)
5395{
5396 int rc = 0;
5397
5398 if (!netif_running(bp->dev))
5399 return BNX2_LOOPBACK_FAILED;
5400
5401 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5402 spin_lock_bh(&bp->phy_lock);
9a120bc5 5403 bnx2_init_phy(bp, 1);
bc5a0690
MC
5404 spin_unlock_bh(&bp->phy_lock);
5405 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5406 rc |= BNX2_MAC_LOOPBACK_FAILED;
5407 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5408 rc |= BNX2_PHY_LOOPBACK_FAILED;
5409 return rc;
5410}
5411
b6016b76
MC
5412#define NVRAM_SIZE 0x200
5413#define CRC32_RESIDUAL 0xdebb20e3
5414
5415static int
5416bnx2_test_nvram(struct bnx2 *bp)
5417{
b491edd5 5418 __be32 buf[NVRAM_SIZE / 4];
b6016b76
MC
5419 u8 *data = (u8 *) buf;
5420 int rc = 0;
5421 u32 magic, csum;
5422
5423 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5424 goto test_nvram_done;
5425
5426 magic = be32_to_cpu(buf[0]);
5427 if (magic != 0x669955aa) {
5428 rc = -ENODEV;
5429 goto test_nvram_done;
5430 }
5431
5432 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5433 goto test_nvram_done;
5434
5435 csum = ether_crc_le(0x100, data);
5436 if (csum != CRC32_RESIDUAL) {
5437 rc = -ENODEV;
5438 goto test_nvram_done;
5439 }
5440
5441 csum = ether_crc_le(0x100, data + 0x100);
5442 if (csum != CRC32_RESIDUAL) {
5443 rc = -ENODEV;
5444 }
5445
5446test_nvram_done:
5447 return rc;
5448}
5449
5450static int
5451bnx2_test_link(struct bnx2 *bp)
5452{
5453 u32 bmsr;
5454
583c28e5 5455 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
489310a4
MC
5456 if (bp->link_up)
5457 return 0;
5458 return -ENODEV;
5459 }
c770a65c 5460 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5461 bnx2_enable_bmsr1(bp);
5462 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5463 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5464 bnx2_disable_bmsr1(bp);
c770a65c 5465 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5466
b6016b76
MC
5467 if (bmsr & BMSR_LSTATUS) {
5468 return 0;
5469 }
5470 return -ENODEV;
5471}
5472
5473static int
5474bnx2_test_intr(struct bnx2 *bp)
5475{
5476 int i;
b6016b76
MC
5477 u16 status_idx;
5478
5479 if (!netif_running(bp->dev))
5480 return -ENODEV;
5481
5482 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5483
5484 /* This register is not touched during run-time. */
bf5295bb 5485 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5486 REG_RD(bp, BNX2_HC_COMMAND);
5487
5488 for (i = 0; i < 10; i++) {
5489 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5490 status_idx) {
5491
5492 break;
5493 }
5494
5495 msleep_interruptible(10);
5496 }
5497 if (i < 10)
5498 return 0;
5499
5500 return -ENODEV;
5501}
5502
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Signal must be detected on the wire. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Double read of the AN debug shadow register — second read
	 * gives the current (non-latched) state.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* No link if we are out of sync or seeing invalid RUDI. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	/* Link partner is present but not autonegotiating — parallel
	 * detect can force the link up.
	 */
	return 1;
}
5534
/* Periodic SerDes state machine for the 5706: implements parallel
 * detection (forcing 1G full duplex when the partner does not
 * autonegotiate) and recovery back to autoneg, plus a link-loss
 * check on the AN debug register.  Runs from bnx2_timer().
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still pending; skip the link
		 * check this tick.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Partner present but not negotiating: force
			 * 1000/full via parallel detect.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detect; if the partner now
		 * autonegotiates (bit 0x20 of shadow reg 0x15 after
		 * selecting 0x0f01 via reg 0x17), re-enable autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (check_link) {
		u32 val;

		/* Double read: second value is the current AN state. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Out of sync while we think the link is up:
			 * force the link down once, then let
			 * bnx2_set_link() re-evaluate on later ticks.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
b6016b76 5596
/* Periodic SerDes handling for the 5708: when autoneg fails to bring
 * the link up, alternate between forcing 2.5G and re-enabling
 * autoneg.  Only applies to 2.5G-capable, locally-managed PHYs.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give a pending autoneg attempt more time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg made no progress: try forced 2.5G
			 * and come back sooner to check.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too: back to autoneg and
			 * wait two ticks before re-evaluating.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5629
/* Driver heartbeat timer: sends the firmware heartbeat, refreshes the
 * firmware RX-drop counter, applies a 5708 statistics workaround, and
 * runs the per-chip SerDes state machines.  Re-arms itself with
 * bp->current_interval (which the SerDes handlers may shorten).
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are (re)initializing; just re-arm and try later. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5661
8e6a72c4
MC
5662static int
5663bnx2_request_irq(struct bnx2 *bp)
5664{
6d866ffc 5665 unsigned long flags;
b4b36042
MC
5666 struct bnx2_irq *irq;
5667 int rc = 0, i;
8e6a72c4 5668
f86e82fb 5669 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6d866ffc
MC
5670 flags = 0;
5671 else
5672 flags = IRQF_SHARED;
b4b36042
MC
5673
5674 for (i = 0; i < bp->irq_nvecs; i++) {
5675 irq = &bp->irq_tbl[i];
c76c0475 5676 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
f0ea2e63 5677 &bp->bnx2_napi[i]);
b4b36042
MC
5678 if (rc)
5679 break;
5680 irq->requested = 1;
5681 }
8e6a72c4
MC
5682 return rc;
5683}
5684
5685static void
5686bnx2_free_irq(struct bnx2 *bp)
5687{
b4b36042
MC
5688 struct bnx2_irq *irq;
5689 int i;
8e6a72c4 5690
b4b36042
MC
5691 for (i = 0; i < bp->irq_nvecs; i++) {
5692 irq = &bp->irq_tbl[i];
5693 if (irq->requested)
f0ea2e63 5694 free_irq(irq->vector, &bp->bnx2_napi[i]);
b4b36042 5695 irq->requested = 0;
6d866ffc 5696 }
f86e82fb 5697 if (bp->flags & BNX2_FLAG_USING_MSI)
b4b36042 5698 pci_disable_msi(bp->pdev);
f86e82fb 5699 else if (bp->flags & BNX2_FLAG_USING_MSIX)
b4b36042
MC
5700 pci_disable_msix(bp->pdev);
5701
f86e82fb 5702 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
b4b36042
MC
5703}
5704
/* Try to switch the device to MSI-X with msix_vecs vectors.  On any
 * pci_enable_msix() failure the function returns silently and the
 * caller stays on MSI/INTx (irq_nvecs remains 1).
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];

	/* Program the MSI-X table/PBA windows in the chip before
	 * enabling the capability.
	 */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* All vectors use the one-shot handler; names are all the
	 * device name (no per-vector suffix here).
	 */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;

		strcpy(bp->irq_tbl[i].name, bp->dev->name);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}

	/* NOTE(review): a positive return (fewer vectors available) is
	 * treated the same as failure — no retry with fewer vectors.
	 */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5733
/* Choose the interrupt mode for this open: default to a single INTx
 * vector, upgrade to MSI-X (one vector per RSS ring, capped by CPU
 * count) when capable, else plain MSI.  dis_msi forces INTx (used by
 * the module parameter and by the MSI fallback path in bnx2_open()).
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector more than CPUs (up to the RSS ring limit). */
	int msix_vecs = min(cpus + 1, RX_MAX_RSS_RINGS);

	/* Baseline: single legacy interrupt. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to MSI only if MSI-X did not take. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}
	bp->num_tx_rings = 1;
	bp->num_rx_rings = bp->irq_nvecs;
}
5764
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	/* Power up and quiesce interrupts before any setup. */
	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear the MSI setup down and redo int mode,
			 * NIC init, and IRQ request in INTx mode.
			 */
			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_start_queue(dev);

	return 0;

open_err:
	/* Common unwind for every failure point above; the free
	 * routines tolerate partially-completed setup.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
5841
/* Workqueue handler scheduled by bnx2_tx_timeout(): stop the netif,
 * re-initialize the chip, and restart.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* The device may have been closed between scheduling and now. */
	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	/* NOTE(review): the return value of bnx2_init_nic() is ignored;
	 * if re-init fails the device is restarted anyway.
	 */
	bnx2_init_nic(bp, 1);

	/* Block interrupt handling until bnx2_netif_start() re-enables. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
5857
5858static void
5859bnx2_tx_timeout(struct net_device *dev)
5860{
972ec0d4 5861 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5862
5863 /* This allows the netif to be shutdown gracefully before resetting */
5864 schedule_work(&bp->reset_task);
5865}
5866
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device while swapping the VLAN group, reprogram
	 * the RX filters, then restart.
	 */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5882
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

	/* Should not happen: the queue is stopped before the ring can
	 * fill (see the check at the bottom of this function).
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Hardware VLAN tag insertion: tag goes in the upper 16 bits. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO setup: encode TCP option length and (for IPv6)
		 * the transport-header offset into the BD flags/mss.
		 */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* tcp_off: extra bytes (e.g. extension headers)
			 * between the basic IPv6 header and TCP; spread
			 * across three BD bit fields in units of 8.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: headers are rewritten, so a cloned
			 * header area must be privatized first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed per-segment IP length and TCP pseudo
			 * checksum for the hardware to complete.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map and describe the linear part in the first BD. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: producer index and byte-sequence count. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; re-wake immediately if
	 * completions already freed enough room (races with tx_int).
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
6024
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Make sure a pending reset_task cannot run concurrently with
	 * (or after) the teardown below.
	 */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Tell the firmware how we are going down so it can arm (or
	 * not arm) wake-on-LAN accordingly.
	 */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6052
/* Hardware counters are split into _hi/_lo 32-bit halves.  On 64-bit
 * kernels combine both halves into the unsigned long stat; on 32-bit
 * kernels only the low half fits.
 */
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6065
/* Translate the DMA'd hardware statistics block into the generic
 * net_device_stats structure.  Returns cached (possibly zeroed)
 * stats if the statistics block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense counter is not usable on 5706 and 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames dropped by the firmware, refreshed from the
	 * chip by bnx2_timer().
	 */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6141
/* All ethtool functions called with rtnl_lock */

/* ethtool get_settings: report supported modes based on the PHY type
 * (serdes/copper, or both when the PHY is remotely managed) and the
 * current autoneg/speed/duplex state under the PHY lock.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		/* Remote PHY may be either media type. */
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* Link state fields can change from the timer; snapshot them
	 * under the PHY lock.
	 */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* No link: speed/duplex unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 6202
/* ethtool set_settings: validate the requested port/autoneg/speed
 * combination against the PHY capabilities, store the new settings,
 * and reprogram the PHY when the device is running.  Returns 0 on
 * success, -EINVAL for any unsupported combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; commit only after validation. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Only a remote-capable PHY can switch media type. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G needs a capable fibre PHY. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the
			 * selected medium supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		/* Gigabit+ cannot be forced on copper here. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6297
6298static void
6299bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6300{
972ec0d4 6301 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6302
6303 strcpy(info->driver, DRV_MODULE_NAME);
6304 strcpy(info->version, DRV_MODULE_VERSION);
6305 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 6306 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
6307}
6308
244ac4f4
MC
6309#define BNX2_REGDUMP_LEN (32 * 1024)
6310
6311static int
6312bnx2_get_regs_len(struct net_device *dev)
6313{
6314 return BNX2_REGDUMP_LEN;
6315}
6316
6317static void
6318bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6319{
6320 u32 *p = _p, i, offset;
6321 u8 *orig_p = _p;
6322 struct bnx2 *bp = netdev_priv(dev);
6323 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6324 0x0800, 0x0880, 0x0c00, 0x0c10,
6325 0x0c30, 0x0d08, 0x1000, 0x101c,
6326 0x1040, 0x1048, 0x1080, 0x10a4,
6327 0x1400, 0x1490, 0x1498, 0x14f0,
6328 0x1500, 0x155c, 0x1580, 0x15dc,
6329 0x1600, 0x1658, 0x1680, 0x16d8,
6330 0x1800, 0x1820, 0x1840, 0x1854,
6331 0x1880, 0x1894, 0x1900, 0x1984,
6332 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6333 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6334 0x2000, 0x2030, 0x23c0, 0x2400,
6335 0x2800, 0x2820, 0x2830, 0x2850,
6336 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6337 0x3c00, 0x3c94, 0x4000, 0x4010,
6338 0x4080, 0x4090, 0x43c0, 0x4458,
6339 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6340 0x4fc0, 0x5010, 0x53c0, 0x5444,
6341 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6342 0x5fc0, 0x6000, 0x6400, 0x6428,
6343 0x6800, 0x6848, 0x684c, 0x6860,
6344 0x6888, 0x6910, 0x8000 };
6345
6346 regs->version = 0;
6347
6348 memset(p, 0, BNX2_REGDUMP_LEN);
6349
6350 if (!netif_running(bp->dev))
6351 return;
6352
6353 i = 0;
6354 offset = reg_boundaries[0];
6355 p += offset;
6356 while (offset < BNX2_REGDUMP_LEN) {
6357 *p++ = REG_RD(bp, offset);
6358 offset += 4;
6359 if (offset == reg_boundaries[i + 1]) {
6360 offset = reg_boundaries[i + 2];
6361 p = (u32 *) (orig_p + offset);
6362 i += 2;
6363 }
6364 }
6365}
6366
b6016b76
MC
6367static void
6368bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6369{
972ec0d4 6370 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6371
f86e82fb 6372 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6373 wol->supported = 0;
6374 wol->wolopts = 0;
6375 }
6376 else {
6377 wol->supported = WAKE_MAGIC;
6378 if (bp->wol)
6379 wol->wolopts = WAKE_MAGIC;
6380 else
6381 wol->wolopts = 0;
6382 }
6383 memset(&wol->sopass, 0, sizeof(wol->sopass));
6384}
6385
6386static int
6387bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6388{
972ec0d4 6389 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6390
6391 if (wol->wolopts & ~WAKE_MAGIC)
6392 return -EINVAL;
6393
6394 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6395 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6396 return -EINVAL;
6397
6398 bp->wol = 1;
6399 }
6400 else {
6401 bp->wol = 0;
6402 }
6403 return 0;
6404}
6405
6406static int
6407bnx2_nway_reset(struct net_device *dev)
6408{
972ec0d4 6409 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6410 u32 bmcr;
6411
6412 if (!(bp->autoneg & AUTONEG_SPEED)) {
6413 return -EINVAL;
6414 }
6415
c770a65c 6416 spin_lock_bh(&bp->phy_lock);
b6016b76 6417
583c28e5 6418 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7b6b8347
MC
6419 int rc;
6420
6421 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6422 spin_unlock_bh(&bp->phy_lock);
6423 return rc;
6424 }
6425
b6016b76 6426 /* Force a link down visible on the other side */
583c28e5 6427 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
ca58c3af 6428 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
c770a65c 6429 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6430
6431 msleep(20);
6432
c770a65c 6433 spin_lock_bh(&bp->phy_lock);
f8dd064e
MC
6434
6435 bp->current_interval = SERDES_AN_TIMEOUT;
6436 bp->serdes_an_pending = 1;
6437 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
6438 }
6439
ca58c3af 6440 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 6441 bmcr &= ~BMCR_LOOPBACK;
ca58c3af 6442 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
b6016b76 6443
c770a65c 6444 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6445
6446 return 0;
6447}
6448
6449static int
6450bnx2_get_eeprom_len(struct net_device *dev)
6451{
972ec0d4 6452 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6453
1122db71 6454 if (bp->flash_info == NULL)
b6016b76
MC
6455 return 0;
6456
1122db71 6457 return (int) bp->flash_size;
b6016b76
MC
6458}
6459
6460static int
6461bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6462 u8 *eebuf)
6463{
972ec0d4 6464 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6465 int rc;
6466
1064e944 6467 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
6468
6469 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6470
6471 return rc;
6472}
6473
6474static int
6475bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6476 u8 *eebuf)
6477{
972ec0d4 6478 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6479 int rc;
6480
1064e944 6481 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
6482
6483 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6484
6485 return rc;
6486}
6487
6488static int
6489bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6490{
972ec0d4 6491 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6492
6493 memset(coal, 0, sizeof(struct ethtool_coalesce));
6494
6495 coal->rx_coalesce_usecs = bp->rx_ticks;
6496 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6497 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6498 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6499
6500 coal->tx_coalesce_usecs = bp->tx_ticks;
6501 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6502 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6503 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6504
6505 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6506
6507 return 0;
6508}
6509
6510static int
6511bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6512{
972ec0d4 6513 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6514
6515 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6516 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6517
6aa20a22 6518 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
6519 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6520
6521 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6522 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6523
6524 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6525 if (bp->rx_quick_cons_trip_int > 0xff)
6526 bp->rx_quick_cons_trip_int = 0xff;
6527
6528 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6529 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6530
6531 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6532 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6533
6534 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6535 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6536
6537 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6538 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6539 0xff;
6540
6541 bp->stats_ticks = coal->stats_block_coalesce_usecs;
02537b06
MC
6542 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6543 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6544 bp->stats_ticks = USEC_PER_SEC;
6545 }
7ea6920e
MC
6546 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6547 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6548 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
6549
6550 if (netif_running(bp->dev)) {
6551 bnx2_netif_stop(bp);
9a120bc5 6552 bnx2_init_nic(bp, 0);
b6016b76
MC
6553 bnx2_netif_start(bp);
6554 }
6555
6556 return 0;
6557}
6558
6559static void
6560bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6561{
972ec0d4 6562 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6563
13daffa2 6564 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76 6565 ering->rx_mini_max_pending = 0;
47bf4246 6566 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
6567
6568 ering->rx_pending = bp->rx_ring_size;
6569 ering->rx_mini_pending = 0;
47bf4246 6570 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
6571
6572 ering->tx_max_pending = MAX_TX_DESC_CNT;
6573 ering->tx_pending = bp->tx_ring_size;
6574}
6575
6576static int
5d5d0015 6577bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
b6016b76 6578{
13daffa2
MC
6579 if (netif_running(bp->dev)) {
6580 bnx2_netif_stop(bp);
6581 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6582 bnx2_free_skbs(bp);
6583 bnx2_free_mem(bp);
6584 }
6585
5d5d0015
MC
6586 bnx2_set_rx_ring_size(bp, rx);
6587 bp->tx_ring_size = tx;
b6016b76
MC
6588
6589 if (netif_running(bp->dev)) {
13daffa2
MC
6590 int rc;
6591
6592 rc = bnx2_alloc_mem(bp);
6593 if (rc)
6594 return rc;
9a120bc5 6595 bnx2_init_nic(bp, 0);
b6016b76
MC
6596 bnx2_netif_start(bp);
6597 }
b6016b76
MC
6598 return 0;
6599}
6600
5d5d0015
MC
6601static int
6602bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6603{
6604 struct bnx2 *bp = netdev_priv(dev);
6605 int rc;
6606
6607 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6608 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6609 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6610
6611 return -EINVAL;
6612 }
6613 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6614 return rc;
6615}
6616
b6016b76
MC
6617static void
6618bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6619{
972ec0d4 6620 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6621
6622 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6623 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6624 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6625}
6626
6627static int
6628bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6629{
972ec0d4 6630 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6631
6632 bp->req_flow_ctrl = 0;
6633 if (epause->rx_pause)
6634 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6635 if (epause->tx_pause)
6636 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6637
6638 if (epause->autoneg) {
6639 bp->autoneg |= AUTONEG_FLOW_CTRL;
6640 }
6641 else {
6642 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6643 }
6644
c770a65c 6645 spin_lock_bh(&bp->phy_lock);
b6016b76 6646
0d8a6571 6647 bnx2_setup_phy(bp, bp->phy_port);
b6016b76 6648
c770a65c 6649 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6650
6651 return 0;
6652}
6653
6654static u32
6655bnx2_get_rx_csum(struct net_device *dev)
6656{
972ec0d4 6657 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6658
6659 return bp->rx_csum;
6660}
6661
6662static int
6663bnx2_set_rx_csum(struct net_device *dev, u32 data)
6664{
972ec0d4 6665 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6666
6667 bp->rx_csum = data;
6668 return 0;
6669}
6670
b11d6213
MC
6671static int
6672bnx2_set_tso(struct net_device *dev, u32 data)
6673{
4666f87a
MC
6674 struct bnx2 *bp = netdev_priv(dev);
6675
6676 if (data) {
b11d6213 6677 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
6678 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6679 dev->features |= NETIF_F_TSO6;
6680 } else
6681 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6682 NETIF_F_TSO_ECN);
b11d6213
MC
6683 return 0;
6684}
6685
cea94db9 6686#define BNX2_NUM_STATS 46
b6016b76 6687
14ab9b86 6688static struct {
b6016b76
MC
6689 char string[ETH_GSTRING_LEN];
6690} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6691 { "rx_bytes" },
6692 { "rx_error_bytes" },
6693 { "tx_bytes" },
6694 { "tx_error_bytes" },
6695 { "rx_ucast_packets" },
6696 { "rx_mcast_packets" },
6697 { "rx_bcast_packets" },
6698 { "tx_ucast_packets" },
6699 { "tx_mcast_packets" },
6700 { "tx_bcast_packets" },
6701 { "tx_mac_errors" },
6702 { "tx_carrier_errors" },
6703 { "rx_crc_errors" },
6704 { "rx_align_errors" },
6705 { "tx_single_collisions" },
6706 { "tx_multi_collisions" },
6707 { "tx_deferred" },
6708 { "tx_excess_collisions" },
6709 { "tx_late_collisions" },
6710 { "tx_total_collisions" },
6711 { "rx_fragments" },
6712 { "rx_jabbers" },
6713 { "rx_undersize_packets" },
6714 { "rx_oversize_packets" },
6715 { "rx_64_byte_packets" },
6716 { "rx_65_to_127_byte_packets" },
6717 { "rx_128_to_255_byte_packets" },
6718 { "rx_256_to_511_byte_packets" },
6719 { "rx_512_to_1023_byte_packets" },
6720 { "rx_1024_to_1522_byte_packets" },
6721 { "rx_1523_to_9022_byte_packets" },
6722 { "tx_64_byte_packets" },
6723 { "tx_65_to_127_byte_packets" },
6724 { "tx_128_to_255_byte_packets" },
6725 { "tx_256_to_511_byte_packets" },
6726 { "tx_512_to_1023_byte_packets" },
6727 { "tx_1024_to_1522_byte_packets" },
6728 { "tx_1523_to_9022_byte_packets" },
6729 { "rx_xon_frames" },
6730 { "rx_xoff_frames" },
6731 { "tx_xon_frames" },
6732 { "tx_xoff_frames" },
6733 { "rx_mac_ctrl_frames" },
6734 { "rx_filtered_packets" },
6735 { "rx_discards" },
cea94db9 6736 { "rx_fw_discards" },
b6016b76
MC
6737};
6738
6739#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6740
f71e1309 6741static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
b6016b76
MC
6742 STATS_OFFSET32(stat_IfHCInOctets_hi),
6743 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6744 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6745 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6746 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6747 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6748 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6749 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6750 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6751 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6752 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6aa20a22
JG
6753 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6754 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6755 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6756 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6757 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6758 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6759 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6760 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6761 STATS_OFFSET32(stat_EtherStatsCollisions),
6762 STATS_OFFSET32(stat_EtherStatsFragments),
6763 STATS_OFFSET32(stat_EtherStatsJabbers),
6764 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6765 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6766 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6767 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6768 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6769 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6770 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6771 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6772 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6773 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6774 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6775 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6776 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6777 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6778 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6779 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6780 STATS_OFFSET32(stat_XonPauseFramesReceived),
6781 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6782 STATS_OFFSET32(stat_OutXonSent),
6783 STATS_OFFSET32(stat_OutXoffSent),
6784 STATS_OFFSET32(stat_MacControlFramesReceived),
6785 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6786 STATS_OFFSET32(stat_IfInMBUFDiscards),
cea94db9 6787 STATS_OFFSET32(stat_FwRxDrop),
b6016b76
MC
6788};
6789
6790/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6791 * skipped because of errata.
6aa20a22 6792 */
14ab9b86 6793static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
b6016b76
MC
6794 8,0,8,8,8,8,8,8,8,8,
6795 4,0,4,4,4,4,4,4,4,4,
6796 4,4,4,4,4,4,4,4,4,4,
6797 4,4,4,4,4,4,4,4,4,4,
cea94db9 6798 4,4,4,4,4,4,
b6016b76
MC
6799};
6800
5b0c76ad
MC
6801static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6802 8,0,8,8,8,8,8,8,8,8,
6803 4,4,4,4,4,4,4,4,4,4,
6804 4,4,4,4,4,4,4,4,4,4,
6805 4,4,4,4,4,4,4,4,4,4,
cea94db9 6806 4,4,4,4,4,4,
5b0c76ad
MC
6807};
6808
b6016b76
MC
6809#define BNX2_NUM_TESTS 6
6810
14ab9b86 6811static struct {
b6016b76
MC
6812 char string[ETH_GSTRING_LEN];
6813} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6814 { "register_test (offline)" },
6815 { "memory_test (offline)" },
6816 { "loopback_test (offline)" },
6817 { "nvram_test (online)" },
6818 { "interrupt_test (online)" },
6819 { "link_test (online)" },
6820};
6821
6822static int
b9f2c044 6823bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 6824{
b9f2c044
JG
6825 switch (sset) {
6826 case ETH_SS_TEST:
6827 return BNX2_NUM_TESTS;
6828 case ETH_SS_STATS:
6829 return BNX2_NUM_STATS;
6830 default:
6831 return -EOPNOTSUPP;
6832 }
b6016b76
MC
6833}
6834
6835static void
6836bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6837{
972ec0d4 6838 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6839
6840 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6841 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
6842 int i;
6843
b6016b76
MC
6844 bnx2_netif_stop(bp);
6845 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6846 bnx2_free_skbs(bp);
6847
6848 if (bnx2_test_registers(bp) != 0) {
6849 buf[0] = 1;
6850 etest->flags |= ETH_TEST_FL_FAILED;
6851 }
6852 if (bnx2_test_memory(bp) != 0) {
6853 buf[1] = 1;
6854 etest->flags |= ETH_TEST_FL_FAILED;
6855 }
bc5a0690 6856 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 6857 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76
MC
6858
6859 if (!netif_running(bp->dev)) {
6860 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6861 }
6862 else {
9a120bc5 6863 bnx2_init_nic(bp, 1);
b6016b76
MC
6864 bnx2_netif_start(bp);
6865 }
6866
6867 /* wait for link up */
80be4434
MC
6868 for (i = 0; i < 7; i++) {
6869 if (bp->link_up)
6870 break;
6871 msleep_interruptible(1000);
6872 }
b6016b76
MC
6873 }
6874
6875 if (bnx2_test_nvram(bp) != 0) {
6876 buf[3] = 1;
6877 etest->flags |= ETH_TEST_FL_FAILED;
6878 }
6879 if (bnx2_test_intr(bp) != 0) {
6880 buf[4] = 1;
6881 etest->flags |= ETH_TEST_FL_FAILED;
6882 }
6883
6884 if (bnx2_test_link(bp) != 0) {
6885 buf[5] = 1;
6886 etest->flags |= ETH_TEST_FL_FAILED;
6887
6888 }
6889}
6890
6891static void
6892bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6893{
6894 switch (stringset) {
6895 case ETH_SS_STATS:
6896 memcpy(buf, bnx2_stats_str_arr,
6897 sizeof(bnx2_stats_str_arr));
6898 break;
6899 case ETH_SS_TEST:
6900 memcpy(buf, bnx2_tests_str_arr,
6901 sizeof(bnx2_tests_str_arr));
6902 break;
6903 }
6904}
6905
b6016b76
MC
6906static void
6907bnx2_get_ethtool_stats(struct net_device *dev,
6908 struct ethtool_stats *stats, u64 *buf)
6909{
972ec0d4 6910 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6911 int i;
6912 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 6913 u8 *stats_len_arr = NULL;
b6016b76
MC
6914
6915 if (hw_stats == NULL) {
6916 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6917 return;
6918 }
6919
5b0c76ad
MC
6920 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6921 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6922 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6923 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 6924 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
6925 else
6926 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
6927
6928 for (i = 0; i < BNX2_NUM_STATS; i++) {
6929 if (stats_len_arr[i] == 0) {
6930 /* skip this counter */
6931 buf[i] = 0;
6932 continue;
6933 }
6934 if (stats_len_arr[i] == 4) {
6935 /* 4-byte counter */
6936 buf[i] = (u64)
6937 *(hw_stats + bnx2_stats_offset_arr[i]);
6938 continue;
6939 }
6940 /* 8-byte counter */
6941 buf[i] = (((u64) *(hw_stats +
6942 bnx2_stats_offset_arr[i])) << 32) +
6943 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6944 }
6945}
6946
6947static int
6948bnx2_phys_id(struct net_device *dev, u32 data)
6949{
972ec0d4 6950 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6951 int i;
6952 u32 save;
6953
6954 if (data == 0)
6955 data = 2;
6956
6957 save = REG_RD(bp, BNX2_MISC_CFG);
6958 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6959
6960 for (i = 0; i < (data * 2); i++) {
6961 if ((i % 2) == 0) {
6962 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6963 }
6964 else {
6965 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6966 BNX2_EMAC_LED_1000MB_OVERRIDE |
6967 BNX2_EMAC_LED_100MB_OVERRIDE |
6968 BNX2_EMAC_LED_10MB_OVERRIDE |
6969 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6970 BNX2_EMAC_LED_TRAFFIC);
6971 }
6972 msleep_interruptible(500);
6973 if (signal_pending(current))
6974 break;
6975 }
6976 REG_WR(bp, BNX2_EMAC_LED, 0);
6977 REG_WR(bp, BNX2_MISC_CFG, save);
6978 return 0;
6979}
6980
4666f87a
MC
6981static int
6982bnx2_set_tx_csum(struct net_device *dev, u32 data)
6983{
6984 struct bnx2 *bp = netdev_priv(dev);
6985
6986 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 6987 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
6988 else
6989 return (ethtool_op_set_tx_csum(dev, data));
6990}
6991
7282d491 6992static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
6993 .get_settings = bnx2_get_settings,
6994 .set_settings = bnx2_set_settings,
6995 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
6996 .get_regs_len = bnx2_get_regs_len,
6997 .get_regs = bnx2_get_regs,
b6016b76
MC
6998 .get_wol = bnx2_get_wol,
6999 .set_wol = bnx2_set_wol,
7000 .nway_reset = bnx2_nway_reset,
7001 .get_link = ethtool_op_get_link,
7002 .get_eeprom_len = bnx2_get_eeprom_len,
7003 .get_eeprom = bnx2_get_eeprom,
7004 .set_eeprom = bnx2_set_eeprom,
7005 .get_coalesce = bnx2_get_coalesce,
7006 .set_coalesce = bnx2_set_coalesce,
7007 .get_ringparam = bnx2_get_ringparam,
7008 .set_ringparam = bnx2_set_ringparam,
7009 .get_pauseparam = bnx2_get_pauseparam,
7010 .set_pauseparam = bnx2_set_pauseparam,
7011 .get_rx_csum = bnx2_get_rx_csum,
7012 .set_rx_csum = bnx2_set_rx_csum,
4666f87a 7013 .set_tx_csum = bnx2_set_tx_csum,
b6016b76 7014 .set_sg = ethtool_op_set_sg,
b11d6213 7015 .set_tso = bnx2_set_tso,
b6016b76
MC
7016 .self_test = bnx2_self_test,
7017 .get_strings = bnx2_get_strings,
7018 .phys_id = bnx2_phys_id,
b6016b76 7019 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 7020 .get_sset_count = bnx2_get_sset_count,
b6016b76
MC
7021};
7022
7023/* Called with rtnl_lock */
7024static int
7025bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7026{
14ab9b86 7027 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 7028 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7029 int err;
7030
7031 switch(cmd) {
7032 case SIOCGMIIPHY:
7033 data->phy_id = bp->phy_addr;
7034
7035 /* fallthru */
7036 case SIOCGMIIREG: {
7037 u32 mii_regval;
7038
583c28e5 7039 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7040 return -EOPNOTSUPP;
7041
dad3e452
MC
7042 if (!netif_running(dev))
7043 return -EAGAIN;
7044
c770a65c 7045 spin_lock_bh(&bp->phy_lock);
b6016b76 7046 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 7047 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7048
7049 data->val_out = mii_regval;
7050
7051 return err;
7052 }
7053
7054 case SIOCSMIIREG:
7055 if (!capable(CAP_NET_ADMIN))
7056 return -EPERM;
7057
583c28e5 7058 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7059 return -EOPNOTSUPP;
7060
dad3e452
MC
7061 if (!netif_running(dev))
7062 return -EAGAIN;
7063
c770a65c 7064 spin_lock_bh(&bp->phy_lock);
b6016b76 7065 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 7066 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7067
7068 return err;
7069
7070 default:
7071 /* do nothing */
7072 break;
7073 }
7074 return -EOPNOTSUPP;
7075}
7076
7077/* Called with rtnl_lock */
7078static int
7079bnx2_change_mac_addr(struct net_device *dev, void *p)
7080{
7081 struct sockaddr *addr = p;
972ec0d4 7082 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7083
73eef4cd
MC
7084 if (!is_valid_ether_addr(addr->sa_data))
7085 return -EINVAL;
7086
b6016b76
MC
7087 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7088 if (netif_running(dev))
7089 bnx2_set_mac_addr(bp);
7090
7091 return 0;
7092}
7093
7094/* Called with rtnl_lock */
7095static int
7096bnx2_change_mtu(struct net_device *dev, int new_mtu)
7097{
972ec0d4 7098 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7099
7100 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7101 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7102 return -EINVAL;
7103
7104 dev->mtu = new_mtu;
5d5d0015 7105 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
7106}
7107
7108#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7109static void
7110poll_bnx2(struct net_device *dev)
7111{
972ec0d4 7112 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7113
7114 disable_irq(bp->pdev->irq);
7d12e780 7115 bnx2_interrupt(bp->pdev->irq, dev);
b6016b76
MC
7116 enable_irq(bp->pdev->irq);
7117}
7118#endif
7119
253c8b75
MC
7120static void __devinit
7121bnx2_get_5709_media(struct bnx2 *bp)
7122{
7123 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7124 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7125 u32 strap;
7126
7127 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7128 return;
7129 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
583c28e5 7130 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7131 return;
7132 }
7133
7134 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7135 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7136 else
7137 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7138
7139 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7140 switch (strap) {
7141 case 0x4:
7142 case 0x5:
7143 case 0x6:
583c28e5 7144 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7145 return;
7146 }
7147 } else {
7148 switch (strap) {
7149 case 0x1:
7150 case 0x2:
7151 case 0x4:
583c28e5 7152 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7153 return;
7154 }
7155 }
7156}
7157
883e5151
MC
7158static void __devinit
7159bnx2_get_pci_speed(struct bnx2 *bp)
7160{
7161 u32 reg;
7162
7163 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7164 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7165 u32 clkreg;
7166
f86e82fb 7167 bp->flags |= BNX2_FLAG_PCIX;
883e5151
MC
7168
7169 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7170
7171 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7172 switch (clkreg) {
7173 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7174 bp->bus_speed_mhz = 133;
7175 break;
7176
7177 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7178 bp->bus_speed_mhz = 100;
7179 break;
7180
7181 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7182 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7183 bp->bus_speed_mhz = 66;
7184 break;
7185
7186 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7187 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7188 bp->bus_speed_mhz = 50;
7189 break;
7190
7191 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7192 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7193 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7194 bp->bus_speed_mhz = 33;
7195 break;
7196 }
7197 }
7198 else {
7199 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7200 bp->bus_speed_mhz = 66;
7201 else
7202 bp->bus_speed_mhz = 33;
7203 }
7204
7205 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
f86e82fb 7206 bp->flags |= BNX2_FLAG_PCI_32BIT;
883e5151
MC
7207
7208}
7209
b6016b76
MC
7210static int __devinit
7211bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7212{
7213 struct bnx2 *bp;
7214 unsigned long mem_len;
58fc2ea4 7215 int rc, i, j;
b6016b76 7216 u32 reg;
40453c83 7217 u64 dma_mask, persist_dma_mask;
b6016b76 7218
b6016b76 7219 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 7220 bp = netdev_priv(dev);
b6016b76
MC
7221
7222 bp->flags = 0;
7223 bp->phy_flags = 0;
7224
7225 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7226 rc = pci_enable_device(pdev);
7227 if (rc) {
898eb71c 7228 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
7229 goto err_out;
7230 }
7231
7232 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 7233 dev_err(&pdev->dev,
2e8a538d 7234 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
7235 rc = -ENODEV;
7236 goto err_out_disable;
7237 }
7238
7239 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7240 if (rc) {
9b91cf9d 7241 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
7242 goto err_out_disable;
7243 }
7244
7245 pci_set_master(pdev);
6ff2da49 7246 pci_save_state(pdev);
b6016b76
MC
7247
7248 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7249 if (bp->pm_cap == 0) {
9b91cf9d 7250 dev_err(&pdev->dev,
2e8a538d 7251 "Cannot find power management capability, aborting.\n");
b6016b76
MC
7252 rc = -EIO;
7253 goto err_out_release;
7254 }
7255
b6016b76
MC
7256 bp->dev = dev;
7257 bp->pdev = pdev;
7258
7259 spin_lock_init(&bp->phy_lock);
1b8227c4 7260 spin_lock_init(&bp->indirect_lock);
c4028958 7261 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
7262
7263 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 7264 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
7265 dev->mem_end = dev->mem_start + mem_len;
7266 dev->irq = pdev->irq;
7267
7268 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7269
7270 if (!bp->regview) {
9b91cf9d 7271 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
7272 rc = -ENOMEM;
7273 goto err_out_release;
7274 }
7275
7276 /* Configure byte swap and enable write to the reg_window registers.
7277 * Rely on CPU to do target byte swapping on big endian systems
7278 * The chip's target access swapping will not swap all accesses
7279 */
7280 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7281 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7282 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7283
829ca9a3 7284 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
7285
7286 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7287
883e5151
MC
7288 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7289 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7290 dev_err(&pdev->dev,
7291 "Cannot find PCIE capability, aborting.\n");
7292 rc = -EIO;
7293 goto err_out_unmap;
7294 }
f86e82fb 7295 bp->flags |= BNX2_FLAG_PCIE;
2dd201d7 7296 if (CHIP_REV(bp) == CHIP_REV_Ax)
f86e82fb 7297 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
883e5151 7298 } else {
59b47d8a
MC
7299 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7300 if (bp->pcix_cap == 0) {
7301 dev_err(&pdev->dev,
7302 "Cannot find PCIX capability, aborting.\n");
7303 rc = -EIO;
7304 goto err_out_unmap;
7305 }
7306 }
7307
b4b36042
MC
7308 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7309 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
f86e82fb 7310 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
7311 }
7312
8e6a72c4
MC
7313 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7314 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
f86e82fb 7315 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
7316 }
7317
40453c83
MC
7318 /* 5708 cannot support DMA addresses > 40-bit. */
7319 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7320 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7321 else
7322 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7323
7324 /* Configure DMA attributes. */
7325 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7326 dev->features |= NETIF_F_HIGHDMA;
7327 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7328 if (rc) {
7329 dev_err(&pdev->dev,
7330 "pci_set_consistent_dma_mask failed, aborting.\n");
7331 goto err_out_unmap;
7332 }
7333 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7334 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7335 goto err_out_unmap;
7336 }
7337
f86e82fb 7338 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 7339 bnx2_get_pci_speed(bp);
b6016b76
MC
7340
7341 /* 5706A0 may falsely detect SERR and PERR. */
7342 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7343 reg = REG_RD(bp, PCI_COMMAND);
7344 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7345 REG_WR(bp, PCI_COMMAND, reg);
7346 }
7347 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
f86e82fb 7348 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 7349
9b91cf9d 7350 dev_err(&pdev->dev,
2e8a538d 7351 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
7352 goto err_out_unmap;
7353 }
7354
7355 bnx2_init_nvram(bp);
7356
2726d6e1 7357 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d
MC
7358
7359 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
7360 BNX2_SHM_HDR_SIGNATURE_SIG) {
7361 u32 off = PCI_FUNC(pdev->devfn) << 2;
7362
2726d6e1 7363 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 7364 } else
e3648b3d
MC
7365 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7366
b6016b76
MC
7367 /* Get the permanent MAC address. First we need to make sure the
7368 * firmware is actually running.
7369 */
2726d6e1 7370 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
7371
7372 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7373 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 7374 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
7375 rc = -ENODEV;
7376 goto err_out_unmap;
7377 }
7378
2726d6e1 7379 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
58fc2ea4
MC
7380 for (i = 0, j = 0; i < 3; i++) {
7381 u8 num, k, skip0;
7382
7383 num = (u8) (reg >> (24 - (i * 8)));
7384 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7385 if (num >= k || !skip0 || k == 1) {
7386 bp->fw_version[j++] = (num / k) + '0';
7387 skip0 = 0;
7388 }
7389 }
7390 if (i != 2)
7391 bp->fw_version[j++] = '.';
7392 }
2726d6e1 7393 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
7394 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7395 bp->wol = 1;
7396
7397 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 7398 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
7399
7400 for (i = 0; i < 30; i++) {
2726d6e1 7401 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
7402 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7403 break;
7404 msleep(10);
7405 }
7406 }
2726d6e1 7407 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
7408 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7409 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7410 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7411 int i;
2726d6e1 7412 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4
MC
7413
7414 bp->fw_version[j++] = ' ';
7415 for (i = 0; i < 3; i++) {
2726d6e1 7416 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
58fc2ea4
MC
7417 reg = swab32(reg);
7418 memcpy(&bp->fw_version[j], &reg, 4);
7419 j += 4;
7420 }
7421 }
b6016b76 7422
2726d6e1 7423 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
7424 bp->mac_addr[0] = (u8) (reg >> 8);
7425 bp->mac_addr[1] = (u8) reg;
7426
2726d6e1 7427 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
7428 bp->mac_addr[2] = (u8) (reg >> 24);
7429 bp->mac_addr[3] = (u8) (reg >> 16);
7430 bp->mac_addr[4] = (u8) (reg >> 8);
7431 bp->mac_addr[5] = (u8) reg;
7432
7433 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 7434 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
7435
7436 bp->rx_csum = 1;
7437
b6016b76
MC
7438 bp->tx_quick_cons_trip_int = 20;
7439 bp->tx_quick_cons_trip = 20;
7440 bp->tx_ticks_int = 80;
7441 bp->tx_ticks = 80;
6aa20a22 7442
b6016b76
MC
7443 bp->rx_quick_cons_trip_int = 6;
7444 bp->rx_quick_cons_trip = 6;
7445 bp->rx_ticks_int = 18;
7446 bp->rx_ticks = 18;
7447
7ea6920e 7448 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
7449
7450 bp->timer_interval = HZ;
cd339a0e 7451 bp->current_interval = HZ;
b6016b76 7452
5b0c76ad
MC
7453 bp->phy_addr = 1;
7454
b6016b76 7455 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
7456 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7457 bnx2_get_5709_media(bp);
7458 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
583c28e5 7459 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 7460
0d8a6571 7461 bp->phy_port = PORT_TP;
583c28e5 7462 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 7463 bp->phy_port = PORT_FIBRE;
2726d6e1 7464 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 7465 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 7466 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7467 bp->wol = 0;
7468 }
38ea3686
MC
7469 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7470 /* Don't do parallel detect on this board because of
7471 * some board problems. The link will not go down
7472 * if we do parallel detect.
7473 */
7474 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7475 pdev->subsystem_device == 0x310c)
7476 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7477 } else {
5b0c76ad 7478 bp->phy_addr = 2;
5b0c76ad 7479 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 7480 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 7481 }
0d8a6571
MC
7482 bnx2_init_remote_phy(bp);
7483
261dd5ca
MC
7484 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7485 CHIP_NUM(bp) == CHIP_NUM_5708)
583c28e5 7486 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
fb0c18bd
MC
7487 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7488 (CHIP_REV(bp) == CHIP_REV_Ax ||
7489 CHIP_REV(bp) == CHIP_REV_Bx))
583c28e5 7490 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 7491
16088272
MC
7492 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7493 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
846f5c62 7494 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
f86e82fb 7495 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7496 bp->wol = 0;
7497 }
dda1e390 7498
b6016b76
MC
7499 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7500 bp->tx_quick_cons_trip_int =
7501 bp->tx_quick_cons_trip;
7502 bp->tx_ticks_int = bp->tx_ticks;
7503 bp->rx_quick_cons_trip_int =
7504 bp->rx_quick_cons_trip;
7505 bp->rx_ticks_int = bp->rx_ticks;
7506 bp->comp_prod_trip_int = bp->comp_prod_trip;
7507 bp->com_ticks_int = bp->com_ticks;
7508 bp->cmd_ticks_int = bp->cmd_ticks;
7509 }
7510
f9317a40
MC
7511 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7512 *
7513 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7514 * with byte enables disabled on the unused 32-bit word. This is legal
7515 * but causes problems on the AMD 8132 which will eventually stop
7516 * responding after a while.
7517 *
7518 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 7519 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
7520 */
7521 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7522 struct pci_dev *amd_8132 = NULL;
7523
7524 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7525 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7526 amd_8132))) {
f9317a40 7527
44c10138
AK
7528 if (amd_8132->revision >= 0x10 &&
7529 amd_8132->revision <= 0x13) {
f9317a40
MC
7530 disable_msi = 1;
7531 pci_dev_put(amd_8132);
7532 break;
7533 }
7534 }
7535 }
7536
deaf391b 7537 bnx2_set_default_link(bp);
b6016b76
MC
7538 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7539
cd339a0e
MC
7540 init_timer(&bp->timer);
7541 bp->timer.expires = RUN_AT(bp->timer_interval);
7542 bp->timer.data = (unsigned long) bp;
7543 bp->timer.function = bnx2_timer;
7544
b6016b76
MC
7545 return 0;
7546
7547err_out_unmap:
7548 if (bp->regview) {
7549 iounmap(bp->regview);
73eef4cd 7550 bp->regview = NULL;
b6016b76
MC
7551 }
7552
7553err_out_release:
7554 pci_release_regions(pdev);
7555
7556err_out_disable:
7557 pci_disable_device(pdev);
7558 pci_set_drvdata(pdev, NULL);
7559
7560err_out:
7561 return rc;
7562}
7563
883e5151
MC
7564static char * __devinit
7565bnx2_bus_string(struct bnx2 *bp, char *str)
7566{
7567 char *s = str;
7568
f86e82fb 7569 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
7570 s += sprintf(s, "PCI Express");
7571 } else {
7572 s += sprintf(s, "PCI");
f86e82fb 7573 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 7574 s += sprintf(s, "-X");
f86e82fb 7575 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
7576 s += sprintf(s, " 32-bit");
7577 else
7578 s += sprintf(s, " 64-bit");
7579 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7580 }
7581 return str;
7582}
7583
2ba582b7 7584static void __devinit
35efa7c1
MC
7585bnx2_init_napi(struct bnx2 *bp)
7586{
b4b36042 7587 int i;
35efa7c1 7588
b4b36042 7589 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
35e9010b
MC
7590 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7591 int (*poll)(struct napi_struct *, int);
7592
7593 if (i == 0)
7594 poll = bnx2_poll;
7595 else
f0ea2e63 7596 poll = bnx2_poll_msix;
35e9010b
MC
7597
7598 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
7599 bnapi->bp = bp;
7600 }
35efa7c1
MC
7601}
7602
b6016b76
MC
/* PCI probe callback: allocate the net_device, initialize the board via
 * bnx2_init_board(), wire up the (legacy, pre-net_device_ops) method
 * pointers, advertise offload features, and register the interface.
 * Returns 0 on success or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;	/* print the driver banner only once */
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];			/* filled by bnx2_bus_string() below */
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board() cleans up after itself on failure;
		 * only the netdev allocation remains to undo here.
		 */
		free_netdev(dev);
		return rc;
	}

	/* Legacy net_device method pointers. */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from NVRAM by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Checksum offload: IPv4 on all chips, IPv6 only on the 5709. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	/* TSO: IPv4 on all chips, IPv6 only on the 5709. */
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Undo bnx2_init_board() by hand. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7690
/* PCI remove callback: tear down in the reverse order of bnx2_init_one(). */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait out any queued reset_task before the device disappears. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7709
/* PM suspend callback: save PCI state, quiesce the NIC, notify the
 * bootcode firmware why we are going down (this determines whether the
 * chip arms Wake-on-LAN), then enter the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code matching our WOL capability/config. */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7740
/* PM resume callback: restore PCI config space, then repower and fully
 * reinitialize the NIC if the interface was up at suspend time.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);	/* state saved in bnx2_suspend() */
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* NOTE(review): bnx2_init_nic() return value is ignored here, so a
	 * failed reinit is not reported to the PM core — confirm intended.
	 */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7757
6ff2da49
WX
7758/**
7759 * bnx2_io_error_detected - called when PCI error is detected
7760 * @pdev: Pointer to PCI device
7761 * @state: The current pci connection state
7762 *
7763 * This function is called after a PCI bus error affecting
7764 * this device has been detected.
7765 */
7766static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7767 pci_channel_state_t state)
7768{
7769 struct net_device *dev = pci_get_drvdata(pdev);
7770 struct bnx2 *bp = netdev_priv(dev);
7771
7772 rtnl_lock();
7773 netif_device_detach(dev);
7774
7775 if (netif_running(dev)) {
7776 bnx2_netif_stop(bp);
7777 del_timer_sync(&bp->timer);
7778 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7779 }
7780
7781 pci_disable_device(pdev);
7782 rtnl_unlock();
7783
7784 /* Request a slot slot reset. */
7785 return PCI_ERS_RESULT_NEED_RESET;
7786}
7787
7788/**
7789 * bnx2_io_slot_reset - called after the pci bus has been reset.
7790 * @pdev: Pointer to PCI device
7791 *
7792 * Restart the card from scratch, as if from a cold-boot.
7793 */
7794static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7795{
7796 struct net_device *dev = pci_get_drvdata(pdev);
7797 struct bnx2 *bp = netdev_priv(dev);
7798
7799 rtnl_lock();
7800 if (pci_enable_device(pdev)) {
7801 dev_err(&pdev->dev,
7802 "Cannot re-enable PCI device after reset.\n");
7803 rtnl_unlock();
7804 return PCI_ERS_RESULT_DISCONNECT;
7805 }
7806 pci_set_master(pdev);
7807 pci_restore_state(pdev);
7808
7809 if (netif_running(dev)) {
7810 bnx2_set_power_state(bp, PCI_D0);
7811 bnx2_init_nic(bp, 1);
7812 }
7813
7814 rtnl_unlock();
7815 return PCI_ERS_RESULT_RECOVERED;
7816}
7817
7818/**
7819 * bnx2_io_resume - called when traffic can start flowing again.
7820 * @pdev: Pointer to PCI device
7821 *
7822 * This callback is called when the error recovery driver tells us that
7823 * its OK to resume normal operation.
7824 */
7825static void bnx2_io_resume(struct pci_dev *pdev)
7826{
7827 struct net_device *dev = pci_get_drvdata(pdev);
7828 struct bnx2 *bp = netdev_priv(dev);
7829
7830 rtnl_lock();
7831 if (netif_running(dev))
7832 bnx2_netif_start(bp);
7833
7834 netif_device_attach(dev);
7835 rtnl_unlock();
7836}
7837
/* PCI Advanced Error Reporting (AER) recovery callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
7843
b6016b76 7844static struct pci_driver bnx2_pci_driver = {
14ab9b86
PH
7845 .name = DRV_MODULE_NAME,
7846 .id_table = bnx2_pci_tbl,
7847 .probe = bnx2_init_one,
7848 .remove = __devexit_p(bnx2_remove_one),
7849 .suspend = bnx2_suspend,
7850 .resume = bnx2_resume,
6ff2da49 7851 .err_handler = &bnx2_err_handler,
b6016b76
MC
7852};
7853
/* Module entry point: register the PCI driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7858
/* Module exit point: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7863
/* Hook module load/unload to the driver's init and cleanup routines. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7866
7867
7868
This page took 0.984528 seconds and 5 git commands to generate.