[BNX2]: Fix RX packet rot.
[deliverable/linux.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004-2007 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE 0x8000
56
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.6.8"
60 #define DRV_MODULE_RELDATE "October 17, 2007"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
66
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80 typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
86 BCM5708,
87 BCM5708S,
88 BCM5709,
89 BCM5709S,
90 } board_t;
91
92 /* indexed by board_t, above */
93 static const struct {
94 char *name;
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105 };
106
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
126 { 0, }
127 };
128
129 static struct flash_spec flash_table[] =
130 {
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
133 /* Slow EEPROM */
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137 "EEPROM - slow"},
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142 "Entry 0001"},
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159 "Entry 0100"},
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
176 /* Fast EEPROM */
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180 "EEPROM - fast"},
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185 "Entry 1001"},
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1010"},
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1100"},
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1101"},
206 /* Ateml Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
216 };
217
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230 {
231 u32 diff;
232
233 smp_mb();
234
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
237 */
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
240 diff &= 0xffff;
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
243 }
244 return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250 u32 val;
251
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
256 return val;
257 }
258
259 static void
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261 {
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
266 }
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271 offset += cid_addr;
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
290 spin_unlock_bh(&bp->indirect_lock);
291 }
292
293 static int
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
295 {
296 u32 val1;
297 int i, ret;
298
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
302
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305
306 udelay(40);
307 }
308
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
313
314 for (i = 0; i < 50; i++) {
315 udelay(10);
316
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
319 udelay(5);
320
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
323
324 break;
325 }
326 }
327
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
329 *val = 0x0;
330 ret = -EBUSY;
331 }
332 else {
333 *val = val1;
334 ret = 0;
335 }
336
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343
344 udelay(40);
345 }
346
347 return ret;
348 }
349
350 static int
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
352 {
353 u32 val1;
354 int i, ret;
355
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
359
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362
363 udelay(40);
364 }
365
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
370
371 for (i = 0; i < 50; i++) {
372 udelay(10);
373
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
376 udelay(5);
377 break;
378 }
379 }
380
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
382 ret = -EBUSY;
383 else
384 ret = 0;
385
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
389
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
392
393 udelay(40);
394 }
395
396 return ret;
397 }
398
399 static void
400 bnx2_disable_int(struct bnx2 *bp)
401 {
402 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
403 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
404 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
405 }
406
407 static void
408 bnx2_enable_int(struct bnx2 *bp)
409 {
410 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
411 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
412 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
413
414 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
415 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
416
417 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
418 }
419
420 static void
421 bnx2_disable_int_sync(struct bnx2 *bp)
422 {
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
426 }
427
428 static void
429 bnx2_netif_stop(struct bnx2 *bp)
430 {
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
433 napi_disable(&bp->napi);
434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
436 }
437 }
438
439 static void
440 bnx2_netif_start(struct bnx2 *bp)
441 {
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
445 napi_enable(&bp->napi);
446 bnx2_enable_int(bp);
447 }
448 }
449 }
450
451 static void
452 bnx2_free_mem(struct bnx2 *bp)
453 {
454 int i;
455
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
459 bp->ctx_blk[i],
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
462 }
463 }
464 if (bp->status_blk) {
465 pci_free_consistent(bp->pdev, bp->status_stats_size,
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
468 bp->stats_blk = NULL;
469 }
470 if (bp->tx_desc_ring) {
471 pci_free_consistent(bp->pdev,
472 sizeof(struct tx_bd) * TX_DESC_CNT,
473 bp->tx_desc_ring, bp->tx_desc_mapping);
474 bp->tx_desc_ring = NULL;
475 }
476 kfree(bp->tx_buf_ring);
477 bp->tx_buf_ring = NULL;
478 for (i = 0; i < bp->rx_max_ring; i++) {
479 if (bp->rx_desc_ring[i])
480 pci_free_consistent(bp->pdev,
481 sizeof(struct rx_bd) * RX_DESC_CNT,
482 bp->rx_desc_ring[i],
483 bp->rx_desc_mapping[i]);
484 bp->rx_desc_ring[i] = NULL;
485 }
486 vfree(bp->rx_buf_ring);
487 bp->rx_buf_ring = NULL;
488 }
489
490 static int
491 bnx2_alloc_mem(struct bnx2 *bp)
492 {
493 int i, status_blk_size;
494
495 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
496 GFP_KERNEL);
497 if (bp->tx_buf_ring == NULL)
498 return -ENOMEM;
499
500 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
501 sizeof(struct tx_bd) *
502 TX_DESC_CNT,
503 &bp->tx_desc_mapping);
504 if (bp->tx_desc_ring == NULL)
505 goto alloc_mem_err;
506
507 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
508 bp->rx_max_ring);
509 if (bp->rx_buf_ring == NULL)
510 goto alloc_mem_err;
511
512 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
513 bp->rx_max_ring);
514
515 for (i = 0; i < bp->rx_max_ring; i++) {
516 bp->rx_desc_ring[i] =
517 pci_alloc_consistent(bp->pdev,
518 sizeof(struct rx_bd) * RX_DESC_CNT,
519 &bp->rx_desc_mapping[i]);
520 if (bp->rx_desc_ring[i] == NULL)
521 goto alloc_mem_err;
522
523 }
524
525 /* Combine status and statistics blocks into one allocation. */
526 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
527 bp->status_stats_size = status_blk_size +
528 sizeof(struct statistics_block);
529
530 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
531 &bp->status_blk_mapping);
532 if (bp->status_blk == NULL)
533 goto alloc_mem_err;
534
535 memset(bp->status_blk, 0, bp->status_stats_size);
536
537 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
538 status_blk_size);
539
540 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
541
542 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
543 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
544 if (bp->ctx_pages == 0)
545 bp->ctx_pages = 1;
546 for (i = 0; i < bp->ctx_pages; i++) {
547 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
548 BCM_PAGE_SIZE,
549 &bp->ctx_blk_mapping[i]);
550 if (bp->ctx_blk[i] == NULL)
551 goto alloc_mem_err;
552 }
553 }
554 return 0;
555
556 alloc_mem_err:
557 bnx2_free_mem(bp);
558 return -ENOMEM;
559 }
560
561 static void
562 bnx2_report_fw_link(struct bnx2 *bp)
563 {
564 u32 fw_link_status = 0;
565
566 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
567 return;
568
569 if (bp->link_up) {
570 u32 bmsr;
571
572 switch (bp->line_speed) {
573 case SPEED_10:
574 if (bp->duplex == DUPLEX_HALF)
575 fw_link_status = BNX2_LINK_STATUS_10HALF;
576 else
577 fw_link_status = BNX2_LINK_STATUS_10FULL;
578 break;
579 case SPEED_100:
580 if (bp->duplex == DUPLEX_HALF)
581 fw_link_status = BNX2_LINK_STATUS_100HALF;
582 else
583 fw_link_status = BNX2_LINK_STATUS_100FULL;
584 break;
585 case SPEED_1000:
586 if (bp->duplex == DUPLEX_HALF)
587 fw_link_status = BNX2_LINK_STATUS_1000HALF;
588 else
589 fw_link_status = BNX2_LINK_STATUS_1000FULL;
590 break;
591 case SPEED_2500:
592 if (bp->duplex == DUPLEX_HALF)
593 fw_link_status = BNX2_LINK_STATUS_2500HALF;
594 else
595 fw_link_status = BNX2_LINK_STATUS_2500FULL;
596 break;
597 }
598
599 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
600
601 if (bp->autoneg) {
602 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
603
604 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
605 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
606
607 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
608 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
609 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
610 else
611 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
612 }
613 }
614 else
615 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
616
617 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
618 }
619
620 static char *
621 bnx2_xceiver_str(struct bnx2 *bp)
622 {
623 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
624 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
625 "Copper"));
626 }
627
628 static void
629 bnx2_report_link(struct bnx2 *bp)
630 {
631 if (bp->link_up) {
632 netif_carrier_on(bp->dev);
633 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
634 bnx2_xceiver_str(bp));
635
636 printk("%d Mbps ", bp->line_speed);
637
638 if (bp->duplex == DUPLEX_FULL)
639 printk("full duplex");
640 else
641 printk("half duplex");
642
643 if (bp->flow_ctrl) {
644 if (bp->flow_ctrl & FLOW_CTRL_RX) {
645 printk(", receive ");
646 if (bp->flow_ctrl & FLOW_CTRL_TX)
647 printk("& transmit ");
648 }
649 else {
650 printk(", transmit ");
651 }
652 printk("flow control ON");
653 }
654 printk("\n");
655 }
656 else {
657 netif_carrier_off(bp->dev);
658 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
659 bnx2_xceiver_str(bp));
660 }
661
662 bnx2_report_fw_link(bp);
663 }
664
665 static void
666 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
667 {
668 u32 local_adv, remote_adv;
669
670 bp->flow_ctrl = 0;
671 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
672 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
673
674 if (bp->duplex == DUPLEX_FULL) {
675 bp->flow_ctrl = bp->req_flow_ctrl;
676 }
677 return;
678 }
679
680 if (bp->duplex != DUPLEX_FULL) {
681 return;
682 }
683
684 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
685 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
686 u32 val;
687
688 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
689 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
690 bp->flow_ctrl |= FLOW_CTRL_TX;
691 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
692 bp->flow_ctrl |= FLOW_CTRL_RX;
693 return;
694 }
695
696 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
697 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
698
699 if (bp->phy_flags & PHY_SERDES_FLAG) {
700 u32 new_local_adv = 0;
701 u32 new_remote_adv = 0;
702
703 if (local_adv & ADVERTISE_1000XPAUSE)
704 new_local_adv |= ADVERTISE_PAUSE_CAP;
705 if (local_adv & ADVERTISE_1000XPSE_ASYM)
706 new_local_adv |= ADVERTISE_PAUSE_ASYM;
707 if (remote_adv & ADVERTISE_1000XPAUSE)
708 new_remote_adv |= ADVERTISE_PAUSE_CAP;
709 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
710 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
711
712 local_adv = new_local_adv;
713 remote_adv = new_remote_adv;
714 }
715
716 /* See Table 28B-3 of 802.3ab-1999 spec. */
717 if (local_adv & ADVERTISE_PAUSE_CAP) {
718 if(local_adv & ADVERTISE_PAUSE_ASYM) {
719 if (remote_adv & ADVERTISE_PAUSE_CAP) {
720 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
721 }
722 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
723 bp->flow_ctrl = FLOW_CTRL_RX;
724 }
725 }
726 else {
727 if (remote_adv & ADVERTISE_PAUSE_CAP) {
728 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
729 }
730 }
731 }
732 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
733 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
734 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
735
736 bp->flow_ctrl = FLOW_CTRL_TX;
737 }
738 }
739 }
740
741 static int
742 bnx2_5709s_linkup(struct bnx2 *bp)
743 {
744 u32 val, speed;
745
746 bp->link_up = 1;
747
748 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
749 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
750 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
751
752 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
753 bp->line_speed = bp->req_line_speed;
754 bp->duplex = bp->req_duplex;
755 return 0;
756 }
757 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
758 switch (speed) {
759 case MII_BNX2_GP_TOP_AN_SPEED_10:
760 bp->line_speed = SPEED_10;
761 break;
762 case MII_BNX2_GP_TOP_AN_SPEED_100:
763 bp->line_speed = SPEED_100;
764 break;
765 case MII_BNX2_GP_TOP_AN_SPEED_1G:
766 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
767 bp->line_speed = SPEED_1000;
768 break;
769 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
770 bp->line_speed = SPEED_2500;
771 break;
772 }
773 if (val & MII_BNX2_GP_TOP_AN_FD)
774 bp->duplex = DUPLEX_FULL;
775 else
776 bp->duplex = DUPLEX_HALF;
777 return 0;
778 }
779
780 static int
781 bnx2_5708s_linkup(struct bnx2 *bp)
782 {
783 u32 val;
784
785 bp->link_up = 1;
786 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
787 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
788 case BCM5708S_1000X_STAT1_SPEED_10:
789 bp->line_speed = SPEED_10;
790 break;
791 case BCM5708S_1000X_STAT1_SPEED_100:
792 bp->line_speed = SPEED_100;
793 break;
794 case BCM5708S_1000X_STAT1_SPEED_1G:
795 bp->line_speed = SPEED_1000;
796 break;
797 case BCM5708S_1000X_STAT1_SPEED_2G5:
798 bp->line_speed = SPEED_2500;
799 break;
800 }
801 if (val & BCM5708S_1000X_STAT1_FD)
802 bp->duplex = DUPLEX_FULL;
803 else
804 bp->duplex = DUPLEX_HALF;
805
806 return 0;
807 }
808
809 static int
810 bnx2_5706s_linkup(struct bnx2 *bp)
811 {
812 u32 bmcr, local_adv, remote_adv, common;
813
814 bp->link_up = 1;
815 bp->line_speed = SPEED_1000;
816
817 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
818 if (bmcr & BMCR_FULLDPLX) {
819 bp->duplex = DUPLEX_FULL;
820 }
821 else {
822 bp->duplex = DUPLEX_HALF;
823 }
824
825 if (!(bmcr & BMCR_ANENABLE)) {
826 return 0;
827 }
828
829 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
830 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
831
832 common = local_adv & remote_adv;
833 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
834
835 if (common & ADVERTISE_1000XFULL) {
836 bp->duplex = DUPLEX_FULL;
837 }
838 else {
839 bp->duplex = DUPLEX_HALF;
840 }
841 }
842
843 return 0;
844 }
845
846 static int
847 bnx2_copper_linkup(struct bnx2 *bp)
848 {
849 u32 bmcr;
850
851 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
852 if (bmcr & BMCR_ANENABLE) {
853 u32 local_adv, remote_adv, common;
854
855 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
856 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
857
858 common = local_adv & (remote_adv >> 2);
859 if (common & ADVERTISE_1000FULL) {
860 bp->line_speed = SPEED_1000;
861 bp->duplex = DUPLEX_FULL;
862 }
863 else if (common & ADVERTISE_1000HALF) {
864 bp->line_speed = SPEED_1000;
865 bp->duplex = DUPLEX_HALF;
866 }
867 else {
868 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
869 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
870
871 common = local_adv & remote_adv;
872 if (common & ADVERTISE_100FULL) {
873 bp->line_speed = SPEED_100;
874 bp->duplex = DUPLEX_FULL;
875 }
876 else if (common & ADVERTISE_100HALF) {
877 bp->line_speed = SPEED_100;
878 bp->duplex = DUPLEX_HALF;
879 }
880 else if (common & ADVERTISE_10FULL) {
881 bp->line_speed = SPEED_10;
882 bp->duplex = DUPLEX_FULL;
883 }
884 else if (common & ADVERTISE_10HALF) {
885 bp->line_speed = SPEED_10;
886 bp->duplex = DUPLEX_HALF;
887 }
888 else {
889 bp->line_speed = 0;
890 bp->link_up = 0;
891 }
892 }
893 }
894 else {
895 if (bmcr & BMCR_SPEED100) {
896 bp->line_speed = SPEED_100;
897 }
898 else {
899 bp->line_speed = SPEED_10;
900 }
901 if (bmcr & BMCR_FULLDPLX) {
902 bp->duplex = DUPLEX_FULL;
903 }
904 else {
905 bp->duplex = DUPLEX_HALF;
906 }
907 }
908
909 return 0;
910 }
911
912 static int
913 bnx2_set_mac_link(struct bnx2 *bp)
914 {
915 u32 val;
916
917 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
918 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
919 (bp->duplex == DUPLEX_HALF)) {
920 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
921 }
922
923 /* Configure the EMAC mode register. */
924 val = REG_RD(bp, BNX2_EMAC_MODE);
925
926 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
927 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
928 BNX2_EMAC_MODE_25G_MODE);
929
930 if (bp->link_up) {
931 switch (bp->line_speed) {
932 case SPEED_10:
933 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
934 val |= BNX2_EMAC_MODE_PORT_MII_10M;
935 break;
936 }
937 /* fall through */
938 case SPEED_100:
939 val |= BNX2_EMAC_MODE_PORT_MII;
940 break;
941 case SPEED_2500:
942 val |= BNX2_EMAC_MODE_25G_MODE;
943 /* fall through */
944 case SPEED_1000:
945 val |= BNX2_EMAC_MODE_PORT_GMII;
946 break;
947 }
948 }
949 else {
950 val |= BNX2_EMAC_MODE_PORT_GMII;
951 }
952
953 /* Set the MAC to operate in the appropriate duplex mode. */
954 if (bp->duplex == DUPLEX_HALF)
955 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
956 REG_WR(bp, BNX2_EMAC_MODE, val);
957
958 /* Enable/disable rx PAUSE. */
959 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
960
961 if (bp->flow_ctrl & FLOW_CTRL_RX)
962 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
963 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
964
965 /* Enable/disable tx PAUSE. */
966 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
967 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
968
969 if (bp->flow_ctrl & FLOW_CTRL_TX)
970 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
971 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
972
973 /* Acknowledge the interrupt. */
974 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
975
976 return 0;
977 }
978
979 static void
980 bnx2_enable_bmsr1(struct bnx2 *bp)
981 {
982 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
983 (CHIP_NUM(bp) == CHIP_NUM_5709))
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
985 MII_BNX2_BLK_ADDR_GP_STATUS);
986 }
987
988 static void
989 bnx2_disable_bmsr1(struct bnx2 *bp)
990 {
991 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
992 (CHIP_NUM(bp) == CHIP_NUM_5709))
993 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
994 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
995 }
996
997 static int
998 bnx2_test_and_enable_2g5(struct bnx2 *bp)
999 {
1000 u32 up1;
1001 int ret = 1;
1002
1003 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1004 return 0;
1005
1006 if (bp->autoneg & AUTONEG_SPEED)
1007 bp->advertising |= ADVERTISED_2500baseX_Full;
1008
1009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
1012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (!(up1 & BCM5708S_UP1_2G5)) {
1014 up1 |= BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1016 ret = 0;
1017 }
1018
1019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
1023 return ret;
1024 }
1025
1026 static int
1027 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1028 {
1029 u32 up1;
1030 int ret = 0;
1031
1032 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1033 return 0;
1034
1035 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1036 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1037
1038 bnx2_read_phy(bp, bp->mii_up1, &up1);
1039 if (up1 & BCM5708S_UP1_2G5) {
1040 up1 &= ~BCM5708S_UP1_2G5;
1041 bnx2_write_phy(bp, bp->mii_up1, up1);
1042 ret = 1;
1043 }
1044
1045 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1046 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1047 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1048
1049 return ret;
1050 }
1051
1052 static void
1053 bnx2_enable_forced_2g5(struct bnx2 *bp)
1054 {
1055 u32 bmcr;
1056
1057 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1058 return;
1059
1060 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1061 u32 val;
1062
1063 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1064 MII_BNX2_BLK_ADDR_SERDES_DIG);
1065 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1066 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1067 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1068 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1069
1070 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1071 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1072 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1073
1074 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1075 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1076 bmcr |= BCM5708S_BMCR_FORCE_2500;
1077 }
1078
1079 if (bp->autoneg & AUTONEG_SPEED) {
1080 bmcr &= ~BMCR_ANENABLE;
1081 if (bp->req_duplex == DUPLEX_FULL)
1082 bmcr |= BMCR_FULLDPLX;
1083 }
1084 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1085 }
1086
1087 static void
1088 bnx2_disable_forced_2g5(struct bnx2 *bp)
1089 {
1090 u32 bmcr;
1091
1092 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1093 return;
1094
1095 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1096 u32 val;
1097
1098 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1099 MII_BNX2_BLK_ADDR_SERDES_DIG);
1100 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1101 val &= ~MII_BNX2_SD_MISC1_FORCE;
1102 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1103
1104 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1105 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1106 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1107
1108 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1111 }
1112
1113 if (bp->autoneg & AUTONEG_SPEED)
1114 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1115 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1116 }
1117
1118 static int
1119 bnx2_set_link(struct bnx2 *bp)
1120 {
1121 u32 bmsr;
1122 u8 link_up;
1123
1124 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1125 bp->link_up = 1;
1126 return 0;
1127 }
1128
1129 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1130 return 0;
1131
1132 link_up = bp->link_up;
1133
1134 bnx2_enable_bmsr1(bp);
1135 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1136 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1137 bnx2_disable_bmsr1(bp);
1138
1139 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1140 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1141 u32 val;
1142
1143 val = REG_RD(bp, BNX2_EMAC_STATUS);
1144 if (val & BNX2_EMAC_STATUS_LINK)
1145 bmsr |= BMSR_LSTATUS;
1146 else
1147 bmsr &= ~BMSR_LSTATUS;
1148 }
1149
1150 if (bmsr & BMSR_LSTATUS) {
1151 bp->link_up = 1;
1152
1153 if (bp->phy_flags & PHY_SERDES_FLAG) {
1154 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1155 bnx2_5706s_linkup(bp);
1156 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1157 bnx2_5708s_linkup(bp);
1158 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1159 bnx2_5709s_linkup(bp);
1160 }
1161 else {
1162 bnx2_copper_linkup(bp);
1163 }
1164 bnx2_resolve_flow_ctrl(bp);
1165 }
1166 else {
1167 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1168 (bp->autoneg & AUTONEG_SPEED))
1169 bnx2_disable_forced_2g5(bp);
1170
1171 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1172 bp->link_up = 0;
1173 }
1174
1175 if (bp->link_up != link_up) {
1176 bnx2_report_link(bp);
1177 }
1178
1179 bnx2_set_mac_link(bp);
1180
1181 return 0;
1182 }
1183
1184 static int
1185 bnx2_reset_phy(struct bnx2 *bp)
1186 {
1187 int i;
1188 u32 reg;
1189
1190 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1191
1192 #define PHY_RESET_MAX_WAIT 100
1193 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1194 udelay(10);
1195
1196 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1197 if (!(reg & BMCR_RESET)) {
1198 udelay(20);
1199 break;
1200 }
1201 }
1202 if (i == PHY_RESET_MAX_WAIT) {
1203 return -EBUSY;
1204 }
1205 return 0;
1206 }
1207
1208 static u32
1209 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1210 {
1211 u32 adv = 0;
1212
1213 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1214 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1215
1216 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217 adv = ADVERTISE_1000XPAUSE;
1218 }
1219 else {
1220 adv = ADVERTISE_PAUSE_CAP;
1221 }
1222 }
1223 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 adv = ADVERTISE_1000XPSE_ASYM;
1226 }
1227 else {
1228 adv = ADVERTISE_PAUSE_ASYM;
1229 }
1230 }
1231 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1232 if (bp->phy_flags & PHY_SERDES_FLAG) {
1233 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1234 }
1235 else {
1236 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1237 }
1238 }
1239 return adv;
1240 }
1241
1242 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1243
1244 static int
1245 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1246 {
1247 u32 speed_arg = 0, pause_adv;
1248
1249 pause_adv = bnx2_phy_get_pause_adv(bp);
1250
1251 if (bp->autoneg & AUTONEG_SPEED) {
1252 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1253 if (bp->advertising & ADVERTISED_10baseT_Half)
1254 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1255 if (bp->advertising & ADVERTISED_10baseT_Full)
1256 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1257 if (bp->advertising & ADVERTISED_100baseT_Half)
1258 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1259 if (bp->advertising & ADVERTISED_100baseT_Full)
1260 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1261 if (bp->advertising & ADVERTISED_1000baseT_Full)
1262 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1263 if (bp->advertising & ADVERTISED_2500baseX_Full)
1264 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1265 } else {
1266 if (bp->req_line_speed == SPEED_2500)
1267 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1268 else if (bp->req_line_speed == SPEED_1000)
1269 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1270 else if (bp->req_line_speed == SPEED_100) {
1271 if (bp->req_duplex == DUPLEX_FULL)
1272 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1273 else
1274 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1275 } else if (bp->req_line_speed == SPEED_10) {
1276 if (bp->req_duplex == DUPLEX_FULL)
1277 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1278 else
1279 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1280 }
1281 }
1282
1283 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1284 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1285 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1286 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1287
1288 if (port == PORT_TP)
1289 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1290 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1291
1292 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1293
1294 spin_unlock_bh(&bp->phy_lock);
1295 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1296 spin_lock_bh(&bp->phy_lock);
1297
1298 return 0;
1299 }
1300
1301 static int
1302 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1303 {
1304 u32 adv, bmcr;
1305 u32 new_adv = 0;
1306
1307 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1308 return (bnx2_setup_remote_phy(bp, port));
1309
1310 if (!(bp->autoneg & AUTONEG_SPEED)) {
1311 u32 new_bmcr;
1312 int force_link_down = 0;
1313
1314 if (bp->req_line_speed == SPEED_2500) {
1315 if (!bnx2_test_and_enable_2g5(bp))
1316 force_link_down = 1;
1317 } else if (bp->req_line_speed == SPEED_1000) {
1318 if (bnx2_test_and_disable_2g5(bp))
1319 force_link_down = 1;
1320 }
1321 bnx2_read_phy(bp, bp->mii_adv, &adv);
1322 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1323
1324 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1325 new_bmcr = bmcr & ~BMCR_ANENABLE;
1326 new_bmcr |= BMCR_SPEED1000;
1327
1328 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1329 if (bp->req_line_speed == SPEED_2500)
1330 bnx2_enable_forced_2g5(bp);
1331 else if (bp->req_line_speed == SPEED_1000) {
1332 bnx2_disable_forced_2g5(bp);
1333 new_bmcr &= ~0x2000;
1334 }
1335
1336 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1337 if (bp->req_line_speed == SPEED_2500)
1338 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1339 else
1340 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1341 }
1342
1343 if (bp->req_duplex == DUPLEX_FULL) {
1344 adv |= ADVERTISE_1000XFULL;
1345 new_bmcr |= BMCR_FULLDPLX;
1346 }
1347 else {
1348 adv |= ADVERTISE_1000XHALF;
1349 new_bmcr &= ~BMCR_FULLDPLX;
1350 }
1351 if ((new_bmcr != bmcr) || (force_link_down)) {
1352 /* Force a link down visible on the other side */
1353 if (bp->link_up) {
1354 bnx2_write_phy(bp, bp->mii_adv, adv &
1355 ~(ADVERTISE_1000XFULL |
1356 ADVERTISE_1000XHALF));
1357 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1358 BMCR_ANRESTART | BMCR_ANENABLE);
1359
1360 bp->link_up = 0;
1361 netif_carrier_off(bp->dev);
1362 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1363 bnx2_report_link(bp);
1364 }
1365 bnx2_write_phy(bp, bp->mii_adv, adv);
1366 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1367 } else {
1368 bnx2_resolve_flow_ctrl(bp);
1369 bnx2_set_mac_link(bp);
1370 }
1371 return 0;
1372 }
1373
1374 bnx2_test_and_enable_2g5(bp);
1375
1376 if (bp->advertising & ADVERTISED_1000baseT_Full)
1377 new_adv |= ADVERTISE_1000XFULL;
1378
1379 new_adv |= bnx2_phy_get_pause_adv(bp);
1380
1381 bnx2_read_phy(bp, bp->mii_adv, &adv);
1382 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1383
1384 bp->serdes_an_pending = 0;
1385 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1386 /* Force a link down visible on the other side */
1387 if (bp->link_up) {
1388 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1389 spin_unlock_bh(&bp->phy_lock);
1390 msleep(20);
1391 spin_lock_bh(&bp->phy_lock);
1392 }
1393
1394 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1395 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1396 BMCR_ANENABLE);
1397 /* Speed up link-up time when the link partner
1398 * does not autonegotiate which is very common
1399 * in blade servers. Some blade servers use
1400 * IPMI for kerboard input and it's important
1401 * to minimize link disruptions. Autoneg. involves
1402 * exchanging base pages plus 3 next pages and
1403 * normally completes in about 120 msec.
1404 */
1405 bp->current_interval = SERDES_AN_TIMEOUT;
1406 bp->serdes_an_pending = 1;
1407 mod_timer(&bp->timer, jiffies + bp->current_interval);
1408 } else {
1409 bnx2_resolve_flow_ctrl(bp);
1410 bnx2_set_mac_link(bp);
1411 }
1412
1413 return 0;
1414 }
1415
1416 #define ETHTOOL_ALL_FIBRE_SPEED \
1417 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1418 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1419 (ADVERTISED_1000baseT_Full)
1420
1421 #define ETHTOOL_ALL_COPPER_SPEED \
1422 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1423 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1424 ADVERTISED_1000baseT_Full)
1425
1426 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1427 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1428
1429 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1430
1431 static void
1432 bnx2_set_default_remote_link(struct bnx2 *bp)
1433 {
1434 u32 link;
1435
1436 if (bp->phy_port == PORT_TP)
1437 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1438 else
1439 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1440
1441 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1442 bp->req_line_speed = 0;
1443 bp->autoneg |= AUTONEG_SPEED;
1444 bp->advertising = ADVERTISED_Autoneg;
1445 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1446 bp->advertising |= ADVERTISED_10baseT_Half;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1448 bp->advertising |= ADVERTISED_10baseT_Full;
1449 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1450 bp->advertising |= ADVERTISED_100baseT_Half;
1451 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1452 bp->advertising |= ADVERTISED_100baseT_Full;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1454 bp->advertising |= ADVERTISED_1000baseT_Full;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1456 bp->advertising |= ADVERTISED_2500baseX_Full;
1457 } else {
1458 bp->autoneg = 0;
1459 bp->advertising = 0;
1460 bp->req_duplex = DUPLEX_FULL;
1461 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1462 bp->req_line_speed = SPEED_10;
1463 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1464 bp->req_duplex = DUPLEX_HALF;
1465 }
1466 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1467 bp->req_line_speed = SPEED_100;
1468 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1469 bp->req_duplex = DUPLEX_HALF;
1470 }
1471 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1472 bp->req_line_speed = SPEED_1000;
1473 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1474 bp->req_line_speed = SPEED_2500;
1475 }
1476 }
1477
1478 static void
1479 bnx2_set_default_link(struct bnx2 *bp)
1480 {
1481 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1482 return bnx2_set_default_remote_link(bp);
1483
1484 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1485 bp->req_line_speed = 0;
1486 if (bp->phy_flags & PHY_SERDES_FLAG) {
1487 u32 reg;
1488
1489 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1490
1491 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1492 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1493 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1494 bp->autoneg = 0;
1495 bp->req_line_speed = bp->line_speed = SPEED_1000;
1496 bp->req_duplex = DUPLEX_FULL;
1497 }
1498 } else
1499 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1500 }
1501
1502 static void
1503 bnx2_send_heart_beat(struct bnx2 *bp)
1504 {
1505 u32 msg;
1506 u32 addr;
1507
1508 spin_lock(&bp->indirect_lock);
1509 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1510 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1511 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1512 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1513 spin_unlock(&bp->indirect_lock);
1514 }
1515
1516 static void
1517 bnx2_remote_phy_event(struct bnx2 *bp)
1518 {
1519 u32 msg;
1520 u8 link_up = bp->link_up;
1521 u8 old_port;
1522
1523 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1524
1525 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1526 bnx2_send_heart_beat(bp);
1527
1528 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1529
1530 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1531 bp->link_up = 0;
1532 else {
1533 u32 speed;
1534
1535 bp->link_up = 1;
1536 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1537 bp->duplex = DUPLEX_FULL;
1538 switch (speed) {
1539 case BNX2_LINK_STATUS_10HALF:
1540 bp->duplex = DUPLEX_HALF;
1541 case BNX2_LINK_STATUS_10FULL:
1542 bp->line_speed = SPEED_10;
1543 break;
1544 case BNX2_LINK_STATUS_100HALF:
1545 bp->duplex = DUPLEX_HALF;
1546 case BNX2_LINK_STATUS_100BASE_T4:
1547 case BNX2_LINK_STATUS_100FULL:
1548 bp->line_speed = SPEED_100;
1549 break;
1550 case BNX2_LINK_STATUS_1000HALF:
1551 bp->duplex = DUPLEX_HALF;
1552 case BNX2_LINK_STATUS_1000FULL:
1553 bp->line_speed = SPEED_1000;
1554 break;
1555 case BNX2_LINK_STATUS_2500HALF:
1556 bp->duplex = DUPLEX_HALF;
1557 case BNX2_LINK_STATUS_2500FULL:
1558 bp->line_speed = SPEED_2500;
1559 break;
1560 default:
1561 bp->line_speed = 0;
1562 break;
1563 }
1564
1565 spin_lock(&bp->phy_lock);
1566 bp->flow_ctrl = 0;
1567 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1568 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1569 if (bp->duplex == DUPLEX_FULL)
1570 bp->flow_ctrl = bp->req_flow_ctrl;
1571 } else {
1572 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1573 bp->flow_ctrl |= FLOW_CTRL_TX;
1574 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1575 bp->flow_ctrl |= FLOW_CTRL_RX;
1576 }
1577
1578 old_port = bp->phy_port;
1579 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1580 bp->phy_port = PORT_FIBRE;
1581 else
1582 bp->phy_port = PORT_TP;
1583
1584 if (old_port != bp->phy_port)
1585 bnx2_set_default_link(bp);
1586
1587 spin_unlock(&bp->phy_lock);
1588 }
1589 if (bp->link_up != link_up)
1590 bnx2_report_link(bp);
1591
1592 bnx2_set_mac_link(bp);
1593 }
1594
1595 static int
1596 bnx2_set_remote_link(struct bnx2 *bp)
1597 {
1598 u32 evt_code;
1599
1600 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1601 switch (evt_code) {
1602 case BNX2_FW_EVT_CODE_LINK_EVENT:
1603 bnx2_remote_phy_event(bp);
1604 break;
1605 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1606 default:
1607 bnx2_send_heart_beat(bp);
1608 break;
1609 }
1610 return 0;
1611 }
1612
1613 static int
1614 bnx2_setup_copper_phy(struct bnx2 *bp)
1615 {
1616 u32 bmcr;
1617 u32 new_bmcr;
1618
1619 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1620
1621 if (bp->autoneg & AUTONEG_SPEED) {
1622 u32 adv_reg, adv1000_reg;
1623 u32 new_adv_reg = 0;
1624 u32 new_adv1000_reg = 0;
1625
1626 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1627 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1628 ADVERTISE_PAUSE_ASYM);
1629
1630 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1631 adv1000_reg &= PHY_ALL_1000_SPEED;
1632
1633 if (bp->advertising & ADVERTISED_10baseT_Half)
1634 new_adv_reg |= ADVERTISE_10HALF;
1635 if (bp->advertising & ADVERTISED_10baseT_Full)
1636 new_adv_reg |= ADVERTISE_10FULL;
1637 if (bp->advertising & ADVERTISED_100baseT_Half)
1638 new_adv_reg |= ADVERTISE_100HALF;
1639 if (bp->advertising & ADVERTISED_100baseT_Full)
1640 new_adv_reg |= ADVERTISE_100FULL;
1641 if (bp->advertising & ADVERTISED_1000baseT_Full)
1642 new_adv1000_reg |= ADVERTISE_1000FULL;
1643
1644 new_adv_reg |= ADVERTISE_CSMA;
1645
1646 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1647
1648 if ((adv1000_reg != new_adv1000_reg) ||
1649 (adv_reg != new_adv_reg) ||
1650 ((bmcr & BMCR_ANENABLE) == 0)) {
1651
1652 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1653 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1654 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1655 BMCR_ANENABLE);
1656 }
1657 else if (bp->link_up) {
1658 /* Flow ctrl may have changed from auto to forced */
1659 /* or vice-versa. */
1660
1661 bnx2_resolve_flow_ctrl(bp);
1662 bnx2_set_mac_link(bp);
1663 }
1664 return 0;
1665 }
1666
1667 new_bmcr = 0;
1668 if (bp->req_line_speed == SPEED_100) {
1669 new_bmcr |= BMCR_SPEED100;
1670 }
1671 if (bp->req_duplex == DUPLEX_FULL) {
1672 new_bmcr |= BMCR_FULLDPLX;
1673 }
1674 if (new_bmcr != bmcr) {
1675 u32 bmsr;
1676
1677 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1678 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1679
1680 if (bmsr & BMSR_LSTATUS) {
1681 /* Force link down */
1682 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1683 spin_unlock_bh(&bp->phy_lock);
1684 msleep(50);
1685 spin_lock_bh(&bp->phy_lock);
1686
1687 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1688 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1689 }
1690
1691 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1692
1693 /* Normally, the new speed is setup after the link has
1694 * gone down and up again. In some cases, link will not go
1695 * down so we need to set up the new speed here.
1696 */
1697 if (bmsr & BMSR_LSTATUS) {
1698 bp->line_speed = bp->req_line_speed;
1699 bp->duplex = bp->req_duplex;
1700 bnx2_resolve_flow_ctrl(bp);
1701 bnx2_set_mac_link(bp);
1702 }
1703 } else {
1704 bnx2_resolve_flow_ctrl(bp);
1705 bnx2_set_mac_link(bp);
1706 }
1707 return 0;
1708 }
1709
1710 static int
1711 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1712 {
1713 if (bp->loopback == MAC_LOOPBACK)
1714 return 0;
1715
1716 if (bp->phy_flags & PHY_SERDES_FLAG) {
1717 return (bnx2_setup_serdes_phy(bp, port));
1718 }
1719 else {
1720 return (bnx2_setup_copper_phy(bp));
1721 }
1722 }
1723
1724 static int
1725 bnx2_init_5709s_phy(struct bnx2 *bp)
1726 {
1727 u32 val;
1728
1729 bp->mii_bmcr = MII_BMCR + 0x10;
1730 bp->mii_bmsr = MII_BMSR + 0x10;
1731 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1732 bp->mii_adv = MII_ADVERTISE + 0x10;
1733 bp->mii_lpa = MII_LPA + 0x10;
1734 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1735
1736 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1737 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1738
1739 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1740 bnx2_reset_phy(bp);
1741
1742 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1743
1744 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1745 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1746 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1747 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1748
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1750 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1751 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1752 val |= BCM5708S_UP1_2G5;
1753 else
1754 val &= ~BCM5708S_UP1_2G5;
1755 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1756
1757 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1758 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1759 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1760 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1761
1762 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1763
1764 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1765 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1766 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1767
1768 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1769
1770 return 0;
1771 }
1772
1773 static int
1774 bnx2_init_5708s_phy(struct bnx2 *bp)
1775 {
1776 u32 val;
1777
1778 bnx2_reset_phy(bp);
1779
1780 bp->mii_up1 = BCM5708S_UP1;
1781
1782 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1783 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1784 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1785
1786 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1787 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1788 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1789
1790 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1791 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1792 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1793
1794 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1795 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1796 val |= BCM5708S_UP1_2G5;
1797 bnx2_write_phy(bp, BCM5708S_UP1, val);
1798 }
1799
1800 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1801 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1802 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1803 /* increase tx signal amplitude */
1804 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1805 BCM5708S_BLK_ADDR_TX_MISC);
1806 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1807 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1808 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1809 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1810 }
1811
1812 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1813 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1814
1815 if (val) {
1816 u32 is_backplane;
1817
1818 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1819 BNX2_SHARED_HW_CFG_CONFIG);
1820 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1821 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1822 BCM5708S_BLK_ADDR_TX_MISC);
1823 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1824 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1825 BCM5708S_BLK_ADDR_DIG);
1826 }
1827 }
1828 return 0;
1829 }
1830
1831 static int
1832 bnx2_init_5706s_phy(struct bnx2 *bp)
1833 {
1834 bnx2_reset_phy(bp);
1835
1836 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1837
1838 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1839 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1840
1841 if (bp->dev->mtu > 1500) {
1842 u32 val;
1843
1844 /* Set extended packet length bit */
1845 bnx2_write_phy(bp, 0x18, 0x7);
1846 bnx2_read_phy(bp, 0x18, &val);
1847 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1848
1849 bnx2_write_phy(bp, 0x1c, 0x6c00);
1850 bnx2_read_phy(bp, 0x1c, &val);
1851 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1852 }
1853 else {
1854 u32 val;
1855
1856 bnx2_write_phy(bp, 0x18, 0x7);
1857 bnx2_read_phy(bp, 0x18, &val);
1858 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1859
1860 bnx2_write_phy(bp, 0x1c, 0x6c00);
1861 bnx2_read_phy(bp, 0x1c, &val);
1862 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1863 }
1864
1865 return 0;
1866 }
1867
1868 static int
1869 bnx2_init_copper_phy(struct bnx2 *bp)
1870 {
1871 u32 val;
1872
1873 bnx2_reset_phy(bp);
1874
1875 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1876 bnx2_write_phy(bp, 0x18, 0x0c00);
1877 bnx2_write_phy(bp, 0x17, 0x000a);
1878 bnx2_write_phy(bp, 0x15, 0x310b);
1879 bnx2_write_phy(bp, 0x17, 0x201f);
1880 bnx2_write_phy(bp, 0x15, 0x9506);
1881 bnx2_write_phy(bp, 0x17, 0x401f);
1882 bnx2_write_phy(bp, 0x15, 0x14e2);
1883 bnx2_write_phy(bp, 0x18, 0x0400);
1884 }
1885
1886 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1887 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1888 MII_BNX2_DSP_EXPAND_REG | 0x8);
1889 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1890 val &= ~(1 << 8);
1891 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1892 }
1893
1894 if (bp->dev->mtu > 1500) {
1895 /* Set extended packet length bit */
1896 bnx2_write_phy(bp, 0x18, 0x7);
1897 bnx2_read_phy(bp, 0x18, &val);
1898 bnx2_write_phy(bp, 0x18, val | 0x4000);
1899
1900 bnx2_read_phy(bp, 0x10, &val);
1901 bnx2_write_phy(bp, 0x10, val | 0x1);
1902 }
1903 else {
1904 bnx2_write_phy(bp, 0x18, 0x7);
1905 bnx2_read_phy(bp, 0x18, &val);
1906 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1907
1908 bnx2_read_phy(bp, 0x10, &val);
1909 bnx2_write_phy(bp, 0x10, val & ~0x1);
1910 }
1911
1912 /* ethernet@wirespeed */
1913 bnx2_write_phy(bp, 0x18, 0x7007);
1914 bnx2_read_phy(bp, 0x18, &val);
1915 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1916 return 0;
1917 }
1918
1919
1920 static int
1921 bnx2_init_phy(struct bnx2 *bp)
1922 {
1923 u32 val;
1924 int rc = 0;
1925
1926 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1927 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1928
1929 bp->mii_bmcr = MII_BMCR;
1930 bp->mii_bmsr = MII_BMSR;
1931 bp->mii_bmsr1 = MII_BMSR;
1932 bp->mii_adv = MII_ADVERTISE;
1933 bp->mii_lpa = MII_LPA;
1934
1935 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1936
1937 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1938 goto setup_phy;
1939
1940 bnx2_read_phy(bp, MII_PHYSID1, &val);
1941 bp->phy_id = val << 16;
1942 bnx2_read_phy(bp, MII_PHYSID2, &val);
1943 bp->phy_id |= val & 0xffff;
1944
1945 if (bp->phy_flags & PHY_SERDES_FLAG) {
1946 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1947 rc = bnx2_init_5706s_phy(bp);
1948 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1949 rc = bnx2_init_5708s_phy(bp);
1950 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1951 rc = bnx2_init_5709s_phy(bp);
1952 }
1953 else {
1954 rc = bnx2_init_copper_phy(bp);
1955 }
1956
1957 setup_phy:
1958 if (!rc)
1959 rc = bnx2_setup_phy(bp, bp->phy_port);
1960
1961 return rc;
1962 }
1963
1964 static int
1965 bnx2_set_mac_loopback(struct bnx2 *bp)
1966 {
1967 u32 mac_mode;
1968
1969 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1970 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1971 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1972 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1973 bp->link_up = 1;
1974 return 0;
1975 }
1976
1977 static int bnx2_test_link(struct bnx2 *);
1978
1979 static int
1980 bnx2_set_phy_loopback(struct bnx2 *bp)
1981 {
1982 u32 mac_mode;
1983 int rc, i;
1984
1985 spin_lock_bh(&bp->phy_lock);
1986 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1987 BMCR_SPEED1000);
1988 spin_unlock_bh(&bp->phy_lock);
1989 if (rc)
1990 return rc;
1991
1992 for (i = 0; i < 10; i++) {
1993 if (bnx2_test_link(bp) == 0)
1994 break;
1995 msleep(100);
1996 }
1997
1998 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1999 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2000 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2001 BNX2_EMAC_MODE_25G_MODE);
2002
2003 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2004 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2005 bp->link_up = 1;
2006 return 0;
2007 }
2008
2009 static int
2010 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2011 {
2012 int i;
2013 u32 val;
2014
2015 bp->fw_wr_seq++;
2016 msg_data |= bp->fw_wr_seq;
2017
2018 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2019
2020 /* wait for an acknowledgement. */
2021 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2022 msleep(10);
2023
2024 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2025
2026 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2027 break;
2028 }
2029 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2030 return 0;
2031
2032 /* If we timed out, inform the firmware that this is the case. */
2033 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2034 if (!silent)
2035 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2036 "%x\n", msg_data);
2037
2038 msg_data &= ~BNX2_DRV_MSG_CODE;
2039 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2040
2041 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2042
2043 return -EBUSY;
2044 }
2045
2046 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2047 return -EIO;
2048
2049 return 0;
2050 }
2051
2052 static int
2053 bnx2_init_5709_context(struct bnx2 *bp)
2054 {
2055 int i, ret = 0;
2056 u32 val;
2057
2058 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2059 val |= (BCM_PAGE_BITS - 8) << 16;
2060 REG_WR(bp, BNX2_CTX_COMMAND, val);
2061 for (i = 0; i < 10; i++) {
2062 val = REG_RD(bp, BNX2_CTX_COMMAND);
2063 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2064 break;
2065 udelay(2);
2066 }
2067 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2068 return -EBUSY;
2069
2070 for (i = 0; i < bp->ctx_pages; i++) {
2071 int j;
2072
2073 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2074 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2075 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2076 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2077 (u64) bp->ctx_blk_mapping[i] >> 32);
2078 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2079 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2080 for (j = 0; j < 10; j++) {
2081
2082 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2083 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2084 break;
2085 udelay(5);
2086 }
2087 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2088 ret = -EBUSY;
2089 break;
2090 }
2091 }
2092 return ret;
2093 }
2094
2095 static void
2096 bnx2_init_context(struct bnx2 *bp)
2097 {
2098 u32 vcid;
2099
2100 vcid = 96;
2101 while (vcid) {
2102 u32 vcid_addr, pcid_addr, offset;
2103 int i;
2104
2105 vcid--;
2106
2107 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2108 u32 new_vcid;
2109
2110 vcid_addr = GET_PCID_ADDR(vcid);
2111 if (vcid & 0x8) {
2112 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2113 }
2114 else {
2115 new_vcid = vcid;
2116 }
2117 pcid_addr = GET_PCID_ADDR(new_vcid);
2118 }
2119 else {
2120 vcid_addr = GET_CID_ADDR(vcid);
2121 pcid_addr = vcid_addr;
2122 }
2123
2124 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2125 vcid_addr += (i << PHY_CTX_SHIFT);
2126 pcid_addr += (i << PHY_CTX_SHIFT);
2127
2128 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2129 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2130
2131 /* Zero out the context. */
2132 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2133 CTX_WR(bp, 0x00, offset, 0);
2134
2135 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2136 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2137 }
2138 }
2139 }
2140
2141 static int
2142 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2143 {
2144 u16 *good_mbuf;
2145 u32 good_mbuf_cnt;
2146 u32 val;
2147
2148 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2149 if (good_mbuf == NULL) {
2150 printk(KERN_ERR PFX "Failed to allocate memory in "
2151 "bnx2_alloc_bad_rbuf\n");
2152 return -ENOMEM;
2153 }
2154
2155 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2156 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2157
2158 good_mbuf_cnt = 0;
2159
2160 /* Allocate a bunch of mbufs and save the good ones in an array. */
2161 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2162 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2163 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2164
2165 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2166
2167 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2168
2169 /* The addresses with Bit 9 set are bad memory blocks. */
2170 if (!(val & (1 << 9))) {
2171 good_mbuf[good_mbuf_cnt] = (u16) val;
2172 good_mbuf_cnt++;
2173 }
2174
2175 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2176 }
2177
2178 /* Free the good ones back to the mbuf pool thus discarding
2179 * all the bad ones. */
2180 while (good_mbuf_cnt) {
2181 good_mbuf_cnt--;
2182
2183 val = good_mbuf[good_mbuf_cnt];
2184 val = (val << 9) | val | 1;
2185
2186 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2187 }
2188 kfree(good_mbuf);
2189 return 0;
2190 }
2191
2192 static void
2193 bnx2_set_mac_addr(struct bnx2 *bp)
2194 {
2195 u32 val;
2196 u8 *mac_addr = bp->dev->dev_addr;
2197
2198 val = (mac_addr[0] << 8) | mac_addr[1];
2199
2200 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2201
2202 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2203 (mac_addr[4] << 8) | mac_addr[5];
2204
2205 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2206 }
2207
2208 static inline int
2209 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2210 {
2211 struct sk_buff *skb;
2212 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2213 dma_addr_t mapping;
2214 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2215 unsigned long align;
2216
2217 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2218 if (skb == NULL) {
2219 return -ENOMEM;
2220 }
2221
2222 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2223 skb_reserve(skb, BNX2_RX_ALIGN - align);
2224
2225 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2226 PCI_DMA_FROMDEVICE);
2227
2228 rx_buf->skb = skb;
2229 pci_unmap_addr_set(rx_buf, mapping, mapping);
2230
2231 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2232 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2233
2234 bp->rx_prod_bseq += bp->rx_buf_use_size;
2235
2236 return 0;
2237 }
2238
2239 static int
2240 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2241 {
2242 struct status_block *sblk = bp->status_blk;
2243 u32 new_link_state, old_link_state;
2244 int is_set = 1;
2245
2246 new_link_state = sblk->status_attn_bits & event;
2247 old_link_state = sblk->status_attn_bits_ack & event;
2248 if (new_link_state != old_link_state) {
2249 if (new_link_state)
2250 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2251 else
2252 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2253 } else
2254 is_set = 0;
2255
2256 return is_set;
2257 }
2258
2259 static void
2260 bnx2_phy_int(struct bnx2 *bp)
2261 {
2262 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2263 spin_lock(&bp->phy_lock);
2264 bnx2_set_link(bp);
2265 spin_unlock(&bp->phy_lock);
2266 }
2267 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2268 bnx2_set_remote_link(bp);
2269
2270 }
2271
2272 static void
2273 bnx2_tx_int(struct bnx2 *bp)
2274 {
2275 struct status_block *sblk = bp->status_blk;
2276 u16 hw_cons, sw_cons, sw_ring_cons;
2277 int tx_free_bd = 0;
2278
2279 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2280 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2281 hw_cons++;
2282 }
2283 sw_cons = bp->tx_cons;
2284
2285 while (sw_cons != hw_cons) {
2286 struct sw_bd *tx_buf;
2287 struct sk_buff *skb;
2288 int i, last;
2289
2290 sw_ring_cons = TX_RING_IDX(sw_cons);
2291
2292 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2293 skb = tx_buf->skb;
2294
2295 /* partial BD completions possible with TSO packets */
2296 if (skb_is_gso(skb)) {
2297 u16 last_idx, last_ring_idx;
2298
2299 last_idx = sw_cons +
2300 skb_shinfo(skb)->nr_frags + 1;
2301 last_ring_idx = sw_ring_cons +
2302 skb_shinfo(skb)->nr_frags + 1;
2303 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2304 last_idx++;
2305 }
2306 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2307 break;
2308 }
2309 }
2310
2311 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2312 skb_headlen(skb), PCI_DMA_TODEVICE);
2313
2314 tx_buf->skb = NULL;
2315 last = skb_shinfo(skb)->nr_frags;
2316
2317 for (i = 0; i < last; i++) {
2318 sw_cons = NEXT_TX_BD(sw_cons);
2319
2320 pci_unmap_page(bp->pdev,
2321 pci_unmap_addr(
2322 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2323 mapping),
2324 skb_shinfo(skb)->frags[i].size,
2325 PCI_DMA_TODEVICE);
2326 }
2327
2328 sw_cons = NEXT_TX_BD(sw_cons);
2329
2330 tx_free_bd += last + 1;
2331
2332 dev_kfree_skb(skb);
2333
2334 hw_cons = bp->hw_tx_cons =
2335 sblk->status_tx_quick_consumer_index0;
2336
2337 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2338 hw_cons++;
2339 }
2340 }
2341
2342 bp->tx_cons = sw_cons;
2343 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2344 * before checking for netif_queue_stopped(). Without the
2345 * memory barrier, there is a small possibility that bnx2_start_xmit()
2346 * will miss it and cause the queue to be stopped forever.
2347 */
2348 smp_mb();
2349
2350 if (unlikely(netif_queue_stopped(bp->dev)) &&
2351 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2352 netif_tx_lock(bp->dev);
2353 if ((netif_queue_stopped(bp->dev)) &&
2354 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2355 netif_wake_queue(bp->dev);
2356 netif_tx_unlock(bp->dev);
2357 }
2358 }
2359
2360 static inline void
2361 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2362 u16 cons, u16 prod)
2363 {
2364 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2365 struct rx_bd *cons_bd, *prod_bd;
2366
2367 cons_rx_buf = &bp->rx_buf_ring[cons];
2368 prod_rx_buf = &bp->rx_buf_ring[prod];
2369
2370 pci_dma_sync_single_for_device(bp->pdev,
2371 pci_unmap_addr(cons_rx_buf, mapping),
2372 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2373
2374 bp->rx_prod_bseq += bp->rx_buf_use_size;
2375
2376 prod_rx_buf->skb = skb;
2377
2378 if (cons == prod)
2379 return;
2380
2381 pci_unmap_addr_set(prod_rx_buf, mapping,
2382 pci_unmap_addr(cons_rx_buf, mapping));
2383
2384 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2385 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2386 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2387 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2388 }
2389
2390 static inline u16
2391 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2392 {
2393 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2394
2395 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2396 cons++;
2397 return cons;
2398 }
2399
2400 static int
2401 bnx2_rx_int(struct bnx2 *bp, int budget)
2402 {
2403 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2404 struct l2_fhdr *rx_hdr;
2405 int rx_pkt = 0;
2406
2407 hw_cons = bnx2_get_hw_rx_cons(bp);
2408 sw_cons = bp->rx_cons;
2409 sw_prod = bp->rx_prod;
2410
2411 /* Memory barrier necessary as speculative reads of the rx
2412 * buffer can be ahead of the index in the status block
2413 */
2414 rmb();
2415 while (sw_cons != hw_cons) {
2416 unsigned int len;
2417 u32 status;
2418 struct sw_bd *rx_buf;
2419 struct sk_buff *skb;
2420 dma_addr_t dma_addr;
2421
2422 sw_ring_cons = RX_RING_IDX(sw_cons);
2423 sw_ring_prod = RX_RING_IDX(sw_prod);
2424
2425 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2426 skb = rx_buf->skb;
2427
2428 rx_buf->skb = NULL;
2429
2430 dma_addr = pci_unmap_addr(rx_buf, mapping);
2431
2432 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2433 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2434
2435 rx_hdr = (struct l2_fhdr *) skb->data;
2436 len = rx_hdr->l2_fhdr_pkt_len - 4;
2437
2438 if ((status = rx_hdr->l2_fhdr_status) &
2439 (L2_FHDR_ERRORS_BAD_CRC |
2440 L2_FHDR_ERRORS_PHY_DECODE |
2441 L2_FHDR_ERRORS_ALIGNMENT |
2442 L2_FHDR_ERRORS_TOO_SHORT |
2443 L2_FHDR_ERRORS_GIANT_FRAME)) {
2444
2445 goto reuse_rx;
2446 }
2447
2448 /* Since we don't have a jumbo ring, copy small packets
2449 * if mtu > 1500
2450 */
2451 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2452 struct sk_buff *new_skb;
2453
2454 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2455 if (new_skb == NULL)
2456 goto reuse_rx;
2457
2458 /* aligned copy */
2459 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2460 new_skb->data, len + 2);
2461 skb_reserve(new_skb, 2);
2462 skb_put(new_skb, len);
2463
2464 bnx2_reuse_rx_skb(bp, skb,
2465 sw_ring_cons, sw_ring_prod);
2466
2467 skb = new_skb;
2468 }
2469 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2470 pci_unmap_single(bp->pdev, dma_addr,
2471 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2472
2473 skb_reserve(skb, bp->rx_offset);
2474 skb_put(skb, len);
2475 }
2476 else {
2477 reuse_rx:
2478 bnx2_reuse_rx_skb(bp, skb,
2479 sw_ring_cons, sw_ring_prod);
2480 goto next_rx;
2481 }
2482
2483 skb->protocol = eth_type_trans(skb, bp->dev);
2484
2485 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2486 (ntohs(skb->protocol) != 0x8100)) {
2487
2488 dev_kfree_skb(skb);
2489 goto next_rx;
2490
2491 }
2492
2493 skb->ip_summed = CHECKSUM_NONE;
2494 if (bp->rx_csum &&
2495 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2496 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2497
2498 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2499 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2500 skb->ip_summed = CHECKSUM_UNNECESSARY;
2501 }
2502
2503 #ifdef BCM_VLAN
2504 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2505 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2506 rx_hdr->l2_fhdr_vlan_tag);
2507 }
2508 else
2509 #endif
2510 netif_receive_skb(skb);
2511
2512 bp->dev->last_rx = jiffies;
2513 rx_pkt++;
2514
2515 next_rx:
2516 sw_cons = NEXT_RX_BD(sw_cons);
2517 sw_prod = NEXT_RX_BD(sw_prod);
2518
2519 if ((rx_pkt == budget))
2520 break;
2521
2522 /* Refresh hw_cons to see if there is new work */
2523 if (sw_cons == hw_cons) {
2524 hw_cons = bnx2_get_hw_rx_cons(bp);
2525 rmb();
2526 }
2527 }
2528 bp->rx_cons = sw_cons;
2529 bp->rx_prod = sw_prod;
2530
2531 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2532
2533 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2534
2535 mmiowb();
2536
2537 return rx_pkt;
2538
2539 }
2540
2541 /* MSI ISR - The only difference between this and the INTx ISR
2542 * is that the MSI interrupt is always serviced.
2543 */
2544 static irqreturn_t
2545 bnx2_msi(int irq, void *dev_instance)
2546 {
2547 struct net_device *dev = dev_instance;
2548 struct bnx2 *bp = netdev_priv(dev);
2549
2550 prefetch(bp->status_blk);
2551 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2552 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2553 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2554
2555 /* Return here if interrupt is disabled. */
2556 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2557 return IRQ_HANDLED;
2558
2559 netif_rx_schedule(dev, &bp->napi);
2560
2561 return IRQ_HANDLED;
2562 }
2563
2564 static irqreturn_t
2565 bnx2_msi_1shot(int irq, void *dev_instance)
2566 {
2567 struct net_device *dev = dev_instance;
2568 struct bnx2 *bp = netdev_priv(dev);
2569
2570 prefetch(bp->status_blk);
2571
2572 /* Return here if interrupt is disabled. */
2573 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2574 return IRQ_HANDLED;
2575
2576 netif_rx_schedule(dev, &bp->napi);
2577
2578 return IRQ_HANDLED;
2579 }
2580
2581 static irqreturn_t
2582 bnx2_interrupt(int irq, void *dev_instance)
2583 {
2584 struct net_device *dev = dev_instance;
2585 struct bnx2 *bp = netdev_priv(dev);
2586 struct status_block *sblk = bp->status_blk;
2587
2588 /* When using INTx, it is possible for the interrupt to arrive
2589 * at the CPU before the status block posted prior to the
2590 * interrupt. Reading a register will flush the status block.
2591 * When using MSI, the MSI message will always complete after
2592 * the status block write.
2593 */
2594 if ((sblk->status_idx == bp->last_status_idx) &&
2595 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2596 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2597 return IRQ_NONE;
2598
2599 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2600 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2601 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2602
2603 /* Read back to deassert IRQ immediately to avoid too many
2604 * spurious interrupts.
2605 */
2606 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2607
2608 /* Return here if interrupt is shared and is disabled. */
2609 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2610 return IRQ_HANDLED;
2611
2612 if (netif_rx_schedule_prep(dev, &bp->napi)) {
2613 bp->last_status_idx = sblk->status_idx;
2614 __netif_rx_schedule(dev, &bp->napi);
2615 }
2616
2617 return IRQ_HANDLED;
2618 }
2619
2620 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2621 STATUS_ATTN_BITS_TIMER_ABORT)
2622
2623 static inline int
2624 bnx2_has_work(struct bnx2 *bp)
2625 {
2626 struct status_block *sblk = bp->status_blk;
2627
2628 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2629 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2630 return 1;
2631
2632 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2633 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2634 return 1;
2635
2636 return 0;
2637 }
2638
2639 static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
2640 {
2641 struct status_block *sblk = bp->status_blk;
2642 u32 status_attn_bits = sblk->status_attn_bits;
2643 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2644
2645 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2646 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2647
2648 bnx2_phy_int(bp);
2649
2650 /* This is needed to take care of transient status
2651 * during link changes.
2652 */
2653 REG_WR(bp, BNX2_HC_COMMAND,
2654 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2655 REG_RD(bp, BNX2_HC_COMMAND);
2656 }
2657
2658 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2659 bnx2_tx_int(bp);
2660
2661 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
2662 work_done += bnx2_rx_int(bp, budget - work_done);
2663
2664 return work_done;
2665 }
2666
2667 static int bnx2_poll(struct napi_struct *napi, int budget)
2668 {
2669 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2670 int work_done = 0;
2671 struct status_block *sblk = bp->status_blk;
2672
2673 while (1) {
2674 work_done = bnx2_poll_work(bp, work_done, budget);
2675
2676 if (unlikely(work_done >= budget))
2677 break;
2678
2679 /* bp->last_status_idx is used below to tell the hw how
2680 * much work has been processed, so we must read it before
2681 * checking for more work.
2682 */
2683 bp->last_status_idx = sblk->status_idx;
2684 rmb();
2685 if (likely(!bnx2_has_work(bp))) {
2686 netif_rx_complete(bp->dev, napi);
2687 if (likely(bp->flags & USING_MSI_FLAG)) {
2688 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2689 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2690 bp->last_status_idx);
2691 break;
2692 }
2693 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2694 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2695 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2696 bp->last_status_idx);
2697
2698 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2699 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2700 bp->last_status_idx);
2701 break;
2702 }
2703 }
2704
2705 return work_done;
2706 }
2707
2708 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2709 * from set_multicast.
2710 */
2711 static void
2712 bnx2_set_rx_mode(struct net_device *dev)
2713 {
2714 struct bnx2 *bp = netdev_priv(dev);
2715 u32 rx_mode, sort_mode;
2716 int i;
2717
2718 spin_lock_bh(&bp->phy_lock);
2719
2720 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2721 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2722 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2723 #ifdef BCM_VLAN
2724 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2725 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2726 #else
2727 if (!(bp->flags & ASF_ENABLE_FLAG))
2728 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2729 #endif
2730 if (dev->flags & IFF_PROMISC) {
2731 /* Promiscuous mode. */
2732 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2733 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2734 BNX2_RPM_SORT_USER0_PROM_VLAN;
2735 }
2736 else if (dev->flags & IFF_ALLMULTI) {
2737 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2738 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2739 0xffffffff);
2740 }
2741 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2742 }
2743 else {
2744 /* Accept one or more multicast(s). */
2745 struct dev_mc_list *mclist;
2746 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2747 u32 regidx;
2748 u32 bit;
2749 u32 crc;
2750
2751 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2752
2753 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2754 i++, mclist = mclist->next) {
2755
2756 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2757 bit = crc & 0xff;
2758 regidx = (bit & 0xe0) >> 5;
2759 bit &= 0x1f;
2760 mc_filter[regidx] |= (1 << bit);
2761 }
2762
2763 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2764 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2765 mc_filter[i]);
2766 }
2767
2768 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2769 }
2770
2771 if (rx_mode != bp->rx_mode) {
2772 bp->rx_mode = rx_mode;
2773 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2774 }
2775
2776 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2777 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2778 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2779
2780 spin_unlock_bh(&bp->phy_lock);
2781 }
2782
2783 static void
2784 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2785 u32 rv2p_proc)
2786 {
2787 int i;
2788 u32 val;
2789
2790
2791 for (i = 0; i < rv2p_code_len; i += 8) {
2792 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2793 rv2p_code++;
2794 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2795 rv2p_code++;
2796
2797 if (rv2p_proc == RV2P_PROC1) {
2798 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2799 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2800 }
2801 else {
2802 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2803 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2804 }
2805 }
2806
2807 /* Reset the processor, un-stall is done later. */
2808 if (rv2p_proc == RV2P_PROC1) {
2809 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2810 }
2811 else {
2812 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2813 }
2814 }
2815
2816 static int
2817 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2818 {
2819 u32 offset;
2820 u32 val;
2821 int rc;
2822
2823 /* Halt the CPU. */
2824 val = REG_RD_IND(bp, cpu_reg->mode);
2825 val |= cpu_reg->mode_value_halt;
2826 REG_WR_IND(bp, cpu_reg->mode, val);
2827 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2828
2829 /* Load the Text area. */
2830 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2831 if (fw->gz_text) {
2832 int j;
2833
2834 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2835 fw->gz_text_len);
2836 if (rc < 0)
2837 return rc;
2838
2839 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2840 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2841 }
2842 }
2843
2844 /* Load the Data area. */
2845 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2846 if (fw->data) {
2847 int j;
2848
2849 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2850 REG_WR_IND(bp, offset, fw->data[j]);
2851 }
2852 }
2853
2854 /* Load the SBSS area. */
2855 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2856 if (fw->sbss_len) {
2857 int j;
2858
2859 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2860 REG_WR_IND(bp, offset, 0);
2861 }
2862 }
2863
2864 /* Load the BSS area. */
2865 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2866 if (fw->bss_len) {
2867 int j;
2868
2869 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2870 REG_WR_IND(bp, offset, 0);
2871 }
2872 }
2873
2874 /* Load the Read-Only area. */
2875 offset = cpu_reg->spad_base +
2876 (fw->rodata_addr - cpu_reg->mips_view_base);
2877 if (fw->rodata) {
2878 int j;
2879
2880 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2881 REG_WR_IND(bp, offset, fw->rodata[j]);
2882 }
2883 }
2884
2885 /* Clear the pre-fetch instruction. */
2886 REG_WR_IND(bp, cpu_reg->inst, 0);
2887 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2888
2889 /* Start the CPU. */
2890 val = REG_RD_IND(bp, cpu_reg->mode);
2891 val &= ~cpu_reg->mode_value_halt;
2892 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2893 REG_WR_IND(bp, cpu_reg->mode, val);
2894
2895 return 0;
2896 }
2897
2898 static int
2899 bnx2_init_cpus(struct bnx2 *bp)
2900 {
2901 struct cpu_reg cpu_reg;
2902 struct fw_info *fw;
2903 int rc;
2904 void *text;
2905
2906 /* Initialize the RV2P processor. */
2907 text = vmalloc(FW_BUF_SIZE);
2908 if (!text)
2909 return -ENOMEM;
2910 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
2911 if (rc < 0)
2912 goto init_cpu_err;
2913
2914 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
2915
2916 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
2917 if (rc < 0)
2918 goto init_cpu_err;
2919
2920 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
2921
2922 /* Initialize the RX Processor. */
2923 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2924 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2925 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2926 cpu_reg.state = BNX2_RXP_CPU_STATE;
2927 cpu_reg.state_value_clear = 0xffffff;
2928 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2929 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2930 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2931 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2932 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2933 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2934 cpu_reg.mips_view_base = 0x8000000;
2935
2936 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2937 fw = &bnx2_rxp_fw_09;
2938 else
2939 fw = &bnx2_rxp_fw_06;
2940
2941 fw->text = text;
2942 rc = load_cpu_fw(bp, &cpu_reg, fw);
2943 if (rc)
2944 goto init_cpu_err;
2945
2946 /* Initialize the TX Processor. */
2947 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2948 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2949 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2950 cpu_reg.state = BNX2_TXP_CPU_STATE;
2951 cpu_reg.state_value_clear = 0xffffff;
2952 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2953 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2954 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2955 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2956 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2957 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2958 cpu_reg.mips_view_base = 0x8000000;
2959
2960 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2961 fw = &bnx2_txp_fw_09;
2962 else
2963 fw = &bnx2_txp_fw_06;
2964
2965 fw->text = text;
2966 rc = load_cpu_fw(bp, &cpu_reg, fw);
2967 if (rc)
2968 goto init_cpu_err;
2969
2970 /* Initialize the TX Patch-up Processor. */
2971 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2972 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2973 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2974 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2975 cpu_reg.state_value_clear = 0xffffff;
2976 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2977 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2978 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2979 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2980 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2981 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2982 cpu_reg.mips_view_base = 0x8000000;
2983
2984 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2985 fw = &bnx2_tpat_fw_09;
2986 else
2987 fw = &bnx2_tpat_fw_06;
2988
2989 fw->text = text;
2990 rc = load_cpu_fw(bp, &cpu_reg, fw);
2991 if (rc)
2992 goto init_cpu_err;
2993
2994 /* Initialize the Completion Processor. */
2995 cpu_reg.mode = BNX2_COM_CPU_MODE;
2996 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2997 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2998 cpu_reg.state = BNX2_COM_CPU_STATE;
2999 cpu_reg.state_value_clear = 0xffffff;
3000 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3001 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3002 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3003 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3004 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3005 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3006 cpu_reg.mips_view_base = 0x8000000;
3007
3008 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3009 fw = &bnx2_com_fw_09;
3010 else
3011 fw = &bnx2_com_fw_06;
3012
3013 fw->text = text;
3014 rc = load_cpu_fw(bp, &cpu_reg, fw);
3015 if (rc)
3016 goto init_cpu_err;
3017
3018 /* Initialize the Command Processor. */
3019 cpu_reg.mode = BNX2_CP_CPU_MODE;
3020 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3021 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3022 cpu_reg.state = BNX2_CP_CPU_STATE;
3023 cpu_reg.state_value_clear = 0xffffff;
3024 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3025 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3026 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3027 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3028 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3029 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3030 cpu_reg.mips_view_base = 0x8000000;
3031
3032 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3033 fw = &bnx2_cp_fw_09;
3034
3035 fw->text = text;
3036 rc = load_cpu_fw(bp, &cpu_reg, fw);
3037 if (rc)
3038 goto init_cpu_err;
3039 }
3040 init_cpu_err:
3041 vfree(text);
3042 return rc;
3043 }
3044
3045 static int
3046 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3047 {
3048 u16 pmcsr;
3049
3050 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3051
3052 switch (state) {
3053 case PCI_D0: {
3054 u32 val;
3055
3056 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3057 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3058 PCI_PM_CTRL_PME_STATUS);
3059
3060 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3061 /* delay required during transition out of D3hot */
3062 msleep(20);
3063
3064 val = REG_RD(bp, BNX2_EMAC_MODE);
3065 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3066 val &= ~BNX2_EMAC_MODE_MPKT;
3067 REG_WR(bp, BNX2_EMAC_MODE, val);
3068
3069 val = REG_RD(bp, BNX2_RPM_CONFIG);
3070 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3071 REG_WR(bp, BNX2_RPM_CONFIG, val);
3072 break;
3073 }
3074 case PCI_D3hot: {
3075 int i;
3076 u32 val, wol_msg;
3077
3078 if (bp->wol) {
3079 u32 advertising;
3080 u8 autoneg;
3081
3082 autoneg = bp->autoneg;
3083 advertising = bp->advertising;
3084
3085 if (bp->phy_port == PORT_TP) {
3086 bp->autoneg = AUTONEG_SPEED;
3087 bp->advertising = ADVERTISED_10baseT_Half |
3088 ADVERTISED_10baseT_Full |
3089 ADVERTISED_100baseT_Half |
3090 ADVERTISED_100baseT_Full |
3091 ADVERTISED_Autoneg;
3092 }
3093
3094 spin_lock_bh(&bp->phy_lock);
3095 bnx2_setup_phy(bp, bp->phy_port);
3096 spin_unlock_bh(&bp->phy_lock);
3097
3098 bp->autoneg = autoneg;
3099 bp->advertising = advertising;
3100
3101 bnx2_set_mac_addr(bp);
3102
3103 val = REG_RD(bp, BNX2_EMAC_MODE);
3104
3105 /* Enable port mode. */
3106 val &= ~BNX2_EMAC_MODE_PORT;
3107 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3108 BNX2_EMAC_MODE_ACPI_RCVD |
3109 BNX2_EMAC_MODE_MPKT;
3110 if (bp->phy_port == PORT_TP)
3111 val |= BNX2_EMAC_MODE_PORT_MII;
3112 else {
3113 val |= BNX2_EMAC_MODE_PORT_GMII;
3114 if (bp->line_speed == SPEED_2500)
3115 val |= BNX2_EMAC_MODE_25G_MODE;
3116 }
3117
3118 REG_WR(bp, BNX2_EMAC_MODE, val);
3119
3120 /* receive all multicast */
3121 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3122 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3123 0xffffffff);
3124 }
3125 REG_WR(bp, BNX2_EMAC_RX_MODE,
3126 BNX2_EMAC_RX_MODE_SORT_MODE);
3127
3128 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3129 BNX2_RPM_SORT_USER0_MC_EN;
3130 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3131 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3132 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3133 BNX2_RPM_SORT_USER0_ENA);
3134
3135 /* Need to enable EMAC and RPM for WOL. */
3136 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3137 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3138 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3139 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3140
3141 val = REG_RD(bp, BNX2_RPM_CONFIG);
3142 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3143 REG_WR(bp, BNX2_RPM_CONFIG, val);
3144
3145 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3146 }
3147 else {
3148 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3149 }
3150
3151 if (!(bp->flags & NO_WOL_FLAG))
3152 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3153
3154 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3155 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3156 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3157
3158 if (bp->wol)
3159 pmcsr |= 3;
3160 }
3161 else {
3162 pmcsr |= 3;
3163 }
3164 if (bp->wol) {
3165 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3166 }
3167 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3168 pmcsr);
3169
3170 /* No more memory access after this point until
3171 * device is brought back to D0.
3172 */
3173 udelay(50);
3174 break;
3175 }
3176 default:
3177 return -EINVAL;
3178 }
3179 return 0;
3180 }
3181
3182 static int
3183 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3184 {
3185 u32 val;
3186 int j;
3187
3188 /* Request access to the flash interface. */
3189 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3190 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3191 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3192 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3193 break;
3194
3195 udelay(5);
3196 }
3197
3198 if (j >= NVRAM_TIMEOUT_COUNT)
3199 return -EBUSY;
3200
3201 return 0;
3202 }
3203
3204 static int
3205 bnx2_release_nvram_lock(struct bnx2 *bp)
3206 {
3207 int j;
3208 u32 val;
3209
3210 /* Relinquish nvram interface. */
3211 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3212
3213 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3214 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3215 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3216 break;
3217
3218 udelay(5);
3219 }
3220
3221 if (j >= NVRAM_TIMEOUT_COUNT)
3222 return -EBUSY;
3223
3224 return 0;
3225 }
3226
3227
3228 static int
3229 bnx2_enable_nvram_write(struct bnx2 *bp)
3230 {
3231 u32 val;
3232
3233 val = REG_RD(bp, BNX2_MISC_CFG);
3234 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3235
3236 if (bp->flash_info->flags & BNX2_NV_WREN) {
3237 int j;
3238
3239 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3240 REG_WR(bp, BNX2_NVM_COMMAND,
3241 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3242
3243 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3244 udelay(5);
3245
3246 val = REG_RD(bp, BNX2_NVM_COMMAND);
3247 if (val & BNX2_NVM_COMMAND_DONE)
3248 break;
3249 }
3250
3251 if (j >= NVRAM_TIMEOUT_COUNT)
3252 return -EBUSY;
3253 }
3254 return 0;
3255 }
3256
3257 static void
3258 bnx2_disable_nvram_write(struct bnx2 *bp)
3259 {
3260 u32 val;
3261
3262 val = REG_RD(bp, BNX2_MISC_CFG);
3263 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3264 }
3265
3266
3267 static void
3268 bnx2_enable_nvram_access(struct bnx2 *bp)
3269 {
3270 u32 val;
3271
3272 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3273 /* Enable both bits, even on read. */
3274 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3275 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3276 }
3277
3278 static void
3279 bnx2_disable_nvram_access(struct bnx2 *bp)
3280 {
3281 u32 val;
3282
3283 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3284 /* Disable both bits, even after read. */
3285 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3286 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3287 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3288 }
3289
3290 static int
3291 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3292 {
3293 u32 cmd;
3294 int j;
3295
3296 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3297 /* Buffered flash, no erase needed */
3298 return 0;
3299
3300 /* Build an erase command */
3301 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3302 BNX2_NVM_COMMAND_DOIT;
3303
3304 /* Need to clear DONE bit separately. */
3305 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3306
3307 /* Address of the NVRAM to read from. */
3308 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3309
3310 /* Issue an erase command. */
3311 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3312
3313 /* Wait for completion. */
3314 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3315 u32 val;
3316
3317 udelay(5);
3318
3319 val = REG_RD(bp, BNX2_NVM_COMMAND);
3320 if (val & BNX2_NVM_COMMAND_DONE)
3321 break;
3322 }
3323
3324 if (j >= NVRAM_TIMEOUT_COUNT)
3325 return -EBUSY;
3326
3327 return 0;
3328 }
3329
3330 static int
3331 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3332 {
3333 u32 cmd;
3334 int j;
3335
3336 /* Build the command word. */
3337 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3338
3339 /* Calculate an offset of a buffered flash, not needed for 5709. */
3340 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3341 offset = ((offset / bp->flash_info->page_size) <<
3342 bp->flash_info->page_bits) +
3343 (offset % bp->flash_info->page_size);
3344 }
3345
3346 /* Need to clear DONE bit separately. */
3347 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3348
3349 /* Address of the NVRAM to read from. */
3350 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3351
3352 /* Issue a read command. */
3353 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3354
3355 /* Wait for completion. */
3356 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3357 u32 val;
3358
3359 udelay(5);
3360
3361 val = REG_RD(bp, BNX2_NVM_COMMAND);
3362 if (val & BNX2_NVM_COMMAND_DONE) {
3363 val = REG_RD(bp, BNX2_NVM_READ);
3364
3365 val = be32_to_cpu(val);
3366 memcpy(ret_val, &val, 4);
3367 break;
3368 }
3369 }
3370 if (j >= NVRAM_TIMEOUT_COUNT)
3371 return -EBUSY;
3372
3373 return 0;
3374 }
3375
3376
3377 static int
3378 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3379 {
3380 u32 cmd, val32;
3381 int j;
3382
3383 /* Build the command word. */
3384 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3385
3386 /* Calculate an offset of a buffered flash, not needed for 5709. */
3387 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3388 offset = ((offset / bp->flash_info->page_size) <<
3389 bp->flash_info->page_bits) +
3390 (offset % bp->flash_info->page_size);
3391 }
3392
3393 /* Need to clear DONE bit separately. */
3394 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3395
3396 memcpy(&val32, val, 4);
3397 val32 = cpu_to_be32(val32);
3398
3399 /* Write the data. */
3400 REG_WR(bp, BNX2_NVM_WRITE, val32);
3401
3402 /* Address of the NVRAM to write to. */
3403 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3404
3405 /* Issue the write command. */
3406 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3407
3408 /* Wait for completion. */
3409 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3410 udelay(5);
3411
3412 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3413 break;
3414 }
3415 if (j >= NVRAM_TIMEOUT_COUNT)
3416 return -EBUSY;
3417
3418 return 0;
3419 }
3420
3421 static int
3422 bnx2_init_nvram(struct bnx2 *bp)
3423 {
3424 u32 val;
3425 int j, entry_count, rc = 0;
3426 struct flash_spec *flash;
3427
3428 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3429 bp->flash_info = &flash_5709;
3430 goto get_flash_size;
3431 }
3432
3433 /* Determine the selected interface. */
3434 val = REG_RD(bp, BNX2_NVM_CFG1);
3435
3436 entry_count = ARRAY_SIZE(flash_table);
3437
3438 if (val & 0x40000000) {
3439
3440 /* Flash interface has been reconfigured */
3441 for (j = 0, flash = &flash_table[0]; j < entry_count;
3442 j++, flash++) {
3443 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3444 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3445 bp->flash_info = flash;
3446 break;
3447 }
3448 }
3449 }
3450 else {
3451 u32 mask;
3452 /* Not yet been reconfigured */
3453
3454 if (val & (1 << 23))
3455 mask = FLASH_BACKUP_STRAP_MASK;
3456 else
3457 mask = FLASH_STRAP_MASK;
3458
3459 for (j = 0, flash = &flash_table[0]; j < entry_count;
3460 j++, flash++) {
3461
3462 if ((val & mask) == (flash->strapping & mask)) {
3463 bp->flash_info = flash;
3464
3465 /* Request access to the flash interface. */
3466 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3467 return rc;
3468
3469 /* Enable access to flash interface */
3470 bnx2_enable_nvram_access(bp);
3471
3472 /* Reconfigure the flash interface */
3473 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3474 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3475 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3476 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3477
3478 /* Disable access to flash interface */
3479 bnx2_disable_nvram_access(bp);
3480 bnx2_release_nvram_lock(bp);
3481
3482 break;
3483 }
3484 }
3485 } /* if (val & 0x40000000) */
3486
3487 if (j == entry_count) {
3488 bp->flash_info = NULL;
3489 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3490 return -ENODEV;
3491 }
3492
3493 get_flash_size:
3494 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3495 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3496 if (val)
3497 bp->flash_size = val;
3498 else
3499 bp->flash_size = bp->flash_info->total_size;
3500
3501 return rc;
3502 }
3503
3504 static int
3505 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3506 int buf_size)
3507 {
3508 int rc = 0;
3509 u32 cmd_flags, offset32, len32, extra;
3510
3511 if (buf_size == 0)
3512 return 0;
3513
3514 /* Request access to the flash interface. */
3515 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3516 return rc;
3517
3518 /* Enable access to flash interface */
3519 bnx2_enable_nvram_access(bp);
3520
3521 len32 = buf_size;
3522 offset32 = offset;
3523 extra = 0;
3524
3525 cmd_flags = 0;
3526
3527 if (offset32 & 3) {
3528 u8 buf[4];
3529 u32 pre_len;
3530
3531 offset32 &= ~3;
3532 pre_len = 4 - (offset & 3);
3533
3534 if (pre_len >= len32) {
3535 pre_len = len32;
3536 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3537 BNX2_NVM_COMMAND_LAST;
3538 }
3539 else {
3540 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3541 }
3542
3543 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3544
3545 if (rc)
3546 return rc;
3547
3548 memcpy(ret_buf, buf + (offset & 3), pre_len);
3549
3550 offset32 += 4;
3551 ret_buf += pre_len;
3552 len32 -= pre_len;
3553 }
3554 if (len32 & 3) {
3555 extra = 4 - (len32 & 3);
3556 len32 = (len32 + 4) & ~3;
3557 }
3558
3559 if (len32 == 4) {
3560 u8 buf[4];
3561
3562 if (cmd_flags)
3563 cmd_flags = BNX2_NVM_COMMAND_LAST;
3564 else
3565 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3566 BNX2_NVM_COMMAND_LAST;
3567
3568 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3569
3570 memcpy(ret_buf, buf, 4 - extra);
3571 }
3572 else if (len32 > 0) {
3573 u8 buf[4];
3574
3575 /* Read the first word. */
3576 if (cmd_flags)
3577 cmd_flags = 0;
3578 else
3579 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3580
3581 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3582
3583 /* Advance to the next dword. */
3584 offset32 += 4;
3585 ret_buf += 4;
3586 len32 -= 4;
3587
3588 while (len32 > 4 && rc == 0) {
3589 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3590
3591 /* Advance to the next dword. */
3592 offset32 += 4;
3593 ret_buf += 4;
3594 len32 -= 4;
3595 }
3596
3597 if (rc)
3598 return rc;
3599
3600 cmd_flags = BNX2_NVM_COMMAND_LAST;
3601 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3602
3603 memcpy(ret_buf, buf, 4 - extra);
3604 }
3605
3606 /* Disable access to flash interface */
3607 bnx2_disable_nvram_access(bp);
3608
3609 bnx2_release_nvram_lock(bp);
3610
3611 return rc;
3612 }
3613
3614 static int
3615 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3616 int buf_size)
3617 {
3618 u32 written, offset32, len32;
3619 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3620 int rc = 0;
3621 int align_start, align_end;
3622
3623 buf = data_buf;
3624 offset32 = offset;
3625 len32 = buf_size;
3626 align_start = align_end = 0;
3627
3628 if ((align_start = (offset32 & 3))) {
3629 offset32 &= ~3;
3630 len32 += align_start;
3631 if (len32 < 4)
3632 len32 = 4;
3633 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3634 return rc;
3635 }
3636
3637 if (len32 & 3) {
3638 align_end = 4 - (len32 & 3);
3639 len32 += align_end;
3640 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3641 return rc;
3642 }
3643
3644 if (align_start || align_end) {
3645 align_buf = kmalloc(len32, GFP_KERNEL);
3646 if (align_buf == NULL)
3647 return -ENOMEM;
3648 if (align_start) {
3649 memcpy(align_buf, start, 4);
3650 }
3651 if (align_end) {
3652 memcpy(align_buf + len32 - 4, end, 4);
3653 }
3654 memcpy(align_buf + align_start, data_buf, buf_size);
3655 buf = align_buf;
3656 }
3657
3658 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3659 flash_buffer = kmalloc(264, GFP_KERNEL);
3660 if (flash_buffer == NULL) {
3661 rc = -ENOMEM;
3662 goto nvram_write_end;
3663 }
3664 }
3665
3666 written = 0;
3667 while ((written < len32) && (rc == 0)) {
3668 u32 page_start, page_end, data_start, data_end;
3669 u32 addr, cmd_flags;
3670 int i;
3671
3672 /* Find the page_start addr */
3673 page_start = offset32 + written;
3674 page_start -= (page_start % bp->flash_info->page_size);
3675 /* Find the page_end addr */
3676 page_end = page_start + bp->flash_info->page_size;
3677 /* Find the data_start addr */
3678 data_start = (written == 0) ? offset32 : page_start;
3679 /* Find the data_end addr */
3680 data_end = (page_end > offset32 + len32) ?
3681 (offset32 + len32) : page_end;
3682
3683 /* Request access to the flash interface. */
3684 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3685 goto nvram_write_end;
3686
3687 /* Enable access to flash interface */
3688 bnx2_enable_nvram_access(bp);
3689
3690 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3691 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3692 int j;
3693
3694 /* Read the whole page into the buffer
3695 * (non-buffer flash only) */
3696 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3697 if (j == (bp->flash_info->page_size - 4)) {
3698 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3699 }
3700 rc = bnx2_nvram_read_dword(bp,
3701 page_start + j,
3702 &flash_buffer[j],
3703 cmd_flags);
3704
3705 if (rc)
3706 goto nvram_write_end;
3707
3708 cmd_flags = 0;
3709 }
3710 }
3711
3712 /* Enable writes to flash interface (unlock write-protect) */
3713 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3714 goto nvram_write_end;
3715
3716 /* Loop to write back the buffer data from page_start to
3717 * data_start */
3718 i = 0;
3719 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3720 /* Erase the page */
3721 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3722 goto nvram_write_end;
3723
3724 /* Re-enable the write again for the actual write */
3725 bnx2_enable_nvram_write(bp);
3726
3727 for (addr = page_start; addr < data_start;
3728 addr += 4, i += 4) {
3729
3730 rc = bnx2_nvram_write_dword(bp, addr,
3731 &flash_buffer[i], cmd_flags);
3732
3733 if (rc != 0)
3734 goto nvram_write_end;
3735
3736 cmd_flags = 0;
3737 }
3738 }
3739
3740 /* Loop to write the new data from data_start to data_end */
3741 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3742 if ((addr == page_end - 4) ||
3743 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3744 (addr == data_end - 4))) {
3745
3746 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3747 }
3748 rc = bnx2_nvram_write_dword(bp, addr, buf,
3749 cmd_flags);
3750
3751 if (rc != 0)
3752 goto nvram_write_end;
3753
3754 cmd_flags = 0;
3755 buf += 4;
3756 }
3757
3758 /* Loop to write back the buffer data from data_end
3759 * to page_end */
3760 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3761 for (addr = data_end; addr < page_end;
3762 addr += 4, i += 4) {
3763
3764 if (addr == page_end-4) {
3765 cmd_flags = BNX2_NVM_COMMAND_LAST;
3766 }
3767 rc = bnx2_nvram_write_dword(bp, addr,
3768 &flash_buffer[i], cmd_flags);
3769
3770 if (rc != 0)
3771 goto nvram_write_end;
3772
3773 cmd_flags = 0;
3774 }
3775 }
3776
3777 /* Disable writes to flash interface (lock write-protect) */
3778 bnx2_disable_nvram_write(bp);
3779
3780 /* Disable access to flash interface */
3781 bnx2_disable_nvram_access(bp);
3782 bnx2_release_nvram_lock(bp);
3783
3784 /* Increment written */
3785 written += data_end - data_start;
3786 }
3787
3788 nvram_write_end:
3789 kfree(flash_buffer);
3790 kfree(align_buf);
3791 return rc;
3792 }
3793
3794 static void
3795 bnx2_init_remote_phy(struct bnx2 *bp)
3796 {
3797 u32 val;
3798
3799 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3800 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3801 return;
3802
3803 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3804 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3805 return;
3806
3807 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3808 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3809
3810 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3811 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3812 bp->phy_port = PORT_FIBRE;
3813 else
3814 bp->phy_port = PORT_TP;
3815
3816 if (netif_running(bp->dev)) {
3817 u32 sig;
3818
3819 if (val & BNX2_LINK_STATUS_LINK_UP) {
3820 bp->link_up = 1;
3821 netif_carrier_on(bp->dev);
3822 } else {
3823 bp->link_up = 0;
3824 netif_carrier_off(bp->dev);
3825 }
3826 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
3827 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3828 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3829 sig);
3830 }
3831 }
3832 }
3833
3834 static int
3835 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3836 {
3837 u32 val;
3838 int i, rc = 0;
3839 u8 old_port;
3840
3841 /* Wait for the current PCI transaction to complete before
3842 * issuing a reset. */
3843 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3844 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3845 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3846 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3847 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3848 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3849 udelay(5);
3850
3851 /* Wait for the firmware to tell us it is ok to issue a reset. */
3852 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3853
3854 /* Deposit a driver reset signature so the firmware knows that
3855 * this is a soft reset. */
3856 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3857 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3858
3859 /* Do a dummy read to force the chip to complete all current transaction
3860 * before we issue a reset. */
3861 val = REG_RD(bp, BNX2_MISC_ID);
3862
3863 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3864 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3865 REG_RD(bp, BNX2_MISC_COMMAND);
3866 udelay(5);
3867
3868 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3869 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3870
3871 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3872
3873 } else {
3874 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3875 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3876 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3877
3878 /* Chip reset. */
3879 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3880
3881 /* Reading back any register after chip reset will hang the
3882 * bus on 5706 A0 and A1. The msleep below provides plenty
3883 * of margin for write posting.
3884 */
3885 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3886 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3887 msleep(20);
3888
3889 /* Reset takes approximate 30 usec */
3890 for (i = 0; i < 10; i++) {
3891 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3892 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3893 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3894 break;
3895 udelay(10);
3896 }
3897
3898 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3899 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3900 printk(KERN_ERR PFX "Chip reset did not complete\n");
3901 return -EBUSY;
3902 }
3903 }
3904
3905 /* Make sure byte swapping is properly configured. */
3906 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3907 if (val != 0x01020304) {
3908 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3909 return -ENODEV;
3910 }
3911
3912 /* Wait for the firmware to finish its initialization. */
3913 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3914 if (rc)
3915 return rc;
3916
3917 spin_lock_bh(&bp->phy_lock);
3918 old_port = bp->phy_port;
3919 bnx2_init_remote_phy(bp);
3920 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
3921 bnx2_set_default_remote_link(bp);
3922 spin_unlock_bh(&bp->phy_lock);
3923
3924 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3925 /* Adjust the voltage regular to two steps lower. The default
3926 * of this register is 0x0000000e. */
3927 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3928
3929 /* Remove bad rbuf memory from the free pool. */
3930 rc = bnx2_alloc_bad_rbuf(bp);
3931 }
3932
3933 return rc;
3934 }
3935
3936 static int
3937 bnx2_init_chip(struct bnx2 *bp)
3938 {
3939 u32 val;
3940 int rc;
3941
3942 /* Make sure the interrupt is not active. */
3943 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3944
3945 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3946 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3947 #ifdef __BIG_ENDIAN
3948 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3949 #endif
3950 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3951 DMA_READ_CHANS << 12 |
3952 DMA_WRITE_CHANS << 16;
3953
3954 val |= (0x2 << 20) | (1 << 11);
3955
3956 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3957 val |= (1 << 23);
3958
3959 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3960 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3961 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3962
3963 REG_WR(bp, BNX2_DMA_CONFIG, val);
3964
3965 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3966 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3967 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3968 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3969 }
3970
3971 if (bp->flags & PCIX_FLAG) {
3972 u16 val16;
3973
3974 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3975 &val16);
3976 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3977 val16 & ~PCI_X_CMD_ERO);
3978 }
3979
3980 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3981 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3982 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3983 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3984
3985 /* Initialize context mapping and zero out the quick contexts. The
3986 * context block must have already been enabled. */
3987 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3988 rc = bnx2_init_5709_context(bp);
3989 if (rc)
3990 return rc;
3991 } else
3992 bnx2_init_context(bp);
3993
3994 if ((rc = bnx2_init_cpus(bp)) != 0)
3995 return rc;
3996
3997 bnx2_init_nvram(bp);
3998
3999 bnx2_set_mac_addr(bp);
4000
4001 val = REG_RD(bp, BNX2_MQ_CONFIG);
4002 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4003 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4004 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4005 val |= BNX2_MQ_CONFIG_HALT_DIS;
4006
4007 REG_WR(bp, BNX2_MQ_CONFIG, val);
4008
4009 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4010 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4011 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4012
4013 val = (BCM_PAGE_BITS - 8) << 24;
4014 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4015
4016 /* Configure page size. */
4017 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4018 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4019 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4020 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4021
4022 val = bp->mac_addr[0] +
4023 (bp->mac_addr[1] << 8) +
4024 (bp->mac_addr[2] << 16) +
4025 bp->mac_addr[3] +
4026 (bp->mac_addr[4] << 8) +
4027 (bp->mac_addr[5] << 16);
4028 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4029
4030 /* Program the MTU. Also include 4 bytes for CRC32. */
4031 val = bp->dev->mtu + ETH_HLEN + 4;
4032 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4033 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4034 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4035
4036 bp->last_status_idx = 0;
4037 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4038
4039 /* Set up how to generate a link change interrupt. */
4040 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4041
4042 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4043 (u64) bp->status_blk_mapping & 0xffffffff);
4044 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4045
4046 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4047 (u64) bp->stats_blk_mapping & 0xffffffff);
4048 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4049 (u64) bp->stats_blk_mapping >> 32);
4050
4051 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4052 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4053
4054 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4055 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4056
4057 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4058 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4059
4060 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4061
4062 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4063
4064 REG_WR(bp, BNX2_HC_COM_TICKS,
4065 (bp->com_ticks_int << 16) | bp->com_ticks);
4066
4067 REG_WR(bp, BNX2_HC_CMD_TICKS,
4068 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4069
4070 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4071 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4072 else
4073 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4074 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4075
4076 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4077 val = BNX2_HC_CONFIG_COLLECT_STATS;
4078 else {
4079 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4080 BNX2_HC_CONFIG_COLLECT_STATS;
4081 }
4082
4083 if (bp->flags & ONE_SHOT_MSI_FLAG)
4084 val |= BNX2_HC_CONFIG_ONE_SHOT;
4085
4086 REG_WR(bp, BNX2_HC_CONFIG, val);
4087
4088 /* Clear internal stats counters. */
4089 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4090
4091 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4092
4093 /* Initialize the receive filter. */
4094 bnx2_set_rx_mode(bp->dev);
4095
4096 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4097 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4098 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4099 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4100 }
4101 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4102 0);
4103
4104 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4105 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4106
4107 udelay(20);
4108
4109 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4110
4111 return rc;
4112 }
4113
4114 static void
4115 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4116 {
4117 u32 val, offset0, offset1, offset2, offset3;
4118
4119 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4120 offset0 = BNX2_L2CTX_TYPE_XI;
4121 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4122 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4123 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4124 } else {
4125 offset0 = BNX2_L2CTX_TYPE;
4126 offset1 = BNX2_L2CTX_CMD_TYPE;
4127 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4128 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4129 }
4130 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4131 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4132
4133 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4134 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4135
4136 val = (u64) bp->tx_desc_mapping >> 32;
4137 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4138
4139 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4140 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4141 }
4142
4143 static void
4144 bnx2_init_tx_ring(struct bnx2 *bp)
4145 {
4146 struct tx_bd *txbd;
4147 u32 cid;
4148
4149 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4150
4151 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4152
4153 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4154 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4155
4156 bp->tx_prod = 0;
4157 bp->tx_cons = 0;
4158 bp->hw_tx_cons = 0;
4159 bp->tx_prod_bseq = 0;
4160
4161 cid = TX_CID;
4162 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4163 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4164
4165 bnx2_init_tx_context(bp, cid);
4166 }
4167
4168 static void
4169 bnx2_init_rx_ring(struct bnx2 *bp)
4170 {
4171 struct rx_bd *rxbd;
4172 int i;
4173 u16 prod, ring_prod;
4174 u32 val;
4175
4176 /* 8 for CRC and VLAN */
4177 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4178 /* hw alignment */
4179 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4180
4181 ring_prod = prod = bp->rx_prod = 0;
4182 bp->rx_cons = 0;
4183 bp->rx_prod_bseq = 0;
4184
4185 for (i = 0; i < bp->rx_max_ring; i++) {
4186 int j;
4187
4188 rxbd = &bp->rx_desc_ring[i][0];
4189 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4190 rxbd->rx_bd_len = bp->rx_buf_use_size;
4191 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4192 }
4193 if (i == (bp->rx_max_ring - 1))
4194 j = 0;
4195 else
4196 j = i + 1;
4197 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4198 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4199 0xffffffff;
4200 }
4201
4202 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4203 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4204 val |= 0x02 << 8;
4205 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4206
4207 val = (u64) bp->rx_desc_mapping[0] >> 32;
4208 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4209
4210 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4211 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4212
4213 for (i = 0; i < bp->rx_ring_size; i++) {
4214 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4215 break;
4216 }
4217 prod = NEXT_RX_BD(prod);
4218 ring_prod = RX_RING_IDX(prod);
4219 }
4220 bp->rx_prod = prod;
4221
4222 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4223
4224 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4225 }
4226
4227 static void
4228 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4229 {
4230 u32 num_rings, max;
4231
4232 bp->rx_ring_size = size;
4233 num_rings = 1;
4234 while (size > MAX_RX_DESC_CNT) {
4235 size -= MAX_RX_DESC_CNT;
4236 num_rings++;
4237 }
4238 /* round to next power of 2 */
4239 max = MAX_RX_RINGS;
4240 while ((max & num_rings) == 0)
4241 max >>= 1;
4242
4243 if (num_rings != max)
4244 max <<= 1;
4245
4246 bp->rx_max_ring = max;
4247 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4248 }
4249
4250 static void
4251 bnx2_free_tx_skbs(struct bnx2 *bp)
4252 {
4253 int i;
4254
4255 if (bp->tx_buf_ring == NULL)
4256 return;
4257
4258 for (i = 0; i < TX_DESC_CNT; ) {
4259 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4260 struct sk_buff *skb = tx_buf->skb;
4261 int j, last;
4262
4263 if (skb == NULL) {
4264 i++;
4265 continue;
4266 }
4267
4268 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4269 skb_headlen(skb), PCI_DMA_TODEVICE);
4270
4271 tx_buf->skb = NULL;
4272
4273 last = skb_shinfo(skb)->nr_frags;
4274 for (j = 0; j < last; j++) {
4275 tx_buf = &bp->tx_buf_ring[i + j + 1];
4276 pci_unmap_page(bp->pdev,
4277 pci_unmap_addr(tx_buf, mapping),
4278 skb_shinfo(skb)->frags[j].size,
4279 PCI_DMA_TODEVICE);
4280 }
4281 dev_kfree_skb(skb);
4282 i += j + 1;
4283 }
4284
4285 }
4286
4287 static void
4288 bnx2_free_rx_skbs(struct bnx2 *bp)
4289 {
4290 int i;
4291
4292 if (bp->rx_buf_ring == NULL)
4293 return;
4294
4295 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4296 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4297 struct sk_buff *skb = rx_buf->skb;
4298
4299 if (skb == NULL)
4300 continue;
4301
4302 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4303 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4304
4305 rx_buf->skb = NULL;
4306
4307 dev_kfree_skb(skb);
4308 }
4309 }
4310
4311 static void
4312 bnx2_free_skbs(struct bnx2 *bp)
4313 {
4314 bnx2_free_tx_skbs(bp);
4315 bnx2_free_rx_skbs(bp);
4316 }
4317
4318 static int
4319 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4320 {
4321 int rc;
4322
4323 rc = bnx2_reset_chip(bp, reset_code);
4324 bnx2_free_skbs(bp);
4325 if (rc)
4326 return rc;
4327
4328 if ((rc = bnx2_init_chip(bp)) != 0)
4329 return rc;
4330
4331 bnx2_init_tx_ring(bp);
4332 bnx2_init_rx_ring(bp);
4333 return 0;
4334 }
4335
4336 static int
4337 bnx2_init_nic(struct bnx2 *bp)
4338 {
4339 int rc;
4340
4341 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4342 return rc;
4343
4344 spin_lock_bh(&bp->phy_lock);
4345 bnx2_init_phy(bp);
4346 bnx2_set_link(bp);
4347 spin_unlock_bh(&bp->phy_lock);
4348 return 0;
4349 }
4350
4351 static int
4352 bnx2_test_registers(struct bnx2 *bp)
4353 {
4354 int ret;
4355 int i, is_5709;
4356 static const struct {
4357 u16 offset;
4358 u16 flags;
4359 #define BNX2_FL_NOT_5709 1
4360 u32 rw_mask;
4361 u32 ro_mask;
4362 } reg_tbl[] = {
4363 { 0x006c, 0, 0x00000000, 0x0000003f },
4364 { 0x0090, 0, 0xffffffff, 0x00000000 },
4365 { 0x0094, 0, 0x00000000, 0x00000000 },
4366
4367 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4368 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4369 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4370 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4371 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4372 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4373 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4374 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4375 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4376
4377 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4378 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4379 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4380 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4381 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4382 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4383
4384 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4385 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4386 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4387
4388 { 0x1000, 0, 0x00000000, 0x00000001 },
4389 { 0x1004, 0, 0x00000000, 0x000f0001 },
4390
4391 { 0x1408, 0, 0x01c00800, 0x00000000 },
4392 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4393 { 0x14a8, 0, 0x00000000, 0x000001ff },
4394 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4395 { 0x14b0, 0, 0x00000002, 0x00000001 },
4396 { 0x14b8, 0, 0x00000000, 0x00000000 },
4397 { 0x14c0, 0, 0x00000000, 0x00000009 },
4398 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4399 { 0x14cc, 0, 0x00000000, 0x00000001 },
4400 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4401
4402 { 0x1800, 0, 0x00000000, 0x00000001 },
4403 { 0x1804, 0, 0x00000000, 0x00000003 },
4404
4405 { 0x2800, 0, 0x00000000, 0x00000001 },
4406 { 0x2804, 0, 0x00000000, 0x00003f01 },
4407 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4408 { 0x2810, 0, 0xffff0000, 0x00000000 },
4409 { 0x2814, 0, 0xffff0000, 0x00000000 },
4410 { 0x2818, 0, 0xffff0000, 0x00000000 },
4411 { 0x281c, 0, 0xffff0000, 0x00000000 },
4412 { 0x2834, 0, 0xffffffff, 0x00000000 },
4413 { 0x2840, 0, 0x00000000, 0xffffffff },
4414 { 0x2844, 0, 0x00000000, 0xffffffff },
4415 { 0x2848, 0, 0xffffffff, 0x00000000 },
4416 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4417
4418 { 0x2c00, 0, 0x00000000, 0x00000011 },
4419 { 0x2c04, 0, 0x00000000, 0x00030007 },
4420
4421 { 0x3c00, 0, 0x00000000, 0x00000001 },
4422 { 0x3c04, 0, 0x00000000, 0x00070000 },
4423 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4424 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4425 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4426 { 0x3c14, 0, 0x00000000, 0xffffffff },
4427 { 0x3c18, 0, 0x00000000, 0xffffffff },
4428 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4429 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4430
4431 { 0x5004, 0, 0x00000000, 0x0000007f },
4432 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4433
4434 { 0x5c00, 0, 0x00000000, 0x00000001 },
4435 { 0x5c04, 0, 0x00000000, 0x0003000f },
4436 { 0x5c08, 0, 0x00000003, 0x00000000 },
4437 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4438 { 0x5c10, 0, 0x00000000, 0xffffffff },
4439 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4440 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4441 { 0x5c88, 0, 0x00000000, 0x00077373 },
4442 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4443
4444 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4445 { 0x680c, 0, 0xffffffff, 0x00000000 },
4446 { 0x6810, 0, 0xffffffff, 0x00000000 },
4447 { 0x6814, 0, 0xffffffff, 0x00000000 },
4448 { 0x6818, 0, 0xffffffff, 0x00000000 },
4449 { 0x681c, 0, 0xffffffff, 0x00000000 },
4450 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4451 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4452 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4453 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4454 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4455 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4456 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4457 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4458 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4459 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4460 { 0x684c, 0, 0xffffffff, 0x00000000 },
4461 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4462 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4463 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4464 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4465 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4466 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4467
4468 { 0xffff, 0, 0x00000000, 0x00000000 },
4469 };
4470
4471 ret = 0;
4472 is_5709 = 0;
4473 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4474 is_5709 = 1;
4475
4476 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4477 u32 offset, rw_mask, ro_mask, save_val, val;
4478 u16 flags = reg_tbl[i].flags;
4479
4480 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4481 continue;
4482
4483 offset = (u32) reg_tbl[i].offset;
4484 rw_mask = reg_tbl[i].rw_mask;
4485 ro_mask = reg_tbl[i].ro_mask;
4486
4487 save_val = readl(bp->regview + offset);
4488
4489 writel(0, bp->regview + offset);
4490
4491 val = readl(bp->regview + offset);
4492 if ((val & rw_mask) != 0) {
4493 goto reg_test_err;
4494 }
4495
4496 if ((val & ro_mask) != (save_val & ro_mask)) {
4497 goto reg_test_err;
4498 }
4499
4500 writel(0xffffffff, bp->regview + offset);
4501
4502 val = readl(bp->regview + offset);
4503 if ((val & rw_mask) != rw_mask) {
4504 goto reg_test_err;
4505 }
4506
4507 if ((val & ro_mask) != (save_val & ro_mask)) {
4508 goto reg_test_err;
4509 }
4510
4511 writel(save_val, bp->regview + offset);
4512 continue;
4513
4514 reg_test_err:
4515 writel(save_val, bp->regview + offset);
4516 ret = -ENODEV;
4517 break;
4518 }
4519 return ret;
4520 }
4521
4522 static int
4523 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4524 {
4525 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4526 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4527 int i;
4528
4529 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4530 u32 offset;
4531
4532 for (offset = 0; offset < size; offset += 4) {
4533
4534 REG_WR_IND(bp, start + offset, test_pattern[i]);
4535
4536 if (REG_RD_IND(bp, start + offset) !=
4537 test_pattern[i]) {
4538 return -ENODEV;
4539 }
4540 }
4541 }
4542 return 0;
4543 }
4544
4545 static int
4546 bnx2_test_memory(struct bnx2 *bp)
4547 {
4548 int ret = 0;
4549 int i;
4550 static struct mem_entry {
4551 u32 offset;
4552 u32 len;
4553 } mem_tbl_5706[] = {
4554 { 0x60000, 0x4000 },
4555 { 0xa0000, 0x3000 },
4556 { 0xe0000, 0x4000 },
4557 { 0x120000, 0x4000 },
4558 { 0x1a0000, 0x4000 },
4559 { 0x160000, 0x4000 },
4560 { 0xffffffff, 0 },
4561 },
4562 mem_tbl_5709[] = {
4563 { 0x60000, 0x4000 },
4564 { 0xa0000, 0x3000 },
4565 { 0xe0000, 0x4000 },
4566 { 0x120000, 0x4000 },
4567 { 0x1a0000, 0x4000 },
4568 { 0xffffffff, 0 },
4569 };
4570 struct mem_entry *mem_tbl;
4571
4572 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4573 mem_tbl = mem_tbl_5709;
4574 else
4575 mem_tbl = mem_tbl_5706;
4576
4577 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4578 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4579 mem_tbl[i].len)) != 0) {
4580 return ret;
4581 }
4582 }
4583
4584 return ret;
4585 }
4586
4587 #define BNX2_MAC_LOOPBACK 0
4588 #define BNX2_PHY_LOOPBACK 1
4589
4590 static int
4591 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4592 {
4593 unsigned int pkt_size, num_pkts, i;
4594 struct sk_buff *skb, *rx_skb;
4595 unsigned char *packet;
4596 u16 rx_start_idx, rx_idx;
4597 dma_addr_t map;
4598 struct tx_bd *txbd;
4599 struct sw_bd *rx_buf;
4600 struct l2_fhdr *rx_hdr;
4601 int ret = -ENODEV;
4602
4603 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4604 bp->loopback = MAC_LOOPBACK;
4605 bnx2_set_mac_loopback(bp);
4606 }
4607 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4608 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4609 return 0;
4610
4611 bp->loopback = PHY_LOOPBACK;
4612 bnx2_set_phy_loopback(bp);
4613 }
4614 else
4615 return -EINVAL;
4616
4617 pkt_size = 1514;
4618 skb = netdev_alloc_skb(bp->dev, pkt_size);
4619 if (!skb)
4620 return -ENOMEM;
4621 packet = skb_put(skb, pkt_size);
4622 memcpy(packet, bp->dev->dev_addr, 6);
4623 memset(packet + 6, 0x0, 8);
4624 for (i = 14; i < pkt_size; i++)
4625 packet[i] = (unsigned char) (i & 0xff);
4626
4627 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4628 PCI_DMA_TODEVICE);
4629
4630 REG_WR(bp, BNX2_HC_COMMAND,
4631 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4632
4633 REG_RD(bp, BNX2_HC_COMMAND);
4634
4635 udelay(5);
4636 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4637
4638 num_pkts = 0;
4639
4640 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4641
4642 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4643 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4644 txbd->tx_bd_mss_nbytes = pkt_size;
4645 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4646
4647 num_pkts++;
4648 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4649 bp->tx_prod_bseq += pkt_size;
4650
4651 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4652 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4653
4654 udelay(100);
4655
4656 REG_WR(bp, BNX2_HC_COMMAND,
4657 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4658
4659 REG_RD(bp, BNX2_HC_COMMAND);
4660
4661 udelay(5);
4662
4663 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4664 dev_kfree_skb(skb);
4665
4666 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4667 goto loopback_test_done;
4668 }
4669
4670 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4671 if (rx_idx != rx_start_idx + num_pkts) {
4672 goto loopback_test_done;
4673 }
4674
4675 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4676 rx_skb = rx_buf->skb;
4677
4678 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4679 skb_reserve(rx_skb, bp->rx_offset);
4680
4681 pci_dma_sync_single_for_cpu(bp->pdev,
4682 pci_unmap_addr(rx_buf, mapping),
4683 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4684
4685 if (rx_hdr->l2_fhdr_status &
4686 (L2_FHDR_ERRORS_BAD_CRC |
4687 L2_FHDR_ERRORS_PHY_DECODE |
4688 L2_FHDR_ERRORS_ALIGNMENT |
4689 L2_FHDR_ERRORS_TOO_SHORT |
4690 L2_FHDR_ERRORS_GIANT_FRAME)) {
4691
4692 goto loopback_test_done;
4693 }
4694
4695 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4696 goto loopback_test_done;
4697 }
4698
4699 for (i = 14; i < pkt_size; i++) {
4700 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4701 goto loopback_test_done;
4702 }
4703 }
4704
4705 ret = 0;
4706
4707 loopback_test_done:
4708 bp->loopback = 0;
4709 return ret;
4710 }
4711
4712 #define BNX2_MAC_LOOPBACK_FAILED 1
4713 #define BNX2_PHY_LOOPBACK_FAILED 2
4714 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4715 BNX2_PHY_LOOPBACK_FAILED)
4716
4717 static int
4718 bnx2_test_loopback(struct bnx2 *bp)
4719 {
4720 int rc = 0;
4721
4722 if (!netif_running(bp->dev))
4723 return BNX2_LOOPBACK_FAILED;
4724
4725 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4726 spin_lock_bh(&bp->phy_lock);
4727 bnx2_init_phy(bp);
4728 spin_unlock_bh(&bp->phy_lock);
4729 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4730 rc |= BNX2_MAC_LOOPBACK_FAILED;
4731 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4732 rc |= BNX2_PHY_LOOPBACK_FAILED;
4733 return rc;
4734 }
4735
4736 #define NVRAM_SIZE 0x200
4737 #define CRC32_RESIDUAL 0xdebb20e3
4738
4739 static int
4740 bnx2_test_nvram(struct bnx2 *bp)
4741 {
4742 u32 buf[NVRAM_SIZE / 4];
4743 u8 *data = (u8 *) buf;
4744 int rc = 0;
4745 u32 magic, csum;
4746
4747 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4748 goto test_nvram_done;
4749
4750 magic = be32_to_cpu(buf[0]);
4751 if (magic != 0x669955aa) {
4752 rc = -ENODEV;
4753 goto test_nvram_done;
4754 }
4755
4756 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4757 goto test_nvram_done;
4758
4759 csum = ether_crc_le(0x100, data);
4760 if (csum != CRC32_RESIDUAL) {
4761 rc = -ENODEV;
4762 goto test_nvram_done;
4763 }
4764
4765 csum = ether_crc_le(0x100, data + 0x100);
4766 if (csum != CRC32_RESIDUAL) {
4767 rc = -ENODEV;
4768 }
4769
4770 test_nvram_done:
4771 return rc;
4772 }
4773
4774 static int
4775 bnx2_test_link(struct bnx2 *bp)
4776 {
4777 u32 bmsr;
4778
4779 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4780 if (bp->link_up)
4781 return 0;
4782 return -ENODEV;
4783 }
4784 spin_lock_bh(&bp->phy_lock);
4785 bnx2_enable_bmsr1(bp);
4786 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4787 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4788 bnx2_disable_bmsr1(bp);
4789 spin_unlock_bh(&bp->phy_lock);
4790
4791 if (bmsr & BMSR_LSTATUS) {
4792 return 0;
4793 }
4794 return -ENODEV;
4795 }
4796
4797 static int
4798 bnx2_test_intr(struct bnx2 *bp)
4799 {
4800 int i;
4801 u16 status_idx;
4802
4803 if (!netif_running(bp->dev))
4804 return -ENODEV;
4805
4806 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4807
4808 /* This register is not touched during run-time. */
4809 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4810 REG_RD(bp, BNX2_HC_COMMAND);
4811
4812 for (i = 0; i < 10; i++) {
4813 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4814 status_idx) {
4815
4816 break;
4817 }
4818
4819 msleep_interruptible(10);
4820 }
4821 if (i < 10)
4822 return 0;
4823
4824 return -ENODEV;
4825 }
4826
4827 static void
4828 bnx2_5706_serdes_timer(struct bnx2 *bp)
4829 {
4830 spin_lock(&bp->phy_lock);
4831 if (bp->serdes_an_pending)
4832 bp->serdes_an_pending--;
4833 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4834 u32 bmcr;
4835
4836 bp->current_interval = bp->timer_interval;
4837
4838 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4839
4840 if (bmcr & BMCR_ANENABLE) {
4841 u32 phy1, phy2;
4842
4843 bnx2_write_phy(bp, 0x1c, 0x7c00);
4844 bnx2_read_phy(bp, 0x1c, &phy1);
4845
4846 bnx2_write_phy(bp, 0x17, 0x0f01);
4847 bnx2_read_phy(bp, 0x15, &phy2);
4848 bnx2_write_phy(bp, 0x17, 0x0f01);
4849 bnx2_read_phy(bp, 0x15, &phy2);
4850
4851 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4852 !(phy2 & 0x20)) { /* no CONFIG */
4853
4854 bmcr &= ~BMCR_ANENABLE;
4855 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4856 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4857 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4858 }
4859 }
4860 }
4861 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4862 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4863 u32 phy2;
4864
4865 bnx2_write_phy(bp, 0x17, 0x0f01);
4866 bnx2_read_phy(bp, 0x15, &phy2);
4867 if (phy2 & 0x20) {
4868 u32 bmcr;
4869
4870 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4871 bmcr |= BMCR_ANENABLE;
4872 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4873
4874 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4875 }
4876 } else
4877 bp->current_interval = bp->timer_interval;
4878
4879 spin_unlock(&bp->phy_lock);
4880 }
4881
4882 static void
4883 bnx2_5708_serdes_timer(struct bnx2 *bp)
4884 {
4885 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4886 return;
4887
4888 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4889 bp->serdes_an_pending = 0;
4890 return;
4891 }
4892
4893 spin_lock(&bp->phy_lock);
4894 if (bp->serdes_an_pending)
4895 bp->serdes_an_pending--;
4896 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4897 u32 bmcr;
4898
4899 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4900 if (bmcr & BMCR_ANENABLE) {
4901 bnx2_enable_forced_2g5(bp);
4902 bp->current_interval = SERDES_FORCED_TIMEOUT;
4903 } else {
4904 bnx2_disable_forced_2g5(bp);
4905 bp->serdes_an_pending = 2;
4906 bp->current_interval = bp->timer_interval;
4907 }
4908
4909 } else
4910 bp->current_interval = bp->timer_interval;
4911
4912 spin_unlock(&bp->phy_lock);
4913 }
4914
4915 static void
4916 bnx2_timer(unsigned long data)
4917 {
4918 struct bnx2 *bp = (struct bnx2 *) data;
4919
4920 if (!netif_running(bp->dev))
4921 return;
4922
4923 if (atomic_read(&bp->intr_sem) != 0)
4924 goto bnx2_restart_timer;
4925
4926 bnx2_send_heart_beat(bp);
4927
4928 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4929
4930 /* workaround occasional corrupted counters */
4931 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4932 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4933 BNX2_HC_COMMAND_STATS_NOW);
4934
4935 if (bp->phy_flags & PHY_SERDES_FLAG) {
4936 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4937 bnx2_5706_serdes_timer(bp);
4938 else
4939 bnx2_5708_serdes_timer(bp);
4940 }
4941
4942 bnx2_restart_timer:
4943 mod_timer(&bp->timer, jiffies + bp->current_interval);
4944 }
4945
4946 static int
4947 bnx2_request_irq(struct bnx2 *bp)
4948 {
4949 struct net_device *dev = bp->dev;
4950 int rc = 0;
4951
4952 if (bp->flags & USING_MSI_FLAG) {
4953 irq_handler_t fn = bnx2_msi;
4954
4955 if (bp->flags & ONE_SHOT_MSI_FLAG)
4956 fn = bnx2_msi_1shot;
4957
4958 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4959 } else
4960 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4961 IRQF_SHARED, dev->name, dev);
4962 return rc;
4963 }
4964
4965 static void
4966 bnx2_free_irq(struct bnx2 *bp)
4967 {
4968 struct net_device *dev = bp->dev;
4969
4970 if (bp->flags & USING_MSI_FLAG) {
4971 free_irq(bp->pdev->irq, dev);
4972 pci_disable_msi(bp->pdev);
4973 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4974 } else
4975 free_irq(bp->pdev->irq, dev);
4976 }
4977
4978 /* Called with rtnl_lock */
4979 static int
4980 bnx2_open(struct net_device *dev)
4981 {
4982 struct bnx2 *bp = netdev_priv(dev);
4983 int rc;
4984
4985 netif_carrier_off(dev);
4986
4987 bnx2_set_power_state(bp, PCI_D0);
4988 bnx2_disable_int(bp);
4989
4990 rc = bnx2_alloc_mem(bp);
4991 if (rc)
4992 return rc;
4993
4994 napi_enable(&bp->napi);
4995
4996 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
4997 if (pci_enable_msi(bp->pdev) == 0) {
4998 bp->flags |= USING_MSI_FLAG;
4999 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5000 bp->flags |= ONE_SHOT_MSI_FLAG;
5001 }
5002 }
5003 rc = bnx2_request_irq(bp);
5004
5005 if (rc) {
5006 napi_disable(&bp->napi);
5007 bnx2_free_mem(bp);
5008 return rc;
5009 }
5010
5011 rc = bnx2_init_nic(bp);
5012
5013 if (rc) {
5014 napi_disable(&bp->napi);
5015 bnx2_free_irq(bp);
5016 bnx2_free_skbs(bp);
5017 bnx2_free_mem(bp);
5018 return rc;
5019 }
5020
5021 mod_timer(&bp->timer, jiffies + bp->current_interval);
5022
5023 atomic_set(&bp->intr_sem, 0);
5024
5025 bnx2_enable_int(bp);
5026
5027 if (bp->flags & USING_MSI_FLAG) {
5028 /* Test MSI to make sure it is working
5029 * If MSI test fails, go back to INTx mode
5030 */
5031 if (bnx2_test_intr(bp) != 0) {
5032 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5033 " using MSI, switching to INTx mode. Please"
5034 " report this failure to the PCI maintainer"
5035 " and include system chipset information.\n",
5036 bp->dev->name);
5037
5038 bnx2_disable_int(bp);
5039 bnx2_free_irq(bp);
5040
5041 rc = bnx2_init_nic(bp);
5042
5043 if (!rc)
5044 rc = bnx2_request_irq(bp);
5045
5046 if (rc) {
5047 napi_disable(&bp->napi);
5048 bnx2_free_skbs(bp);
5049 bnx2_free_mem(bp);
5050 del_timer_sync(&bp->timer);
5051 return rc;
5052 }
5053 bnx2_enable_int(bp);
5054 }
5055 }
5056 if (bp->flags & USING_MSI_FLAG) {
5057 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5058 }
5059
5060 netif_start_queue(dev);
5061
5062 return 0;
5063 }
5064
5065 static void
5066 bnx2_reset_task(struct work_struct *work)
5067 {
5068 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5069
5070 if (!netif_running(bp->dev))
5071 return;
5072
5073 bp->in_reset_task = 1;
5074 bnx2_netif_stop(bp);
5075
5076 bnx2_init_nic(bp);
5077
5078 atomic_set(&bp->intr_sem, 1);
5079 bnx2_netif_start(bp);
5080 bp->in_reset_task = 0;
5081 }
5082
5083 static void
5084 bnx2_tx_timeout(struct net_device *dev)
5085 {
5086 struct bnx2 *bp = netdev_priv(dev);
5087
5088 /* This allows the netif to be shutdown gracefully before resetting */
5089 schedule_work(&bp->reset_task);
5090 }
5091
5092 #ifdef BCM_VLAN
5093 /* Called with rtnl_lock */
5094 static void
5095 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5096 {
5097 struct bnx2 *bp = netdev_priv(dev);
5098
5099 bnx2_netif_stop(bp);
5100
5101 bp->vlgrp = vlgrp;
5102 bnx2_set_rx_mode(dev);
5103
5104 bnx2_netif_start(bp);
5105 }
5106 #endif
5107
5108 /* Called with netif_tx_lock.
5109 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5110 * netif_wake_queue().
5111 */
5112 static int
5113 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5114 {
5115 struct bnx2 *bp = netdev_priv(dev);
5116 dma_addr_t mapping;
5117 struct tx_bd *txbd;
5118 struct sw_bd *tx_buf;
5119 u32 len, vlan_tag_flags, last_frag, mss;
5120 u16 prod, ring_prod;
5121 int i;
5122
5123 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5124 netif_stop_queue(dev);
5125 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5126 dev->name);
5127
5128 return NETDEV_TX_BUSY;
5129 }
5130 len = skb_headlen(skb);
5131 prod = bp->tx_prod;
5132 ring_prod = TX_RING_IDX(prod);
5133
5134 vlan_tag_flags = 0;
5135 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5136 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5137 }
5138
5139 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5140 vlan_tag_flags |=
5141 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5142 }
5143 if ((mss = skb_shinfo(skb)->gso_size)) {
5144 u32 tcp_opt_len, ip_tcp_len;
5145 struct iphdr *iph;
5146
5147 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5148
5149 tcp_opt_len = tcp_optlen(skb);
5150
5151 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5152 u32 tcp_off = skb_transport_offset(skb) -
5153 sizeof(struct ipv6hdr) - ETH_HLEN;
5154
5155 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5156 TX_BD_FLAGS_SW_FLAGS;
5157 if (likely(tcp_off == 0))
5158 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5159 else {
5160 tcp_off >>= 3;
5161 vlan_tag_flags |= ((tcp_off & 0x3) <<
5162 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5163 ((tcp_off & 0x10) <<
5164 TX_BD_FLAGS_TCP6_OFF4_SHL);
5165 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5166 }
5167 } else {
5168 if (skb_header_cloned(skb) &&
5169 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5170 dev_kfree_skb(skb);
5171 return NETDEV_TX_OK;
5172 }
5173
5174 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5175
5176 iph = ip_hdr(skb);
5177 iph->check = 0;
5178 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5179 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5180 iph->daddr, 0,
5181 IPPROTO_TCP,
5182 0);
5183 if (tcp_opt_len || (iph->ihl > 5)) {
5184 vlan_tag_flags |= ((iph->ihl - 5) +
5185 (tcp_opt_len >> 2)) << 8;
5186 }
5187 }
5188 } else
5189 mss = 0;
5190
5191 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5192
5193 tx_buf = &bp->tx_buf_ring[ring_prod];
5194 tx_buf->skb = skb;
5195 pci_unmap_addr_set(tx_buf, mapping, mapping);
5196
5197 txbd = &bp->tx_desc_ring[ring_prod];
5198
5199 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5200 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5201 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5202 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5203
5204 last_frag = skb_shinfo(skb)->nr_frags;
5205
5206 for (i = 0; i < last_frag; i++) {
5207 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5208
5209 prod = NEXT_TX_BD(prod);
5210 ring_prod = TX_RING_IDX(prod);
5211 txbd = &bp->tx_desc_ring[ring_prod];
5212
5213 len = frag->size;
5214 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5215 len, PCI_DMA_TODEVICE);
5216 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5217 mapping, mapping);
5218
5219 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5220 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5221 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5222 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5223
5224 }
5225 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5226
5227 prod = NEXT_TX_BD(prod);
5228 bp->tx_prod_bseq += skb->len;
5229
5230 REG_WR16(bp, bp->tx_bidx_addr, prod);
5231 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5232
5233 mmiowb();
5234
5235 bp->tx_prod = prod;
5236 dev->trans_start = jiffies;
5237
5238 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5239 netif_stop_queue(dev);
5240 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5241 netif_wake_queue(dev);
5242 }
5243
5244 return NETDEV_TX_OK;
5245 }
5246
5247 /* Called with rtnl_lock */
5248 static int
5249 bnx2_close(struct net_device *dev)
5250 {
5251 struct bnx2 *bp = netdev_priv(dev);
5252 u32 reset_code;
5253
5254 /* Calling flush_scheduled_work() may deadlock because
5255 * linkwatch_event() may be on the workqueue and it will try to get
5256 * the rtnl_lock which we are holding.
5257 */
5258 while (bp->in_reset_task)
5259 msleep(1);
5260
5261 bnx2_disable_int_sync(bp);
5262 napi_disable(&bp->napi);
5263 del_timer_sync(&bp->timer);
5264 if (bp->flags & NO_WOL_FLAG)
5265 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5266 else if (bp->wol)
5267 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5268 else
5269 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5270 bnx2_reset_chip(bp, reset_code);
5271 bnx2_free_irq(bp);
5272 bnx2_free_skbs(bp);
5273 bnx2_free_mem(bp);
5274 bp->link_up = 0;
5275 netif_carrier_off(bp->dev);
5276 bnx2_set_power_state(bp, PCI_D3hot);
5277 return 0;
5278 }
5279
5280 #define GET_NET_STATS64(ctr) \
5281 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5282 (unsigned long) (ctr##_lo)
5283
5284 #define GET_NET_STATS32(ctr) \
5285 (ctr##_lo)
5286
5287 #if (BITS_PER_LONG == 64)
5288 #define GET_NET_STATS GET_NET_STATS64
5289 #else
5290 #define GET_NET_STATS GET_NET_STATS32
5291 #endif
5292
5293 static struct net_device_stats *
5294 bnx2_get_stats(struct net_device *dev)
5295 {
5296 struct bnx2 *bp = netdev_priv(dev);
5297 struct statistics_block *stats_blk = bp->stats_blk;
5298 struct net_device_stats *net_stats = &bp->net_stats;
5299
5300 if (bp->stats_blk == NULL) {
5301 return net_stats;
5302 }
5303 net_stats->rx_packets =
5304 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5305 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5306 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5307
5308 net_stats->tx_packets =
5309 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5310 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5311 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5312
5313 net_stats->rx_bytes =
5314 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5315
5316 net_stats->tx_bytes =
5317 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5318
5319 net_stats->multicast =
5320 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5321
5322 net_stats->collisions =
5323 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5324
5325 net_stats->rx_length_errors =
5326 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5327 stats_blk->stat_EtherStatsOverrsizePkts);
5328
5329 net_stats->rx_over_errors =
5330 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5331
5332 net_stats->rx_frame_errors =
5333 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5334
5335 net_stats->rx_crc_errors =
5336 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5337
5338 net_stats->rx_errors = net_stats->rx_length_errors +
5339 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5340 net_stats->rx_crc_errors;
5341
5342 net_stats->tx_aborted_errors =
5343 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5344 stats_blk->stat_Dot3StatsLateCollisions);
5345
5346 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5347 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5348 net_stats->tx_carrier_errors = 0;
5349 else {
5350 net_stats->tx_carrier_errors =
5351 (unsigned long)
5352 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5353 }
5354
5355 net_stats->tx_errors =
5356 (unsigned long)
5357 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5358 +
5359 net_stats->tx_aborted_errors +
5360 net_stats->tx_carrier_errors;
5361
5362 net_stats->rx_missed_errors =
5363 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5364 stats_blk->stat_FwRxDrop);
5365
5366 return net_stats;
5367 }
5368
5369 /* All ethtool functions called with rtnl_lock */
5370
5371 static int
5372 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5373 {
5374 struct bnx2 *bp = netdev_priv(dev);
5375 int support_serdes = 0, support_copper = 0;
5376
5377 cmd->supported = SUPPORTED_Autoneg;
5378 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5379 support_serdes = 1;
5380 support_copper = 1;
5381 } else if (bp->phy_port == PORT_FIBRE)
5382 support_serdes = 1;
5383 else
5384 support_copper = 1;
5385
5386 if (support_serdes) {
5387 cmd->supported |= SUPPORTED_1000baseT_Full |
5388 SUPPORTED_FIBRE;
5389 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5390 cmd->supported |= SUPPORTED_2500baseX_Full;
5391
5392 }
5393 if (support_copper) {
5394 cmd->supported |= SUPPORTED_10baseT_Half |
5395 SUPPORTED_10baseT_Full |
5396 SUPPORTED_100baseT_Half |
5397 SUPPORTED_100baseT_Full |
5398 SUPPORTED_1000baseT_Full |
5399 SUPPORTED_TP;
5400
5401 }
5402
5403 spin_lock_bh(&bp->phy_lock);
5404 cmd->port = bp->phy_port;
5405 cmd->advertising = bp->advertising;
5406
5407 if (bp->autoneg & AUTONEG_SPEED) {
5408 cmd->autoneg = AUTONEG_ENABLE;
5409 }
5410 else {
5411 cmd->autoneg = AUTONEG_DISABLE;
5412 }
5413
5414 if (netif_carrier_ok(dev)) {
5415 cmd->speed = bp->line_speed;
5416 cmd->duplex = bp->duplex;
5417 }
5418 else {
5419 cmd->speed = -1;
5420 cmd->duplex = -1;
5421 }
5422 spin_unlock_bh(&bp->phy_lock);
5423
5424 cmd->transceiver = XCVR_INTERNAL;
5425 cmd->phy_address = bp->phy_addr;
5426
5427 return 0;
5428 }
5429
5430 static int
5431 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5432 {
5433 struct bnx2 *bp = netdev_priv(dev);
5434 u8 autoneg = bp->autoneg;
5435 u8 req_duplex = bp->req_duplex;
5436 u16 req_line_speed = bp->req_line_speed;
5437 u32 advertising = bp->advertising;
5438 int err = -EINVAL;
5439
5440 spin_lock_bh(&bp->phy_lock);
5441
5442 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5443 goto err_out_unlock;
5444
5445 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5446 goto err_out_unlock;
5447
5448 if (cmd->autoneg == AUTONEG_ENABLE) {
5449 autoneg |= AUTONEG_SPEED;
5450
5451 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5452
5453 /* allow advertising 1 speed */
5454 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5455 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5456 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5457 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5458
5459 if (cmd->port == PORT_FIBRE)
5460 goto err_out_unlock;
5461
5462 advertising = cmd->advertising;
5463
5464 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5465 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5466 (cmd->port == PORT_TP))
5467 goto err_out_unlock;
5468 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5469 advertising = cmd->advertising;
5470 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5471 goto err_out_unlock;
5472 else {
5473 if (cmd->port == PORT_FIBRE)
5474 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5475 else
5476 advertising = ETHTOOL_ALL_COPPER_SPEED;
5477 }
5478 advertising |= ADVERTISED_Autoneg;
5479 }
5480 else {
5481 if (cmd->port == PORT_FIBRE) {
5482 if ((cmd->speed != SPEED_1000 &&
5483 cmd->speed != SPEED_2500) ||
5484 (cmd->duplex != DUPLEX_FULL))
5485 goto err_out_unlock;
5486
5487 if (cmd->speed == SPEED_2500 &&
5488 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5489 goto err_out_unlock;
5490 }
5491 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5492 goto err_out_unlock;
5493
5494 autoneg &= ~AUTONEG_SPEED;
5495 req_line_speed = cmd->speed;
5496 req_duplex = cmd->duplex;
5497 advertising = 0;
5498 }
5499
5500 bp->autoneg = autoneg;
5501 bp->advertising = advertising;
5502 bp->req_line_speed = req_line_speed;
5503 bp->req_duplex = req_duplex;
5504
5505 err = bnx2_setup_phy(bp, cmd->port);
5506
5507 err_out_unlock:
5508 spin_unlock_bh(&bp->phy_lock);
5509
5510 return err;
5511 }
5512
5513 static void
5514 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5515 {
5516 struct bnx2 *bp = netdev_priv(dev);
5517
5518 strcpy(info->driver, DRV_MODULE_NAME);
5519 strcpy(info->version, DRV_MODULE_VERSION);
5520 strcpy(info->bus_info, pci_name(bp->pdev));
5521 strcpy(info->fw_version, bp->fw_version);
5522 }
5523
5524 #define BNX2_REGDUMP_LEN (32 * 1024)
5525
5526 static int
5527 bnx2_get_regs_len(struct net_device *dev)
5528 {
5529 return BNX2_REGDUMP_LEN;
5530 }
5531
5532 static void
5533 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5534 {
5535 u32 *p = _p, i, offset;
5536 u8 *orig_p = _p;
5537 struct bnx2 *bp = netdev_priv(dev);
5538 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5539 0x0800, 0x0880, 0x0c00, 0x0c10,
5540 0x0c30, 0x0d08, 0x1000, 0x101c,
5541 0x1040, 0x1048, 0x1080, 0x10a4,
5542 0x1400, 0x1490, 0x1498, 0x14f0,
5543 0x1500, 0x155c, 0x1580, 0x15dc,
5544 0x1600, 0x1658, 0x1680, 0x16d8,
5545 0x1800, 0x1820, 0x1840, 0x1854,
5546 0x1880, 0x1894, 0x1900, 0x1984,
5547 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5548 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5549 0x2000, 0x2030, 0x23c0, 0x2400,
5550 0x2800, 0x2820, 0x2830, 0x2850,
5551 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5552 0x3c00, 0x3c94, 0x4000, 0x4010,
5553 0x4080, 0x4090, 0x43c0, 0x4458,
5554 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5555 0x4fc0, 0x5010, 0x53c0, 0x5444,
5556 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5557 0x5fc0, 0x6000, 0x6400, 0x6428,
5558 0x6800, 0x6848, 0x684c, 0x6860,
5559 0x6888, 0x6910, 0x8000 };
5560
5561 regs->version = 0;
5562
5563 memset(p, 0, BNX2_REGDUMP_LEN);
5564
5565 if (!netif_running(bp->dev))
5566 return;
5567
5568 i = 0;
5569 offset = reg_boundaries[0];
5570 p += offset;
5571 while (offset < BNX2_REGDUMP_LEN) {
5572 *p++ = REG_RD(bp, offset);
5573 offset += 4;
5574 if (offset == reg_boundaries[i + 1]) {
5575 offset = reg_boundaries[i + 2];
5576 p = (u32 *) (orig_p + offset);
5577 i += 2;
5578 }
5579 }
5580 }
5581
5582 static void
5583 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5584 {
5585 struct bnx2 *bp = netdev_priv(dev);
5586
5587 if (bp->flags & NO_WOL_FLAG) {
5588 wol->supported = 0;
5589 wol->wolopts = 0;
5590 }
5591 else {
5592 wol->supported = WAKE_MAGIC;
5593 if (bp->wol)
5594 wol->wolopts = WAKE_MAGIC;
5595 else
5596 wol->wolopts = 0;
5597 }
5598 memset(&wol->sopass, 0, sizeof(wol->sopass));
5599 }
5600
5601 static int
5602 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5603 {
5604 struct bnx2 *bp = netdev_priv(dev);
5605
5606 if (wol->wolopts & ~WAKE_MAGIC)
5607 return -EINVAL;
5608
5609 if (wol->wolopts & WAKE_MAGIC) {
5610 if (bp->flags & NO_WOL_FLAG)
5611 return -EINVAL;
5612
5613 bp->wol = 1;
5614 }
5615 else {
5616 bp->wol = 0;
5617 }
5618 return 0;
5619 }
5620
5621 static int
5622 bnx2_nway_reset(struct net_device *dev)
5623 {
5624 struct bnx2 *bp = netdev_priv(dev);
5625 u32 bmcr;
5626
5627 if (!(bp->autoneg & AUTONEG_SPEED)) {
5628 return -EINVAL;
5629 }
5630
5631 spin_lock_bh(&bp->phy_lock);
5632
5633 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5634 int rc;
5635
5636 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5637 spin_unlock_bh(&bp->phy_lock);
5638 return rc;
5639 }
5640
5641 /* Force a link down visible on the other side */
5642 if (bp->phy_flags & PHY_SERDES_FLAG) {
5643 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5644 spin_unlock_bh(&bp->phy_lock);
5645
5646 msleep(20);
5647
5648 spin_lock_bh(&bp->phy_lock);
5649
5650 bp->current_interval = SERDES_AN_TIMEOUT;
5651 bp->serdes_an_pending = 1;
5652 mod_timer(&bp->timer, jiffies + bp->current_interval);
5653 }
5654
5655 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5656 bmcr &= ~BMCR_LOOPBACK;
5657 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5658
5659 spin_unlock_bh(&bp->phy_lock);
5660
5661 return 0;
5662 }
5663
5664 static int
5665 bnx2_get_eeprom_len(struct net_device *dev)
5666 {
5667 struct bnx2 *bp = netdev_priv(dev);
5668
5669 if (bp->flash_info == NULL)
5670 return 0;
5671
5672 return (int) bp->flash_size;
5673 }
5674
5675 static int
5676 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5677 u8 *eebuf)
5678 {
5679 struct bnx2 *bp = netdev_priv(dev);
5680 int rc;
5681
5682 /* parameters already validated in ethtool_get_eeprom */
5683
5684 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5685
5686 return rc;
5687 }
5688
5689 static int
5690 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5691 u8 *eebuf)
5692 {
5693 struct bnx2 *bp = netdev_priv(dev);
5694 int rc;
5695
5696 /* parameters already validated in ethtool_set_eeprom */
5697
5698 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5699
5700 return rc;
5701 }
5702
5703 static int
5704 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5705 {
5706 struct bnx2 *bp = netdev_priv(dev);
5707
5708 memset(coal, 0, sizeof(struct ethtool_coalesce));
5709
5710 coal->rx_coalesce_usecs = bp->rx_ticks;
5711 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5712 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5713 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5714
5715 coal->tx_coalesce_usecs = bp->tx_ticks;
5716 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5717 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5718 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5719
5720 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5721
5722 return 0;
5723 }
5724
5725 static int
5726 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5727 {
5728 struct bnx2 *bp = netdev_priv(dev);
5729
5730 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5731 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5732
5733 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5734 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5735
5736 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5737 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5738
5739 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5740 if (bp->rx_quick_cons_trip_int > 0xff)
5741 bp->rx_quick_cons_trip_int = 0xff;
5742
5743 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5744 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5745
5746 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5747 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5748
5749 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5750 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5751
5752 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5753 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5754 0xff;
5755
5756 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5757 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5758 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5759 bp->stats_ticks = USEC_PER_SEC;
5760 }
5761 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5762 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5763 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5764
5765 if (netif_running(bp->dev)) {
5766 bnx2_netif_stop(bp);
5767 bnx2_init_nic(bp);
5768 bnx2_netif_start(bp);
5769 }
5770
5771 return 0;
5772 }
5773
5774 static void
5775 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5776 {
5777 struct bnx2 *bp = netdev_priv(dev);
5778
5779 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5780 ering->rx_mini_max_pending = 0;
5781 ering->rx_jumbo_max_pending = 0;
5782
5783 ering->rx_pending = bp->rx_ring_size;
5784 ering->rx_mini_pending = 0;
5785 ering->rx_jumbo_pending = 0;
5786
5787 ering->tx_max_pending = MAX_TX_DESC_CNT;
5788 ering->tx_pending = bp->tx_ring_size;
5789 }
5790
5791 static int
5792 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5793 {
5794 struct bnx2 *bp = netdev_priv(dev);
5795
5796 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5797 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5798 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5799
5800 return -EINVAL;
5801 }
5802 if (netif_running(bp->dev)) {
5803 bnx2_netif_stop(bp);
5804 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5805 bnx2_free_skbs(bp);
5806 bnx2_free_mem(bp);
5807 }
5808
5809 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5810 bp->tx_ring_size = ering->tx_pending;
5811
5812 if (netif_running(bp->dev)) {
5813 int rc;
5814
5815 rc = bnx2_alloc_mem(bp);
5816 if (rc)
5817 return rc;
5818 bnx2_init_nic(bp);
5819 bnx2_netif_start(bp);
5820 }
5821
5822 return 0;
5823 }
5824
5825 static void
5826 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5827 {
5828 struct bnx2 *bp = netdev_priv(dev);
5829
5830 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5831 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5832 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5833 }
5834
5835 static int
5836 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5837 {
5838 struct bnx2 *bp = netdev_priv(dev);
5839
5840 bp->req_flow_ctrl = 0;
5841 if (epause->rx_pause)
5842 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5843 if (epause->tx_pause)
5844 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5845
5846 if (epause->autoneg) {
5847 bp->autoneg |= AUTONEG_FLOW_CTRL;
5848 }
5849 else {
5850 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5851 }
5852
5853 spin_lock_bh(&bp->phy_lock);
5854
5855 bnx2_setup_phy(bp, bp->phy_port);
5856
5857 spin_unlock_bh(&bp->phy_lock);
5858
5859 return 0;
5860 }
5861
5862 static u32
5863 bnx2_get_rx_csum(struct net_device *dev)
5864 {
5865 struct bnx2 *bp = netdev_priv(dev);
5866
5867 return bp->rx_csum;
5868 }
5869
5870 static int
5871 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5872 {
5873 struct bnx2 *bp = netdev_priv(dev);
5874
5875 bp->rx_csum = data;
5876 return 0;
5877 }
5878
5879 static int
5880 bnx2_set_tso(struct net_device *dev, u32 data)
5881 {
5882 struct bnx2 *bp = netdev_priv(dev);
5883
5884 if (data) {
5885 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5886 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5887 dev->features |= NETIF_F_TSO6;
5888 } else
5889 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5890 NETIF_F_TSO_ECN);
5891 return 0;
5892 }
5893
5894 #define BNX2_NUM_STATS 46
5895
5896 static struct {
5897 char string[ETH_GSTRING_LEN];
5898 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5899 { "rx_bytes" },
5900 { "rx_error_bytes" },
5901 { "tx_bytes" },
5902 { "tx_error_bytes" },
5903 { "rx_ucast_packets" },
5904 { "rx_mcast_packets" },
5905 { "rx_bcast_packets" },
5906 { "tx_ucast_packets" },
5907 { "tx_mcast_packets" },
5908 { "tx_bcast_packets" },
5909 { "tx_mac_errors" },
5910 { "tx_carrier_errors" },
5911 { "rx_crc_errors" },
5912 { "rx_align_errors" },
5913 { "tx_single_collisions" },
5914 { "tx_multi_collisions" },
5915 { "tx_deferred" },
5916 { "tx_excess_collisions" },
5917 { "tx_late_collisions" },
5918 { "tx_total_collisions" },
5919 { "rx_fragments" },
5920 { "rx_jabbers" },
5921 { "rx_undersize_packets" },
5922 { "rx_oversize_packets" },
5923 { "rx_64_byte_packets" },
5924 { "rx_65_to_127_byte_packets" },
5925 { "rx_128_to_255_byte_packets" },
5926 { "rx_256_to_511_byte_packets" },
5927 { "rx_512_to_1023_byte_packets" },
5928 { "rx_1024_to_1522_byte_packets" },
5929 { "rx_1523_to_9022_byte_packets" },
5930 { "tx_64_byte_packets" },
5931 { "tx_65_to_127_byte_packets" },
5932 { "tx_128_to_255_byte_packets" },
5933 { "tx_256_to_511_byte_packets" },
5934 { "tx_512_to_1023_byte_packets" },
5935 { "tx_1024_to_1522_byte_packets" },
5936 { "tx_1523_to_9022_byte_packets" },
5937 { "rx_xon_frames" },
5938 { "rx_xoff_frames" },
5939 { "tx_xon_frames" },
5940 { "tx_xoff_frames" },
5941 { "rx_mac_ctrl_frames" },
5942 { "rx_filtered_packets" },
5943 { "rx_discards" },
5944 { "rx_fw_discards" },
5945 };
5946
5947 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5948
5949 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5950 STATS_OFFSET32(stat_IfHCInOctets_hi),
5951 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5952 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5953 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5954 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5955 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5956 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5957 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5958 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5959 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5960 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5961 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5962 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5963 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5964 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5965 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5966 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5967 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5968 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5969 STATS_OFFSET32(stat_EtherStatsCollisions),
5970 STATS_OFFSET32(stat_EtherStatsFragments),
5971 STATS_OFFSET32(stat_EtherStatsJabbers),
5972 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5973 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5974 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5975 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5976 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5977 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5978 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5979 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5980 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5981 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5982 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5983 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5984 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5985 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5986 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5987 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5988 STATS_OFFSET32(stat_XonPauseFramesReceived),
5989 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5990 STATS_OFFSET32(stat_OutXonSent),
5991 STATS_OFFSET32(stat_OutXoffSent),
5992 STATS_OFFSET32(stat_MacControlFramesReceived),
5993 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5994 STATS_OFFSET32(stat_IfInMBUFDiscards),
5995 STATS_OFFSET32(stat_FwRxDrop),
5996 };
5997
5998 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5999 * skipped because of errata.
6000 */
6001 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6002 8,0,8,8,8,8,8,8,8,8,
6003 4,0,4,4,4,4,4,4,4,4,
6004 4,4,4,4,4,4,4,4,4,4,
6005 4,4,4,4,4,4,4,4,4,4,
6006 4,4,4,4,4,4,
6007 };
6008
6009 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6010 8,0,8,8,8,8,8,8,8,8,
6011 4,4,4,4,4,4,4,4,4,4,
6012 4,4,4,4,4,4,4,4,4,4,
6013 4,4,4,4,4,4,4,4,4,4,
6014 4,4,4,4,4,4,
6015 };
6016
6017 #define BNX2_NUM_TESTS 6
6018
6019 static struct {
6020 char string[ETH_GSTRING_LEN];
6021 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6022 { "register_test (offline)" },
6023 { "memory_test (offline)" },
6024 { "loopback_test (offline)" },
6025 { "nvram_test (online)" },
6026 { "interrupt_test (online)" },
6027 { "link_test (online)" },
6028 };
6029
6030 static int
6031 bnx2_get_sset_count(struct net_device *dev, int sset)
6032 {
6033 switch (sset) {
6034 case ETH_SS_TEST:
6035 return BNX2_NUM_TESTS;
6036 case ETH_SS_STATS:
6037 return BNX2_NUM_STATS;
6038 default:
6039 return -EOPNOTSUPP;
6040 }
6041 }
6042
6043 static void
6044 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6045 {
6046 struct bnx2 *bp = netdev_priv(dev);
6047
6048 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6049 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6050 int i;
6051
6052 bnx2_netif_stop(bp);
6053 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6054 bnx2_free_skbs(bp);
6055
6056 if (bnx2_test_registers(bp) != 0) {
6057 buf[0] = 1;
6058 etest->flags |= ETH_TEST_FL_FAILED;
6059 }
6060 if (bnx2_test_memory(bp) != 0) {
6061 buf[1] = 1;
6062 etest->flags |= ETH_TEST_FL_FAILED;
6063 }
6064 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6065 etest->flags |= ETH_TEST_FL_FAILED;
6066
6067 if (!netif_running(bp->dev)) {
6068 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6069 }
6070 else {
6071 bnx2_init_nic(bp);
6072 bnx2_netif_start(bp);
6073 }
6074
6075 /* wait for link up */
6076 for (i = 0; i < 7; i++) {
6077 if (bp->link_up)
6078 break;
6079 msleep_interruptible(1000);
6080 }
6081 }
6082
6083 if (bnx2_test_nvram(bp) != 0) {
6084 buf[3] = 1;
6085 etest->flags |= ETH_TEST_FL_FAILED;
6086 }
6087 if (bnx2_test_intr(bp) != 0) {
6088 buf[4] = 1;
6089 etest->flags |= ETH_TEST_FL_FAILED;
6090 }
6091
6092 if (bnx2_test_link(bp) != 0) {
6093 buf[5] = 1;
6094 etest->flags |= ETH_TEST_FL_FAILED;
6095
6096 }
6097 }
6098
6099 static void
6100 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6101 {
6102 switch (stringset) {
6103 case ETH_SS_STATS:
6104 memcpy(buf, bnx2_stats_str_arr,
6105 sizeof(bnx2_stats_str_arr));
6106 break;
6107 case ETH_SS_TEST:
6108 memcpy(buf, bnx2_tests_str_arr,
6109 sizeof(bnx2_tests_str_arr));
6110 break;
6111 }
6112 }
6113
6114 static void
6115 bnx2_get_ethtool_stats(struct net_device *dev,
6116 struct ethtool_stats *stats, u64 *buf)
6117 {
6118 struct bnx2 *bp = netdev_priv(dev);
6119 int i;
6120 u32 *hw_stats = (u32 *) bp->stats_blk;
6121 u8 *stats_len_arr = NULL;
6122
6123 if (hw_stats == NULL) {
6124 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6125 return;
6126 }
6127
6128 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6129 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6130 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6131 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6132 stats_len_arr = bnx2_5706_stats_len_arr;
6133 else
6134 stats_len_arr = bnx2_5708_stats_len_arr;
6135
6136 for (i = 0; i < BNX2_NUM_STATS; i++) {
6137 if (stats_len_arr[i] == 0) {
6138 /* skip this counter */
6139 buf[i] = 0;
6140 continue;
6141 }
6142 if (stats_len_arr[i] == 4) {
6143 /* 4-byte counter */
6144 buf[i] = (u64)
6145 *(hw_stats + bnx2_stats_offset_arr[i]);
6146 continue;
6147 }
6148 /* 8-byte counter */
6149 buf[i] = (((u64) *(hw_stats +
6150 bnx2_stats_offset_arr[i])) << 32) +
6151 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6152 }
6153 }
6154
6155 static int
6156 bnx2_phys_id(struct net_device *dev, u32 data)
6157 {
6158 struct bnx2 *bp = netdev_priv(dev);
6159 int i;
6160 u32 save;
6161
6162 if (data == 0)
6163 data = 2;
6164
6165 save = REG_RD(bp, BNX2_MISC_CFG);
6166 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6167
6168 for (i = 0; i < (data * 2); i++) {
6169 if ((i % 2) == 0) {
6170 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6171 }
6172 else {
6173 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6174 BNX2_EMAC_LED_1000MB_OVERRIDE |
6175 BNX2_EMAC_LED_100MB_OVERRIDE |
6176 BNX2_EMAC_LED_10MB_OVERRIDE |
6177 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6178 BNX2_EMAC_LED_TRAFFIC);
6179 }
6180 msleep_interruptible(500);
6181 if (signal_pending(current))
6182 break;
6183 }
6184 REG_WR(bp, BNX2_EMAC_LED, 0);
6185 REG_WR(bp, BNX2_MISC_CFG, save);
6186 return 0;
6187 }
6188
6189 static int
6190 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6191 {
6192 struct bnx2 *bp = netdev_priv(dev);
6193
6194 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6195 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6196 else
6197 return (ethtool_op_set_tx_csum(dev, data));
6198 }
6199
6200 static const struct ethtool_ops bnx2_ethtool_ops = {
6201 .get_settings = bnx2_get_settings,
6202 .set_settings = bnx2_set_settings,
6203 .get_drvinfo = bnx2_get_drvinfo,
6204 .get_regs_len = bnx2_get_regs_len,
6205 .get_regs = bnx2_get_regs,
6206 .get_wol = bnx2_get_wol,
6207 .set_wol = bnx2_set_wol,
6208 .nway_reset = bnx2_nway_reset,
6209 .get_link = ethtool_op_get_link,
6210 .get_eeprom_len = bnx2_get_eeprom_len,
6211 .get_eeprom = bnx2_get_eeprom,
6212 .set_eeprom = bnx2_set_eeprom,
6213 .get_coalesce = bnx2_get_coalesce,
6214 .set_coalesce = bnx2_set_coalesce,
6215 .get_ringparam = bnx2_get_ringparam,
6216 .set_ringparam = bnx2_set_ringparam,
6217 .get_pauseparam = bnx2_get_pauseparam,
6218 .set_pauseparam = bnx2_set_pauseparam,
6219 .get_rx_csum = bnx2_get_rx_csum,
6220 .set_rx_csum = bnx2_set_rx_csum,
6221 .set_tx_csum = bnx2_set_tx_csum,
6222 .set_sg = ethtool_op_set_sg,
6223 .set_tso = bnx2_set_tso,
6224 .self_test = bnx2_self_test,
6225 .get_strings = bnx2_get_strings,
6226 .phys_id = bnx2_phys_id,
6227 .get_ethtool_stats = bnx2_get_ethtool_stats,
6228 .get_sset_count = bnx2_get_sset_count,
6229 };
6230
6231 /* Called with rtnl_lock */
6232 static int
6233 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6234 {
6235 struct mii_ioctl_data *data = if_mii(ifr);
6236 struct bnx2 *bp = netdev_priv(dev);
6237 int err;
6238
6239 switch(cmd) {
6240 case SIOCGMIIPHY:
6241 data->phy_id = bp->phy_addr;
6242
6243 /* fallthru */
6244 case SIOCGMIIREG: {
6245 u32 mii_regval;
6246
6247 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6248 return -EOPNOTSUPP;
6249
6250 if (!netif_running(dev))
6251 return -EAGAIN;
6252
6253 spin_lock_bh(&bp->phy_lock);
6254 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6255 spin_unlock_bh(&bp->phy_lock);
6256
6257 data->val_out = mii_regval;
6258
6259 return err;
6260 }
6261
6262 case SIOCSMIIREG:
6263 if (!capable(CAP_NET_ADMIN))
6264 return -EPERM;
6265
6266 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6267 return -EOPNOTSUPP;
6268
6269 if (!netif_running(dev))
6270 return -EAGAIN;
6271
6272 spin_lock_bh(&bp->phy_lock);
6273 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6274 spin_unlock_bh(&bp->phy_lock);
6275
6276 return err;
6277
6278 default:
6279 /* do nothing */
6280 break;
6281 }
6282 return -EOPNOTSUPP;
6283 }
6284
6285 /* Called with rtnl_lock */
6286 static int
6287 bnx2_change_mac_addr(struct net_device *dev, void *p)
6288 {
6289 struct sockaddr *addr = p;
6290 struct bnx2 *bp = netdev_priv(dev);
6291
6292 if (!is_valid_ether_addr(addr->sa_data))
6293 return -EINVAL;
6294
6295 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6296 if (netif_running(dev))
6297 bnx2_set_mac_addr(bp);
6298
6299 return 0;
6300 }
6301
6302 /* Called with rtnl_lock */
6303 static int
6304 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6305 {
6306 struct bnx2 *bp = netdev_priv(dev);
6307
6308 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6309 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6310 return -EINVAL;
6311
6312 dev->mtu = new_mtu;
6313 if (netif_running(dev)) {
6314 bnx2_netif_stop(bp);
6315
6316 bnx2_init_nic(bp);
6317
6318 bnx2_netif_start(bp);
6319 }
6320 return 0;
6321 }
6322
6323 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6324 static void
6325 poll_bnx2(struct net_device *dev)
6326 {
6327 struct bnx2 *bp = netdev_priv(dev);
6328
6329 disable_irq(bp->pdev->irq);
6330 bnx2_interrupt(bp->pdev->irq, dev);
6331 enable_irq(bp->pdev->irq);
6332 }
6333 #endif
6334
6335 static void __devinit
6336 bnx2_get_5709_media(struct bnx2 *bp)
6337 {
6338 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6339 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6340 u32 strap;
6341
6342 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6343 return;
6344 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6345 bp->phy_flags |= PHY_SERDES_FLAG;
6346 return;
6347 }
6348
6349 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6350 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6351 else
6352 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6353
6354 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6355 switch (strap) {
6356 case 0x4:
6357 case 0x5:
6358 case 0x6:
6359 bp->phy_flags |= PHY_SERDES_FLAG;
6360 return;
6361 }
6362 } else {
6363 switch (strap) {
6364 case 0x1:
6365 case 0x2:
6366 case 0x4:
6367 bp->phy_flags |= PHY_SERDES_FLAG;
6368 return;
6369 }
6370 }
6371 }
6372
6373 static void __devinit
6374 bnx2_get_pci_speed(struct bnx2 *bp)
6375 {
6376 u32 reg;
6377
6378 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6379 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6380 u32 clkreg;
6381
6382 bp->flags |= PCIX_FLAG;
6383
6384 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6385
6386 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6387 switch (clkreg) {
6388 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6389 bp->bus_speed_mhz = 133;
6390 break;
6391
6392 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6393 bp->bus_speed_mhz = 100;
6394 break;
6395
6396 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6397 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6398 bp->bus_speed_mhz = 66;
6399 break;
6400
6401 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6402 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6403 bp->bus_speed_mhz = 50;
6404 break;
6405
6406 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6407 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6408 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6409 bp->bus_speed_mhz = 33;
6410 break;
6411 }
6412 }
6413 else {
6414 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6415 bp->bus_speed_mhz = 66;
6416 else
6417 bp->bus_speed_mhz = 33;
6418 }
6419
6420 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6421 bp->flags |= PCI_32BIT_FLAG;
6422
6423 }
6424
6425 static int __devinit
6426 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6427 {
6428 struct bnx2 *bp;
6429 unsigned long mem_len;
6430 int rc, i, j;
6431 u32 reg;
6432 u64 dma_mask, persist_dma_mask;
6433
6434 SET_NETDEV_DEV(dev, &pdev->dev);
6435 bp = netdev_priv(dev);
6436
6437 bp->flags = 0;
6438 bp->phy_flags = 0;
6439
6440 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6441 rc = pci_enable_device(pdev);
6442 if (rc) {
6443 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6444 goto err_out;
6445 }
6446
6447 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6448 dev_err(&pdev->dev,
6449 "Cannot find PCI device base address, aborting.\n");
6450 rc = -ENODEV;
6451 goto err_out_disable;
6452 }
6453
6454 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6455 if (rc) {
6456 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6457 goto err_out_disable;
6458 }
6459
6460 pci_set_master(pdev);
6461
6462 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6463 if (bp->pm_cap == 0) {
6464 dev_err(&pdev->dev,
6465 "Cannot find power management capability, aborting.\n");
6466 rc = -EIO;
6467 goto err_out_release;
6468 }
6469
6470 bp->dev = dev;
6471 bp->pdev = pdev;
6472
6473 spin_lock_init(&bp->phy_lock);
6474 spin_lock_init(&bp->indirect_lock);
6475 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6476
6477 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6478 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6479 dev->mem_end = dev->mem_start + mem_len;
6480 dev->irq = pdev->irq;
6481
6482 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6483
6484 if (!bp->regview) {
6485 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6486 rc = -ENOMEM;
6487 goto err_out_release;
6488 }
6489
6490 /* Configure byte swap and enable write to the reg_window registers.
6491 * Rely on CPU to do target byte swapping on big endian systems
6492 * The chip's target access swapping will not swap all accesses
6493 */
6494 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6495 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6496 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6497
6498 bnx2_set_power_state(bp, PCI_D0);
6499
6500 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6501
6502 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6503 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6504 dev_err(&pdev->dev,
6505 "Cannot find PCIE capability, aborting.\n");
6506 rc = -EIO;
6507 goto err_out_unmap;
6508 }
6509 bp->flags |= PCIE_FLAG;
6510 } else {
6511 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6512 if (bp->pcix_cap == 0) {
6513 dev_err(&pdev->dev,
6514 "Cannot find PCIX capability, aborting.\n");
6515 rc = -EIO;
6516 goto err_out_unmap;
6517 }
6518 }
6519
6520 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6521 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6522 bp->flags |= MSI_CAP_FLAG;
6523 }
6524
6525 /* 5708 cannot support DMA addresses > 40-bit. */
6526 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6527 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6528 else
6529 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6530
6531 /* Configure DMA attributes. */
6532 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6533 dev->features |= NETIF_F_HIGHDMA;
6534 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6535 if (rc) {
6536 dev_err(&pdev->dev,
6537 "pci_set_consistent_dma_mask failed, aborting.\n");
6538 goto err_out_unmap;
6539 }
6540 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6541 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6542 goto err_out_unmap;
6543 }
6544
6545 if (!(bp->flags & PCIE_FLAG))
6546 bnx2_get_pci_speed(bp);
6547
6548 /* 5706A0 may falsely detect SERR and PERR. */
6549 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6550 reg = REG_RD(bp, PCI_COMMAND);
6551 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6552 REG_WR(bp, PCI_COMMAND, reg);
6553 }
6554 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6555 !(bp->flags & PCIX_FLAG)) {
6556
6557 dev_err(&pdev->dev,
6558 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6559 goto err_out_unmap;
6560 }
6561
6562 bnx2_init_nvram(bp);
6563
6564 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6565
6566 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6567 BNX2_SHM_HDR_SIGNATURE_SIG) {
6568 u32 off = PCI_FUNC(pdev->devfn) << 2;
6569
6570 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6571 } else
6572 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6573
6574 /* Get the permanent MAC address. First we need to make sure the
6575 * firmware is actually running.
6576 */
6577 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6578
6579 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6580 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6581 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6582 rc = -ENODEV;
6583 goto err_out_unmap;
6584 }
6585
6586 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6587 for (i = 0, j = 0; i < 3; i++) {
6588 u8 num, k, skip0;
6589
6590 num = (u8) (reg >> (24 - (i * 8)));
6591 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6592 if (num >= k || !skip0 || k == 1) {
6593 bp->fw_version[j++] = (num / k) + '0';
6594 skip0 = 0;
6595 }
6596 }
6597 if (i != 2)
6598 bp->fw_version[j++] = '.';
6599 }
6600 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6601 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6602 bp->wol = 1;
6603
6604 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
6605 bp->flags |= ASF_ENABLE_FLAG;
6606
6607 for (i = 0; i < 30; i++) {
6608 reg = REG_RD_IND(bp, bp->shmem_base +
6609 BNX2_BC_STATE_CONDITION);
6610 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6611 break;
6612 msleep(10);
6613 }
6614 }
6615 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6616 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6617 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6618 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6619 int i;
6620 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6621
6622 bp->fw_version[j++] = ' ';
6623 for (i = 0; i < 3; i++) {
6624 reg = REG_RD_IND(bp, addr + i * 4);
6625 reg = swab32(reg);
6626 memcpy(&bp->fw_version[j], &reg, 4);
6627 j += 4;
6628 }
6629 }
6630
6631 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6632 bp->mac_addr[0] = (u8) (reg >> 8);
6633 bp->mac_addr[1] = (u8) reg;
6634
6635 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6636 bp->mac_addr[2] = (u8) (reg >> 24);
6637 bp->mac_addr[3] = (u8) (reg >> 16);
6638 bp->mac_addr[4] = (u8) (reg >> 8);
6639 bp->mac_addr[5] = (u8) reg;
6640
6641 bp->tx_ring_size = MAX_TX_DESC_CNT;
6642 bnx2_set_rx_ring_size(bp, 255);
6643
6644 bp->rx_csum = 1;
6645
6646 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6647
6648 bp->tx_quick_cons_trip_int = 20;
6649 bp->tx_quick_cons_trip = 20;
6650 bp->tx_ticks_int = 80;
6651 bp->tx_ticks = 80;
6652
6653 bp->rx_quick_cons_trip_int = 6;
6654 bp->rx_quick_cons_trip = 6;
6655 bp->rx_ticks_int = 18;
6656 bp->rx_ticks = 18;
6657
6658 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6659
6660 bp->timer_interval = HZ;
6661 bp->current_interval = HZ;
6662
6663 bp->phy_addr = 1;
6664
6665 /* Disable WOL support if we are running on a SERDES chip. */
6666 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6667 bnx2_get_5709_media(bp);
6668 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6669 bp->phy_flags |= PHY_SERDES_FLAG;
6670
6671 bp->phy_port = PORT_TP;
6672 if (bp->phy_flags & PHY_SERDES_FLAG) {
6673 bp->phy_port = PORT_FIBRE;
6674 reg = REG_RD_IND(bp, bp->shmem_base +
6675 BNX2_SHARED_HW_CFG_CONFIG);
6676 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6677 bp->flags |= NO_WOL_FLAG;
6678 bp->wol = 0;
6679 }
6680 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6681 bp->phy_addr = 2;
6682 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6683 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6684 }
6685 bnx2_init_remote_phy(bp);
6686
6687 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6688 CHIP_NUM(bp) == CHIP_NUM_5708)
6689 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6690 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6691 (CHIP_REV(bp) == CHIP_REV_Ax ||
6692 CHIP_REV(bp) == CHIP_REV_Bx))
6693 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6694
6695 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6696 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6697 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
6698 bp->flags |= NO_WOL_FLAG;
6699 bp->wol = 0;
6700 }
6701
6702 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6703 bp->tx_quick_cons_trip_int =
6704 bp->tx_quick_cons_trip;
6705 bp->tx_ticks_int = bp->tx_ticks;
6706 bp->rx_quick_cons_trip_int =
6707 bp->rx_quick_cons_trip;
6708 bp->rx_ticks_int = bp->rx_ticks;
6709 bp->comp_prod_trip_int = bp->comp_prod_trip;
6710 bp->com_ticks_int = bp->com_ticks;
6711 bp->cmd_ticks_int = bp->cmd_ticks;
6712 }
6713
6714 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6715 *
6716 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6717 * with byte enables disabled on the unused 32-bit word. This is legal
6718 * but causes problems on the AMD 8132 which will eventually stop
6719 * responding after a while.
6720 *
6721 * AMD believes this incompatibility is unique to the 5706, and
6722 * prefers to locally disable MSI rather than globally disabling it.
6723 */
6724 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6725 struct pci_dev *amd_8132 = NULL;
6726
6727 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6728 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6729 amd_8132))) {
6730
6731 if (amd_8132->revision >= 0x10 &&
6732 amd_8132->revision <= 0x13) {
6733 disable_msi = 1;
6734 pci_dev_put(amd_8132);
6735 break;
6736 }
6737 }
6738 }
6739
6740 bnx2_set_default_link(bp);
6741 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6742
6743 init_timer(&bp->timer);
6744 bp->timer.expires = RUN_AT(bp->timer_interval);
6745 bp->timer.data = (unsigned long) bp;
6746 bp->timer.function = bnx2_timer;
6747
6748 return 0;
6749
6750 err_out_unmap:
6751 if (bp->regview) {
6752 iounmap(bp->regview);
6753 bp->regview = NULL;
6754 }
6755
6756 err_out_release:
6757 pci_release_regions(pdev);
6758
6759 err_out_disable:
6760 pci_disable_device(pdev);
6761 pci_set_drvdata(pdev, NULL);
6762
6763 err_out:
6764 return rc;
6765 }
6766
6767 static char * __devinit
6768 bnx2_bus_string(struct bnx2 *bp, char *str)
6769 {
6770 char *s = str;
6771
6772 if (bp->flags & PCIE_FLAG) {
6773 s += sprintf(s, "PCI Express");
6774 } else {
6775 s += sprintf(s, "PCI");
6776 if (bp->flags & PCIX_FLAG)
6777 s += sprintf(s, "-X");
6778 if (bp->flags & PCI_32BIT_FLAG)
6779 s += sprintf(s, " 32-bit");
6780 else
6781 s += sprintf(s, " 64-bit");
6782 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6783 }
6784 return str;
6785 }
6786
6787 static int __devinit
6788 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6789 {
6790 static int version_printed = 0;
6791 struct net_device *dev = NULL;
6792 struct bnx2 *bp;
6793 int rc;
6794 char str[40];
6795 DECLARE_MAC_BUF(mac);
6796
6797 if (version_printed++ == 0)
6798 printk(KERN_INFO "%s", version);
6799
6800 /* dev zeroed in init_etherdev */
6801 dev = alloc_etherdev(sizeof(*bp));
6802
6803 if (!dev)
6804 return -ENOMEM;
6805
6806 rc = bnx2_init_board(pdev, dev);
6807 if (rc < 0) {
6808 free_netdev(dev);
6809 return rc;
6810 }
6811
6812 dev->open = bnx2_open;
6813 dev->hard_start_xmit = bnx2_start_xmit;
6814 dev->stop = bnx2_close;
6815 dev->get_stats = bnx2_get_stats;
6816 dev->set_multicast_list = bnx2_set_rx_mode;
6817 dev->do_ioctl = bnx2_ioctl;
6818 dev->set_mac_address = bnx2_change_mac_addr;
6819 dev->change_mtu = bnx2_change_mtu;
6820 dev->tx_timeout = bnx2_tx_timeout;
6821 dev->watchdog_timeo = TX_TIMEOUT;
6822 #ifdef BCM_VLAN
6823 dev->vlan_rx_register = bnx2_vlan_rx_register;
6824 #endif
6825 dev->ethtool_ops = &bnx2_ethtool_ops;
6826
6827 bp = netdev_priv(dev);
6828 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
6829
6830 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6831 dev->poll_controller = poll_bnx2;
6832 #endif
6833
6834 pci_set_drvdata(pdev, dev);
6835
6836 memcpy(dev->dev_addr, bp->mac_addr, 6);
6837 memcpy(dev->perm_addr, bp->mac_addr, 6);
6838 bp->name = board_info[ent->driver_data].name;
6839
6840 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6841 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6842 dev->features |= NETIF_F_IPV6_CSUM;
6843
6844 #ifdef BCM_VLAN
6845 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6846 #endif
6847 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6848 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6849 dev->features |= NETIF_F_TSO6;
6850
6851 if ((rc = register_netdev(dev))) {
6852 dev_err(&pdev->dev, "Cannot register net device\n");
6853 if (bp->regview)
6854 iounmap(bp->regview);
6855 pci_release_regions(pdev);
6856 pci_disable_device(pdev);
6857 pci_set_drvdata(pdev, NULL);
6858 free_netdev(dev);
6859 return rc;
6860 }
6861
6862 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6863 "IRQ %d, node addr %s\n",
6864 dev->name,
6865 bp->name,
6866 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6867 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6868 bnx2_bus_string(bp, str),
6869 dev->base_addr,
6870 bp->pdev->irq, print_mac(mac, dev->dev_addr));
6871
6872 return 0;
6873 }
6874
6875 static void __devexit
6876 bnx2_remove_one(struct pci_dev *pdev)
6877 {
6878 struct net_device *dev = pci_get_drvdata(pdev);
6879 struct bnx2 *bp = netdev_priv(dev);
6880
6881 flush_scheduled_work();
6882
6883 unregister_netdev(dev);
6884
6885 if (bp->regview)
6886 iounmap(bp->regview);
6887
6888 free_netdev(dev);
6889 pci_release_regions(pdev);
6890 pci_disable_device(pdev);
6891 pci_set_drvdata(pdev, NULL);
6892 }
6893
6894 static int
6895 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6896 {
6897 struct net_device *dev = pci_get_drvdata(pdev);
6898 struct bnx2 *bp = netdev_priv(dev);
6899 u32 reset_code;
6900
6901 /* PCI register 4 needs to be saved whether netif_running() or not.
6902 * MSI address and data need to be saved if using MSI and
6903 * netif_running().
6904 */
6905 pci_save_state(pdev);
6906 if (!netif_running(dev))
6907 return 0;
6908
6909 flush_scheduled_work();
6910 bnx2_netif_stop(bp);
6911 netif_device_detach(dev);
6912 del_timer_sync(&bp->timer);
6913 if (bp->flags & NO_WOL_FLAG)
6914 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6915 else if (bp->wol)
6916 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6917 else
6918 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6919 bnx2_reset_chip(bp, reset_code);
6920 bnx2_free_skbs(bp);
6921 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6922 return 0;
6923 }
6924
6925 static int
6926 bnx2_resume(struct pci_dev *pdev)
6927 {
6928 struct net_device *dev = pci_get_drvdata(pdev);
6929 struct bnx2 *bp = netdev_priv(dev);
6930
6931 pci_restore_state(pdev);
6932 if (!netif_running(dev))
6933 return 0;
6934
6935 bnx2_set_power_state(bp, PCI_D0);
6936 netif_device_attach(dev);
6937 bnx2_init_nic(bp);
6938 bnx2_netif_start(bp);
6939 return 0;
6940 }
6941
6942 static struct pci_driver bnx2_pci_driver = {
6943 .name = DRV_MODULE_NAME,
6944 .id_table = bnx2_pci_tbl,
6945 .probe = bnx2_init_one,
6946 .remove = __devexit_p(bnx2_remove_one),
6947 .suspend = bnx2_suspend,
6948 .resume = bnx2_resume,
6949 };
6950
6951 static int __init bnx2_init(void)
6952 {
6953 return pci_register_driver(&bnx2_pci_driver);
6954 }
6955
6956 static void __exit bnx2_cleanup(void)
6957 {
6958 pci_unregister_driver(&bnx2_pci_driver);
6959 }
6960
6961 module_init(bnx2_init);
6962 module_exit(bnx2_cleanup);
6963
6964
6965
This page took 0.227468 seconds and 5 git commands to generate.