1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004-2007 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.6.2"
58 #define DRV_MODULE_RELDATE "July 6, 2007"
59
60 #define RUN_AT(x) (jiffies + (x))
61
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
64
65 static const char version[] __devinitdata =
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72
73 static int disable_msi = 0;
74
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78 typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
84 BCM5708,
85 BCM5708S,
86 BCM5709,
87 BCM5709S,
88 } board_t;
89
90 /* indexed by board_t, above */
91 static const struct {
92 char *name;
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
103 };
104
105 static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
124 { 0, }
125 };
126
127 static struct flash_spec flash_table[] =
128 {
129 /* Slow EEPROM */
130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 "EEPROM - slow"},
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 "Entry 0001"},
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 "Entry 0100"},
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
172 /* Fast EEPROM */
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 "EEPROM - fast"},
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 "Entry 1001"},
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 "Entry 1010"},
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 "Entry 1100"},
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1101"},
202 /* Atmel Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
212 };
213
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217 {
218 u32 diff;
219
220 smp_mb();
221
222 /* The ring uses 256 indices for 255 entries; one of them
223 * needs to be skipped.
224 */
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
227 diff &= 0xffff;
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
230 }
231 return (bp->tx_ring_size - diff);
232 }
233
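/* Indirect register access: the target offset is written to the PCICFG
 * window address register and the data is then read or written through
 * the window, serialized by indirect_lock.
 */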
234 static u32
235 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236 {
237 u32 val;
238
239 spin_lock_bh(&bp->indirect_lock);
240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
243 return val;
244 }
245
246 static void
247 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
248 {
249 spin_lock_bh(&bp->indirect_lock);
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
252 spin_unlock_bh(&bp->indirect_lock);
253 }
254
255 static void
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257 {
258 offset += cid_addr;
259 spin_lock_bh(&bp->indirect_lock);
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261 int i;
262
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
267 u32 val;
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270 break;
271 udelay(5);
272 }
273 } else {
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
276 }
277 spin_unlock_bh(&bp->indirect_lock);
278 }
279
280 static int
281 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
282 {
283 u32 val1;
284 int i, ret;
285
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
289
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
292
293 udelay(40);
294 }
295
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
300
301 for (i = 0; i < 50; i++) {
302 udelay(10);
303
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
306 udelay(5);
307
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
310
311 break;
312 }
313 }
314
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
316 *val = 0x0;
317 ret = -EBUSY;
318 }
319 else {
320 *val = val1;
321 ret = 0;
322 }
323
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
327
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
330
331 udelay(40);
332 }
333
334 return ret;
335 }
336
337 static int
338 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
339 {
340 u32 val1;
341 int i, ret;
342
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
346
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
349
350 udelay(40);
351 }
352
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
357
358 for (i = 0; i < 50; i++) {
359 udelay(10);
360
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
363 udelay(5);
364 break;
365 }
366 }
367
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
369 ret = -EBUSY;
370 else
371 ret = 0;
372
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
379
380 udelay(40);
381 }
382
383 return ret;
384 }
385
386 static void
387 bnx2_disable_int(struct bnx2 *bp)
388 {
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
392 }
393
394 static void
395 bnx2_enable_int(struct bnx2 *bp)
396 {
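/* The first write acks the last status index with interrupts still
 * masked; the second write unmasks them. COAL_NOW then forces an
 * immediate coalescing pass so any pending events are serviced.
 */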
397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
400
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
403
404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
405 }
406
407 static void
408 bnx2_disable_int_sync(struct bnx2 *bp)
409 {
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
413 }
414
415 static void
416 bnx2_netif_stop(struct bnx2 *bp)
417 {
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
423 }
424 }
425
426 static void
427 bnx2_netif_start(struct bnx2 *bp)
428 {
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
433 bnx2_enable_int(bp);
434 }
435 }
436 }
437
438 static void
439 bnx2_free_mem(struct bnx2 *bp)
440 {
441 int i;
442
443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446 bp->ctx_blk[i],
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
449 }
450 }
451 if (bp->status_blk) {
452 pci_free_consistent(bp->pdev, bp->status_stats_size,
453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
455 bp->stats_blk = NULL;
456 }
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
462 }
463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
469 bp->rx_desc_ring[i],
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
472 }
473 vfree(bp->rx_buf_ring);
474 bp->rx_buf_ring = NULL;
475 }
476
477 static int
478 bnx2_alloc_mem(struct bnx2 *bp)
479 {
480 int i, status_blk_size;
481
482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
483 GFP_KERNEL);
484 if (bp->tx_buf_ring == NULL)
485 return -ENOMEM;
486
487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
489 TX_DESC_CNT,
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
492 goto alloc_mem_err;
493
494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
495 bp->rx_max_ring);
496 if (bp->rx_buf_ring == NULL)
497 goto alloc_mem_err;
498
499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
500 bp->rx_max_ring);
501
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
508 goto alloc_mem_err;
509
510 }
511
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
516
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
520 goto alloc_mem_err;
521
522 memset(bp->status_blk, 0, bp->status_stats_size);
523
524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
525 status_blk_size);
526
527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
528
529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
532 bp->ctx_pages = 1;
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
535 BCM_PAGE_SIZE,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
538 goto alloc_mem_err;
539 }
540 }
541 return 0;
542
543 alloc_mem_err:
544 bnx2_free_mem(bp);
545 return -ENOMEM;
546 }
547
548 static void
549 bnx2_report_fw_link(struct bnx2 *bp)
550 {
551 u32 fw_link_status = 0;
552
553 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
554 return;
555
556 if (bp->link_up) {
557 u32 bmsr;
558
559 switch (bp->line_speed) {
560 case SPEED_10:
561 if (bp->duplex == DUPLEX_HALF)
562 fw_link_status = BNX2_LINK_STATUS_10HALF;
563 else
564 fw_link_status = BNX2_LINK_STATUS_10FULL;
565 break;
566 case SPEED_100:
567 if (bp->duplex == DUPLEX_HALF)
568 fw_link_status = BNX2_LINK_STATUS_100HALF;
569 else
570 fw_link_status = BNX2_LINK_STATUS_100FULL;
571 break;
572 case SPEED_1000:
573 if (bp->duplex == DUPLEX_HALF)
574 fw_link_status = BNX2_LINK_STATUS_1000HALF;
575 else
576 fw_link_status = BNX2_LINK_STATUS_1000FULL;
577 break;
578 case SPEED_2500:
579 if (bp->duplex == DUPLEX_HALF)
580 fw_link_status = BNX2_LINK_STATUS_2500HALF;
581 else
582 fw_link_status = BNX2_LINK_STATUS_2500FULL;
583 break;
584 }
585
586 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
587
588 if (bp->autoneg) {
589 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
590
591 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
592 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
593
594 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
595 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
596 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
597 else
598 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
599 }
600 }
601 else
602 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
603
604 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
605 }
606
607 static char *
608 bnx2_xceiver_str(struct bnx2 *bp)
609 {
610 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
611 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
612 "Copper"));
613 }
614
615 static void
616 bnx2_report_link(struct bnx2 *bp)
617 {
618 if (bp->link_up) {
619 netif_carrier_on(bp->dev);
620 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
621 bnx2_xceiver_str(bp));
622
623 printk("%d Mbps ", bp->line_speed);
624
625 if (bp->duplex == DUPLEX_FULL)
626 printk("full duplex");
627 else
628 printk("half duplex");
629
630 if (bp->flow_ctrl) {
631 if (bp->flow_ctrl & FLOW_CTRL_RX) {
632 printk(", receive ");
633 if (bp->flow_ctrl & FLOW_CTRL_TX)
634 printk("& transmit ");
635 }
636 else {
637 printk(", transmit ");
638 }
639 printk("flow control ON");
640 }
641 printk("\n");
642 }
643 else {
644 netif_carrier_off(bp->dev);
645 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
646 bnx2_xceiver_str(bp));
647 }
648
649 bnx2_report_fw_link(bp);
650 }
651
652 static void
653 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
654 {
655 u32 local_adv, remote_adv;
656
657 bp->flow_ctrl = 0;
658 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
659 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
660
661 if (bp->duplex == DUPLEX_FULL) {
662 bp->flow_ctrl = bp->req_flow_ctrl;
663 }
664 return;
665 }
666
667 if (bp->duplex != DUPLEX_FULL) {
668 return;
669 }
670
671 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
672 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
673 u32 val;
674
675 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
676 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
677 bp->flow_ctrl |= FLOW_CTRL_TX;
678 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
679 bp->flow_ctrl |= FLOW_CTRL_RX;
680 return;
681 }
682
683 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
684 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
685
686 if (bp->phy_flags & PHY_SERDES_FLAG) {
687 u32 new_local_adv = 0;
688 u32 new_remote_adv = 0;
689
690 if (local_adv & ADVERTISE_1000XPAUSE)
691 new_local_adv |= ADVERTISE_PAUSE_CAP;
692 if (local_adv & ADVERTISE_1000XPSE_ASYM)
693 new_local_adv |= ADVERTISE_PAUSE_ASYM;
694 if (remote_adv & ADVERTISE_1000XPAUSE)
695 new_remote_adv |= ADVERTISE_PAUSE_CAP;
696 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
697 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
698
699 local_adv = new_local_adv;
700 remote_adv = new_remote_adv;
701 }
702
703 /* See Table 28B-3 of 802.3ab-1999 spec. */
704 if (local_adv & ADVERTISE_PAUSE_CAP) {
705 if(local_adv & ADVERTISE_PAUSE_ASYM) {
706 if (remote_adv & ADVERTISE_PAUSE_CAP) {
707 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
708 }
709 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
710 bp->flow_ctrl = FLOW_CTRL_RX;
711 }
712 }
713 else {
714 if (remote_adv & ADVERTISE_PAUSE_CAP) {
715 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
716 }
717 }
718 }
719 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
720 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
721 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
722
723 bp->flow_ctrl = FLOW_CTRL_TX;
724 }
725 }
726 }
727
728 static int
729 bnx2_5709s_linkup(struct bnx2 *bp)
730 {
731 u32 val, speed;
732
733 bp->link_up = 1;
734
735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
736 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
737 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
738
739 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
740 bp->line_speed = bp->req_line_speed;
741 bp->duplex = bp->req_duplex;
742 return 0;
743 }
744 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
745 switch (speed) {
746 case MII_BNX2_GP_TOP_AN_SPEED_10:
747 bp->line_speed = SPEED_10;
748 break;
749 case MII_BNX2_GP_TOP_AN_SPEED_100:
750 bp->line_speed = SPEED_100;
751 break;
752 case MII_BNX2_GP_TOP_AN_SPEED_1G:
753 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
754 bp->line_speed = SPEED_1000;
755 break;
756 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
757 bp->line_speed = SPEED_2500;
758 break;
759 }
760 if (val & MII_BNX2_GP_TOP_AN_FD)
761 bp->duplex = DUPLEX_FULL;
762 else
763 bp->duplex = DUPLEX_HALF;
764 return 0;
765 }
766
767 static int
768 bnx2_5708s_linkup(struct bnx2 *bp)
769 {
770 u32 val;
771
772 bp->link_up = 1;
773 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
774 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
775 case BCM5708S_1000X_STAT1_SPEED_10:
776 bp->line_speed = SPEED_10;
777 break;
778 case BCM5708S_1000X_STAT1_SPEED_100:
779 bp->line_speed = SPEED_100;
780 break;
781 case BCM5708S_1000X_STAT1_SPEED_1G:
782 bp->line_speed = SPEED_1000;
783 break;
784 case BCM5708S_1000X_STAT1_SPEED_2G5:
785 bp->line_speed = SPEED_2500;
786 break;
787 }
788 if (val & BCM5708S_1000X_STAT1_FD)
789 bp->duplex = DUPLEX_FULL;
790 else
791 bp->duplex = DUPLEX_HALF;
792
793 return 0;
794 }
795
796 static int
797 bnx2_5706s_linkup(struct bnx2 *bp)
798 {
799 u32 bmcr, local_adv, remote_adv, common;
800
801 bp->link_up = 1;
802 bp->line_speed = SPEED_1000;
803
804 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
805 if (bmcr & BMCR_FULLDPLX) {
806 bp->duplex = DUPLEX_FULL;
807 }
808 else {
809 bp->duplex = DUPLEX_HALF;
810 }
811
812 if (!(bmcr & BMCR_ANENABLE)) {
813 return 0;
814 }
815
816 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
817 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
818
819 common = local_adv & remote_adv;
820 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
821
822 if (common & ADVERTISE_1000XFULL) {
823 bp->duplex = DUPLEX_FULL;
824 }
825 else {
826 bp->duplex = DUPLEX_HALF;
827 }
828 }
829
830 return 0;
831 }
832
833 static int
834 bnx2_copper_linkup(struct bnx2 *bp)
835 {
836 u32 bmcr;
837
838 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839 if (bmcr & BMCR_ANENABLE) {
840 u32 local_adv, remote_adv, common;
841
842 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
843 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
844
845 common = local_adv & (remote_adv >> 2);
846 if (common & ADVERTISE_1000FULL) {
847 bp->line_speed = SPEED_1000;
848 bp->duplex = DUPLEX_FULL;
849 }
850 else if (common & ADVERTISE_1000HALF) {
851 bp->line_speed = SPEED_1000;
852 bp->duplex = DUPLEX_HALF;
853 }
854 else {
855 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
856 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
857
858 common = local_adv & remote_adv;
859 if (common & ADVERTISE_100FULL) {
860 bp->line_speed = SPEED_100;
861 bp->duplex = DUPLEX_FULL;
862 }
863 else if (common & ADVERTISE_100HALF) {
864 bp->line_speed = SPEED_100;
865 bp->duplex = DUPLEX_HALF;
866 }
867 else if (common & ADVERTISE_10FULL) {
868 bp->line_speed = SPEED_10;
869 bp->duplex = DUPLEX_FULL;
870 }
871 else if (common & ADVERTISE_10HALF) {
872 bp->line_speed = SPEED_10;
873 bp->duplex = DUPLEX_HALF;
874 }
875 else {
876 bp->line_speed = 0;
877 bp->link_up = 0;
878 }
879 }
880 }
881 else {
882 if (bmcr & BMCR_SPEED100) {
883 bp->line_speed = SPEED_100;
884 }
885 else {
886 bp->line_speed = SPEED_10;
887 }
888 if (bmcr & BMCR_FULLDPLX) {
889 bp->duplex = DUPLEX_FULL;
890 }
891 else {
892 bp->duplex = DUPLEX_HALF;
893 }
894 }
895
896 return 0;
897 }
898
899 static int
900 bnx2_set_mac_link(struct bnx2 *bp)
901 {
902 u32 val;
903
904 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
905 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
906 (bp->duplex == DUPLEX_HALF)) {
907 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
908 }
909
910 /* Configure the EMAC mode register. */
911 val = REG_RD(bp, BNX2_EMAC_MODE);
912
913 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
914 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
915 BNX2_EMAC_MODE_25G_MODE);
916
917 if (bp->link_up) {
918 switch (bp->line_speed) {
919 case SPEED_10:
920 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
921 val |= BNX2_EMAC_MODE_PORT_MII_10M;
922 break;
923 }
924 /* fall through */
925 case SPEED_100:
926 val |= BNX2_EMAC_MODE_PORT_MII;
927 break;
928 case SPEED_2500:
929 val |= BNX2_EMAC_MODE_25G_MODE;
930 /* fall through */
931 case SPEED_1000:
932 val |= BNX2_EMAC_MODE_PORT_GMII;
933 break;
934 }
935 }
936 else {
937 val |= BNX2_EMAC_MODE_PORT_GMII;
938 }
939
940 /* Set the MAC to operate in the appropriate duplex mode. */
941 if (bp->duplex == DUPLEX_HALF)
942 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
943 REG_WR(bp, BNX2_EMAC_MODE, val);
944
945 /* Enable/disable rx PAUSE. */
946 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
947
948 if (bp->flow_ctrl & FLOW_CTRL_RX)
949 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
950 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
951
952 /* Enable/disable tx PAUSE. */
953 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
954 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
955
956 if (bp->flow_ctrl & FLOW_CTRL_TX)
957 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
958 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
959
960 /* Acknowledge the interrupt. */
961 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
962
963 return 0;
964 }
965
966 static void
967 bnx2_enable_bmsr1(struct bnx2 *bp)
968 {
969 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
970 (CHIP_NUM(bp) == CHIP_NUM_5709))
971 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
972 MII_BNX2_BLK_ADDR_GP_STATUS);
973 }
974
975 static void
976 bnx2_disable_bmsr1(struct bnx2 *bp)
977 {
978 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
979 (CHIP_NUM(bp) == CHIP_NUM_5709))
980 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
981 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
982 }
983
984 static int
985 bnx2_test_and_enable_2g5(struct bnx2 *bp)
986 {
987 u32 up1;
988 int ret = 1;
989
990 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
991 return 0;
992
993 if (bp->autoneg & AUTONEG_SPEED)
994 bp->advertising |= ADVERTISED_2500baseX_Full;
995
996 if (CHIP_NUM(bp) == CHIP_NUM_5709)
997 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
998
999 bnx2_read_phy(bp, bp->mii_up1, &up1);
1000 if (!(up1 & BCM5708S_UP1_2G5)) {
1001 up1 |= BCM5708S_UP1_2G5;
1002 bnx2_write_phy(bp, bp->mii_up1, up1);
1003 ret = 0;
1004 }
1005
1006 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1007 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1008 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1009
1010 return ret;
1011 }
1012
1013 static int
1014 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1015 {
1016 u32 up1;
1017 int ret = 0;
1018
1019 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1020 return 0;
1021
1022 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1023 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1024
1025 bnx2_read_phy(bp, bp->mii_up1, &up1);
1026 if (up1 & BCM5708S_UP1_2G5) {
1027 up1 &= ~BCM5708S_UP1_2G5;
1028 bnx2_write_phy(bp, bp->mii_up1, up1);
1029 ret = 1;
1030 }
1031
1032 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1033 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1034 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1035
1036 return ret;
1037 }
1038
1039 static void
1040 bnx2_enable_forced_2g5(struct bnx2 *bp)
1041 {
1042 u32 bmcr;
1043
1044 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1045 return;
1046
1047 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1048 u32 val;
1049
1050 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1051 MII_BNX2_BLK_ADDR_SERDES_DIG);
1052 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1053 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1054 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1055 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1056
1057 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1058 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1059 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1060
1061 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1062 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1063 bmcr |= BCM5708S_BMCR_FORCE_2500;
1064 }
1065
1066 if (bp->autoneg & AUTONEG_SPEED) {
1067 bmcr &= ~BMCR_ANENABLE;
1068 if (bp->req_duplex == DUPLEX_FULL)
1069 bmcr |= BMCR_FULLDPLX;
1070 }
1071 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1072 }
1073
1074 static void
1075 bnx2_disable_forced_2g5(struct bnx2 *bp)
1076 {
1077 u32 bmcr;
1078
1079 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1080 return;
1081
1082 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1083 u32 val;
1084
1085 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1086 MII_BNX2_BLK_ADDR_SERDES_DIG);
1087 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1088 val &= ~MII_BNX2_SD_MISC1_FORCE;
1089 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1098 }
1099
1100 if (bp->autoneg & AUTONEG_SPEED)
1101 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1102 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1103 }
1104
1105 static int
1106 bnx2_set_link(struct bnx2 *bp)
1107 {
1108 u32 bmsr;
1109 u8 link_up;
1110
1111 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1112 bp->link_up = 1;
1113 return 0;
1114 }
1115
1116 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1117 return 0;
1118
1119 link_up = bp->link_up;
1120
1121 bnx2_enable_bmsr1(bp);
1122 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1123 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1124 bnx2_disable_bmsr1(bp);
1125
1126 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1127 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1128 u32 val;
1129
1130 val = REG_RD(bp, BNX2_EMAC_STATUS);
1131 if (val & BNX2_EMAC_STATUS_LINK)
1132 bmsr |= BMSR_LSTATUS;
1133 else
1134 bmsr &= ~BMSR_LSTATUS;
1135 }
1136
1137 if (bmsr & BMSR_LSTATUS) {
1138 bp->link_up = 1;
1139
1140 if (bp->phy_flags & PHY_SERDES_FLAG) {
1141 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1142 bnx2_5706s_linkup(bp);
1143 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1144 bnx2_5708s_linkup(bp);
1145 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1146 bnx2_5709s_linkup(bp);
1147 }
1148 else {
1149 bnx2_copper_linkup(bp);
1150 }
1151 bnx2_resolve_flow_ctrl(bp);
1152 }
1153 else {
1154 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1155 (bp->autoneg & AUTONEG_SPEED))
1156 bnx2_disable_forced_2g5(bp);
1157
1158 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1159 bp->link_up = 0;
1160 }
1161
1162 if (bp->link_up != link_up) {
1163 bnx2_report_link(bp);
1164 }
1165
1166 bnx2_set_mac_link(bp);
1167
1168 return 0;
1169 }
1170
1171 static int
1172 bnx2_reset_phy(struct bnx2 *bp)
1173 {
1174 int i;
1175 u32 reg;
1176
1177 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1178
1179 #define PHY_RESET_MAX_WAIT 100
1180 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1181 udelay(10);
1182
1183 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1184 if (!(reg & BMCR_RESET)) {
1185 udelay(20);
1186 break;
1187 }
1188 }
1189 if (i == PHY_RESET_MAX_WAIT) {
1190 return -EBUSY;
1191 }
1192 return 0;
1193 }
1194
1195 static u32
1196 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1197 {
1198 u32 adv = 0;
1199
1200 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1201 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1202
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE;
1205 }
1206 else {
1207 adv = ADVERTISE_PAUSE_CAP;
1208 }
1209 }
1210 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1211 if (bp->phy_flags & PHY_SERDES_FLAG) {
1212 adv = ADVERTISE_1000XPSE_ASYM;
1213 }
1214 else {
1215 adv = ADVERTISE_PAUSE_ASYM;
1216 }
1217 }
1218 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1219 if (bp->phy_flags & PHY_SERDES_FLAG) {
1220 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1221 }
1222 else {
1223 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1224 }
1225 }
1226 return adv;
1227 }
1228
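/* Forward declaration: bnx2_fw_sync() is defined below but is needed
 * by bnx2_setup_remote_phy().
 */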
1229 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1230
1231 static int
1232 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1233 {
1234 u32 speed_arg = 0, pause_adv;
1235
1236 pause_adv = bnx2_phy_get_pause_adv(bp);
1237
1238 if (bp->autoneg & AUTONEG_SPEED) {
1239 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1240 if (bp->advertising & ADVERTISED_10baseT_Half)
1241 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1242 if (bp->advertising & ADVERTISED_10baseT_Full)
1243 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1244 if (bp->advertising & ADVERTISED_100baseT_Half)
1245 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1246 if (bp->advertising & ADVERTISED_100baseT_Full)
1247 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1248 if (bp->advertising & ADVERTISED_1000baseT_Full)
1249 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1250 if (bp->advertising & ADVERTISED_2500baseX_Full)
1251 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1252 } else {
1253 if (bp->req_line_speed == SPEED_2500)
1254 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1255 else if (bp->req_line_speed == SPEED_1000)
1256 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1257 else if (bp->req_line_speed == SPEED_100) {
1258 if (bp->req_duplex == DUPLEX_FULL)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1260 else
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1262 } else if (bp->req_line_speed == SPEED_10) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1265 else
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1267 }
1268 }
1269
1270 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1271 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1272 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1273 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1274
1275 if (port == PORT_TP)
1276 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1277 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1278
1279 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1280
1281 spin_unlock_bh(&bp->phy_lock);
1282 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1283 spin_lock_bh(&bp->phy_lock);
1284
1285 return 0;
1286 }
1287
1288 static int
1289 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1290 {
1291 u32 adv, bmcr;
1292 u32 new_adv = 0;
1293
1294 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1295 return (bnx2_setup_remote_phy(bp, port));
1296
1297 if (!(bp->autoneg & AUTONEG_SPEED)) {
1298 u32 new_bmcr;
1299 int force_link_down = 0;
1300
1301 if (bp->req_line_speed == SPEED_2500) {
1302 if (!bnx2_test_and_enable_2g5(bp))
1303 force_link_down = 1;
1304 } else if (bp->req_line_speed == SPEED_1000) {
1305 if (bnx2_test_and_disable_2g5(bp))
1306 force_link_down = 1;
1307 }
1308 bnx2_read_phy(bp, bp->mii_adv, &adv);
1309 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1310
1311 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1312 new_bmcr = bmcr & ~BMCR_ANENABLE;
1313 new_bmcr |= BMCR_SPEED1000;
1314
1315 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1316 if (bp->req_line_speed == SPEED_2500)
1317 bnx2_enable_forced_2g5(bp);
1318 else if (bp->req_line_speed == SPEED_1000) {
1319 bnx2_disable_forced_2g5(bp);
1320 new_bmcr &= ~0x2000;
1321 }
1322
1323 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1324 if (bp->req_line_speed == SPEED_2500)
1325 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1326 else
1327 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1328 }
1329
1330 if (bp->req_duplex == DUPLEX_FULL) {
1331 adv |= ADVERTISE_1000XFULL;
1332 new_bmcr |= BMCR_FULLDPLX;
1333 }
1334 else {
1335 adv |= ADVERTISE_1000XHALF;
1336 new_bmcr &= ~BMCR_FULLDPLX;
1337 }
1338 if ((new_bmcr != bmcr) || (force_link_down)) {
1339 /* Force a link-down event visible to the link partner */
1340 if (bp->link_up) {
1341 bnx2_write_phy(bp, bp->mii_adv, adv &
1342 ~(ADVERTISE_1000XFULL |
1343 ADVERTISE_1000XHALF));
1344 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1345 BMCR_ANRESTART | BMCR_ANENABLE);
1346
1347 bp->link_up = 0;
1348 netif_carrier_off(bp->dev);
1349 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1350 bnx2_report_link(bp);
1351 }
1352 bnx2_write_phy(bp, bp->mii_adv, adv);
1353 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1354 } else {
1355 bnx2_resolve_flow_ctrl(bp);
1356 bnx2_set_mac_link(bp);
1357 }
1358 return 0;
1359 }
1360
1361 bnx2_test_and_enable_2g5(bp);
1362
1363 if (bp->advertising & ADVERTISED_1000baseT_Full)
1364 new_adv |= ADVERTISE_1000XFULL;
1365
1366 new_adv |= bnx2_phy_get_pause_adv(bp);
1367
1368 bnx2_read_phy(bp, bp->mii_adv, &adv);
1369 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1370
1371 bp->serdes_an_pending = 0;
1372 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1373 /* Force a link-down event visible to the link partner */
1374 if (bp->link_up) {
1375 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1376 spin_unlock_bh(&bp->phy_lock);
1377 msleep(20);
1378 spin_lock_bh(&bp->phy_lock);
1379 }
1380
1381 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1382 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1383 BMCR_ANENABLE);
1384 /* Speed up link-up time when the link partner
1385 * does not autonegotiate, which is very common
1386 * in blade servers. Some blade servers use
1387 * IPMI for keyboard input and it's important
1388 * to minimize link disruptions. Autoneg. involves
1389 * exchanging base pages plus 3 next pages and
1390 * normally completes in about 120 msec.
1391 */
1392 bp->current_interval = SERDES_AN_TIMEOUT;
1393 bp->serdes_an_pending = 1;
1394 mod_timer(&bp->timer, jiffies + bp->current_interval);
1395 } else {
1396 bnx2_resolve_flow_ctrl(bp);
1397 bnx2_set_mac_link(bp);
1398 }
1399
1400 return 0;
1401 }
1402
1403 #define ETHTOOL_ALL_FIBRE_SPEED \
1404 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1405 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1406 (ADVERTISED_1000baseT_Full)
1407
1408 #define ETHTOOL_ALL_COPPER_SPEED \
1409 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1410 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1411 ADVERTISED_1000baseT_Full)
1412
1413 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1414 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1415
1416 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1417
1418 static void
1419 bnx2_set_default_remote_link(struct bnx2 *bp)
1420 {
1421 u32 link;
1422
1423 if (bp->phy_port == PORT_TP)
1424 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1425 else
1426 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1427
1428 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1429 bp->req_line_speed = 0;
1430 bp->autoneg |= AUTONEG_SPEED;
1431 bp->advertising = ADVERTISED_Autoneg;
1432 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1433 bp->advertising |= ADVERTISED_10baseT_Half;
1434 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1435 bp->advertising |= ADVERTISED_10baseT_Full;
1436 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1437 bp->advertising |= ADVERTISED_100baseT_Half;
1438 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1439 bp->advertising |= ADVERTISED_100baseT_Full;
1440 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1441 bp->advertising |= ADVERTISED_1000baseT_Full;
1442 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1443 bp->advertising |= ADVERTISED_2500baseX_Full;
1444 } else {
1445 bp->autoneg = 0;
1446 bp->advertising = 0;
1447 bp->req_duplex = DUPLEX_FULL;
1448 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1449 bp->req_line_speed = SPEED_10;
1450 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1451 bp->req_duplex = DUPLEX_HALF;
1452 }
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1454 bp->req_line_speed = SPEED_100;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1456 bp->req_duplex = DUPLEX_HALF;
1457 }
1458 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1459 bp->req_line_speed = SPEED_1000;
1460 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1461 bp->req_line_speed = SPEED_2500;
1462 }
1463 }
1464
1465 static void
1466 bnx2_set_default_link(struct bnx2 *bp)
1467 {
1468 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1469 return bnx2_set_default_remote_link(bp);
1470
1471 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1472 bp->req_line_speed = 0;
1473 if (bp->phy_flags & PHY_SERDES_FLAG) {
1474 u32 reg;
1475
1476 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1477
1478 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1479 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1480 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1481 bp->autoneg = 0;
1482 bp->req_line_speed = bp->line_speed = SPEED_1000;
1483 bp->req_duplex = DUPLEX_FULL;
1484 }
1485 } else
1486 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1487 }
1488
1489 static void
1490 bnx2_send_heart_beat(struct bnx2 *bp)
1491 {
1492 u32 msg;
1493 u32 addr;
1494
1495 spin_lock(&bp->indirect_lock);
1496 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1497 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1498 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1499 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1500 spin_unlock(&bp->indirect_lock);
1501 }
1502
1503 static void
1504 bnx2_remote_phy_event(struct bnx2 *bp)
1505 {
1506 u32 msg;
1507 u8 link_up = bp->link_up;
1508 u8 old_port;
1509
1510 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1511
1512 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1513 bnx2_send_heart_beat(bp);
1514
1515 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1516
1517 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1518 bp->link_up = 0;
1519 else {
1520 u32 speed;
1521
1522 bp->link_up = 1;
1523 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1524 bp->duplex = DUPLEX_FULL;
1525 switch (speed) {
1526 case BNX2_LINK_STATUS_10HALF:
1527 bp->duplex = DUPLEX_HALF;
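/* fall through */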
1528 case BNX2_LINK_STATUS_10FULL:
1529 bp->line_speed = SPEED_10;
1530 break;
1531 case BNX2_LINK_STATUS_100HALF:
1532 bp->duplex = DUPLEX_HALF;
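/* fall through */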
1533 case BNX2_LINK_STATUS_100BASE_T4:
1534 case BNX2_LINK_STATUS_100FULL:
1535 bp->line_speed = SPEED_100;
1536 break;
1537 case BNX2_LINK_STATUS_1000HALF:
1538 bp->duplex = DUPLEX_HALF;
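/* fall through */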
1539 case BNX2_LINK_STATUS_1000FULL:
1540 bp->line_speed = SPEED_1000;
1541 break;
1542 case BNX2_LINK_STATUS_2500HALF:
1543 bp->duplex = DUPLEX_HALF;
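/* fall through */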
1544 case BNX2_LINK_STATUS_2500FULL:
1545 bp->line_speed = SPEED_2500;
1546 break;
1547 default:
1548 bp->line_speed = 0;
1549 break;
1550 }
1551
1552 spin_lock(&bp->phy_lock);
1553 bp->flow_ctrl = 0;
1554 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1555 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1556 if (bp->duplex == DUPLEX_FULL)
1557 bp->flow_ctrl = bp->req_flow_ctrl;
1558 } else {
1559 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1560 bp->flow_ctrl |= FLOW_CTRL_TX;
1561 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1562 bp->flow_ctrl |= FLOW_CTRL_RX;
1563 }
1564
1565 old_port = bp->phy_port;
1566 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1567 bp->phy_port = PORT_FIBRE;
1568 else
1569 bp->phy_port = PORT_TP;
1570
1571 if (old_port != bp->phy_port)
1572 bnx2_set_default_link(bp);
1573
1574 spin_unlock(&bp->phy_lock);
1575 }
1576 if (bp->link_up != link_up)
1577 bnx2_report_link(bp);
1578
1579 bnx2_set_mac_link(bp);
1580 }
1581
1582 static int
1583 bnx2_set_remote_link(struct bnx2 *bp)
1584 {
1585 u32 evt_code;
1586
1587 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1588 switch (evt_code) {
1589 case BNX2_FW_EVT_CODE_LINK_EVENT:
1590 bnx2_remote_phy_event(bp);
1591 break;
1592 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1593 default:
1594 bnx2_send_heart_beat(bp);
1595 break;
1596 }
1597 return 0;
1598 }
1599
1600 static int
1601 bnx2_setup_copper_phy(struct bnx2 *bp)
1602 {
1603 u32 bmcr;
1604 u32 new_bmcr;
1605
1606 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1607
1608 if (bp->autoneg & AUTONEG_SPEED) {
1609 u32 adv_reg, adv1000_reg;
1610 u32 new_adv_reg = 0;
1611 u32 new_adv1000_reg = 0;
1612
1613 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1614 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1615 ADVERTISE_PAUSE_ASYM);
1616
1617 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1618 adv1000_reg &= PHY_ALL_1000_SPEED;
1619
1620 if (bp->advertising & ADVERTISED_10baseT_Half)
1621 new_adv_reg |= ADVERTISE_10HALF;
1622 if (bp->advertising & ADVERTISED_10baseT_Full)
1623 new_adv_reg |= ADVERTISE_10FULL;
1624 if (bp->advertising & ADVERTISED_100baseT_Half)
1625 new_adv_reg |= ADVERTISE_100HALF;
1626 if (bp->advertising & ADVERTISED_100baseT_Full)
1627 new_adv_reg |= ADVERTISE_100FULL;
1628 if (bp->advertising & ADVERTISED_1000baseT_Full)
1629 new_adv1000_reg |= ADVERTISE_1000FULL;
1630
1631 new_adv_reg |= ADVERTISE_CSMA;
1632
1633 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1634
1635 if ((adv1000_reg != new_adv1000_reg) ||
1636 (adv_reg != new_adv_reg) ||
1637 ((bmcr & BMCR_ANENABLE) == 0)) {
1638
1639 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1640 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1641 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1642 BMCR_ANENABLE);
1643 }
1644 else if (bp->link_up) {
1645 /* Flow ctrl may have changed from auto to forced */
1646 /* or vice-versa. */
1647
1648 bnx2_resolve_flow_ctrl(bp);
1649 bnx2_set_mac_link(bp);
1650 }
1651 return 0;
1652 }
1653
1654 new_bmcr = 0;
1655 if (bp->req_line_speed == SPEED_100) {
1656 new_bmcr |= BMCR_SPEED100;
1657 }
1658 if (bp->req_duplex == DUPLEX_FULL) {
1659 new_bmcr |= BMCR_FULLDPLX;
1660 }
1661 if (new_bmcr != bmcr) {
1662 u32 bmsr;
1663
1664 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1665 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1666
1667 if (bmsr & BMSR_LSTATUS) {
1668 /* Force link down */
1669 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1670 spin_unlock_bh(&bp->phy_lock);
1671 msleep(50);
1672 spin_lock_bh(&bp->phy_lock);
1673
1674 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1675 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1676 }
1677
1678 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1679
1680 /* Normally, the new speed is set up after the link has
1681 * gone down and up again. In some cases, the link will not
1682 * go down, so we need to set up the new speed here.
1683 */
1684 if (bmsr & BMSR_LSTATUS) {
1685 bp->line_speed = bp->req_line_speed;
1686 bp->duplex = bp->req_duplex;
1687 bnx2_resolve_flow_ctrl(bp);
1688 bnx2_set_mac_link(bp);
1689 }
1690 } else {
1691 bnx2_resolve_flow_ctrl(bp);
1692 bnx2_set_mac_link(bp);
1693 }
1694 return 0;
1695 }
1696
1697 static int
1698 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1699 {
1700 if (bp->loopback == MAC_LOOPBACK)
1701 return 0;
1702
1703 if (bp->phy_flags & PHY_SERDES_FLAG) {
1704 return (bnx2_setup_serdes_phy(bp, port));
1705 }
1706 else {
1707 return (bnx2_setup_copper_phy(bp));
1708 }
1709 }
1710
1711 static int
1712 bnx2_init_5709s_phy(struct bnx2 *bp)
1713 {
1714 u32 val;
1715
1716 bp->mii_bmcr = MII_BMCR + 0x10;
1717 bp->mii_bmsr = MII_BMSR + 0x10;
1718 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1719 bp->mii_adv = MII_ADVERTISE + 0x10;
1720 bp->mii_lpa = MII_LPA + 0x10;
1721 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1722
1723 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1724 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1725
1726 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1727 bnx2_reset_phy(bp);
1728
1729 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1730
1731 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1732 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1733 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1734 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1735
1736 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1737 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1738 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1739 val |= BCM5708S_UP1_2G5;
1740 else
1741 val &= ~BCM5708S_UP1_2G5;
1742 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1743
1744 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1745 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1746 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1747 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1748
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1750
1751 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1752 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1753 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1754
1755 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1756
1757 return 0;
1758 }
1759
1760 static int
1761 bnx2_init_5708s_phy(struct bnx2 *bp)
1762 {
1763 u32 val;
1764
1765 bnx2_reset_phy(bp);
1766
1767 bp->mii_up1 = BCM5708S_UP1;
1768
1769 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1770 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1771 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1772
1773 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1774 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1775 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1776
1777 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1778 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1779 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1780
1781 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1782 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1783 val |= BCM5708S_UP1_2G5;
1784 bnx2_write_phy(bp, BCM5708S_UP1, val);
1785 }
1786
1787 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1788 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1789 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1790 /* increase tx signal amplitude */
1791 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1792 BCM5708S_BLK_ADDR_TX_MISC);
1793 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1794 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1795 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1796 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1797 }
1798
1799 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1800 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1801
1802 if (val) {
1803 u32 is_backplane;
1804
1805 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1806 BNX2_SHARED_HW_CFG_CONFIG);
1807 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1808 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1809 BCM5708S_BLK_ADDR_TX_MISC);
1810 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1811 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1812 BCM5708S_BLK_ADDR_DIG);
1813 }
1814 }
1815 return 0;
1816 }
1817
1818 static int
1819 bnx2_init_5706s_phy(struct bnx2 *bp)
1820 {
1821 bnx2_reset_phy(bp);
1822
1823 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1824
1825 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1826 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1827
1828 if (bp->dev->mtu > 1500) {
1829 u32 val;
1830
1831 /* Set extended packet length bit */
1832 bnx2_write_phy(bp, 0x18, 0x7);
1833 bnx2_read_phy(bp, 0x18, &val);
1834 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1835
1836 bnx2_write_phy(bp, 0x1c, 0x6c00);
1837 bnx2_read_phy(bp, 0x1c, &val);
1838 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1839 }
1840 else {
1841 u32 val;
1842
1843 bnx2_write_phy(bp, 0x18, 0x7);
1844 bnx2_read_phy(bp, 0x18, &val);
1845 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1846
1847 bnx2_write_phy(bp, 0x1c, 0x6c00);
1848 bnx2_read_phy(bp, 0x1c, &val);
1849 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1850 }
1851
1852 return 0;
1853 }
1854
1855 static int
1856 bnx2_init_copper_phy(struct bnx2 *bp)
1857 {
1858 u32 val;
1859
1860 bnx2_reset_phy(bp);
1861
1862 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1863 bnx2_write_phy(bp, 0x18, 0x0c00);
1864 bnx2_write_phy(bp, 0x17, 0x000a);
1865 bnx2_write_phy(bp, 0x15, 0x310b);
1866 bnx2_write_phy(bp, 0x17, 0x201f);
1867 bnx2_write_phy(bp, 0x15, 0x9506);
1868 bnx2_write_phy(bp, 0x17, 0x401f);
1869 bnx2_write_phy(bp, 0x15, 0x14e2);
1870 bnx2_write_phy(bp, 0x18, 0x0400);
1871 }
1872
1873 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1874 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1875 MII_BNX2_DSP_EXPAND_REG | 0x8);
1876 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1877 val &= ~(1 << 8);
1878 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1879 }
1880
1881 if (bp->dev->mtu > 1500) {
1882 /* Set extended packet length bit */
1883 bnx2_write_phy(bp, 0x18, 0x7);
1884 bnx2_read_phy(bp, 0x18, &val);
1885 bnx2_write_phy(bp, 0x18, val | 0x4000);
1886
1887 bnx2_read_phy(bp, 0x10, &val);
1888 bnx2_write_phy(bp, 0x10, val | 0x1);
1889 }
1890 else {
1891 bnx2_write_phy(bp, 0x18, 0x7);
1892 bnx2_read_phy(bp, 0x18, &val);
1893 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1894
1895 bnx2_read_phy(bp, 0x10, &val);
1896 bnx2_write_phy(bp, 0x10, val & ~0x1);
1897 }
1898
1899 /* ethernet@wirespeed */
1900 bnx2_write_phy(bp, 0x18, 0x7007);
1901 bnx2_read_phy(bp, 0x18, &val);
1902 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1903 return 0;
1904 }
1905
1906
1907 static int
1908 bnx2_init_phy(struct bnx2 *bp)
1909 {
1910 u32 val;
1911 int rc = 0;
1912
1913 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1914 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1915
1916 bp->mii_bmcr = MII_BMCR;
1917 bp->mii_bmsr = MII_BMSR;
1918 bp->mii_bmsr1 = MII_BMSR;
1919 bp->mii_adv = MII_ADVERTISE;
1920 bp->mii_lpa = MII_LPA;
1921
1922 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1923
1924 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1925 goto setup_phy;
1926
1927 bnx2_read_phy(bp, MII_PHYSID1, &val);
1928 bp->phy_id = val << 16;
1929 bnx2_read_phy(bp, MII_PHYSID2, &val);
1930 bp->phy_id |= val & 0xffff;
1931
1932 if (bp->phy_flags & PHY_SERDES_FLAG) {
1933 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1934 rc = bnx2_init_5706s_phy(bp);
1935 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1936 rc = bnx2_init_5708s_phy(bp);
1937 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1938 rc = bnx2_init_5709s_phy(bp);
1939 }
1940 else {
1941 rc = bnx2_init_copper_phy(bp);
1942 }
1943
1944 setup_phy:
1945 if (!rc)
1946 rc = bnx2_setup_phy(bp, bp->phy_port);
1947
1948 return rc;
1949 }
1950
1951 static int
1952 bnx2_set_mac_loopback(struct bnx2 *bp)
1953 {
1954 u32 mac_mode;
1955
1956 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1957 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1958 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1959 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1960 bp->link_up = 1;
1961 return 0;
1962 }
1963
1964 static int bnx2_test_link(struct bnx2 *);
1965
1966 static int
1967 bnx2_set_phy_loopback(struct bnx2 *bp)
1968 {
1969 u32 mac_mode;
1970 int rc, i;
1971
1972 spin_lock_bh(&bp->phy_lock);
1973 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1974 BMCR_SPEED1000);
1975 spin_unlock_bh(&bp->phy_lock);
1976 if (rc)
1977 return rc;
1978
1979 for (i = 0; i < 10; i++) {
1980 if (bnx2_test_link(bp) == 0)
1981 break;
1982 msleep(100);
1983 }
1984
1985 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1986 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1987 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1988 BNX2_EMAC_MODE_25G_MODE);
1989
1990 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1991 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1992 bp->link_up = 1;
1993 return 0;
1994 }
1995
1996 static int
1997 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1998 {
1999 int i;
2000 u32 val;
2001
2002 bp->fw_wr_seq++;
2003 msg_data |= bp->fw_wr_seq;
2004
2005 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2006
2007 /* wait for an acknowledgement. */
2008 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2009 msleep(10);
2010
2011 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2012
2013 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2014 break;
2015 }
2016 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2017 return 0;
2018
2019 /* If we timed out, inform the firmware that this is the case. */
2020 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2021 if (!silent)
2022 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2023 "%x\n", msg_data);
2024
2025 msg_data &= ~BNX2_DRV_MSG_CODE;
2026 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2027
2028 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2029
2030 return -EBUSY;
2031 }
2032
2033 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2034 return -EIO;
2035
2036 return 0;
2037 }
2038
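/* On the 5709 the connection contexts live in host memory.  Register the
 * DMA address of each context page with the chip through the
 * HOST_PAGE_TBL registers, polling until each WRITE_REQ is accepted.
 */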
2039 static int
2040 bnx2_init_5709_context(struct bnx2 *bp)
2041 {
2042 int i, ret = 0;
2043 u32 val;
2044
2045 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2046 val |= (BCM_PAGE_BITS - 8) << 16;
2047 REG_WR(bp, BNX2_CTX_COMMAND, val);
2048 for (i = 0; i < 10; i++) {
2049 val = REG_RD(bp, BNX2_CTX_COMMAND);
2050 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2051 break;
2052 udelay(2);
2053 }
2054 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2055 return -EBUSY;
2056
2057 for (i = 0; i < bp->ctx_pages; i++) {
2058 int j;
2059
2060 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2061 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2062 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2063 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2064 (u64) bp->ctx_blk_mapping[i] >> 32);
2065 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2066 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2067 for (j = 0; j < 10; j++) {
2068
2069 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2070 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2071 break;
2072 udelay(5);
2073 }
2074 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2075 ret = -EBUSY;
2076 break;
2077 }
2078 }
2079 return ret;
2080 }
2081
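/* Zero out all 96 quick contexts through the context window.  On 5706 A0,
 * a virtual CID with bit 3 set maps to a different physical CID, e.g.
 * vcid 0x28 becomes 0x60 + 0x20 + 0x0 = 0x80.
 */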
2082 static void
2083 bnx2_init_context(struct bnx2 *bp)
2084 {
2085 u32 vcid;
2086
2087 vcid = 96;
2088 while (vcid) {
2089 u32 vcid_addr, pcid_addr, offset;
2090 int i;
2091
2092 vcid--;
2093
2094 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2095 u32 new_vcid;
2096
2097 vcid_addr = GET_PCID_ADDR(vcid);
2098 if (vcid & 0x8) {
2099 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2100 }
2101 else {
2102 new_vcid = vcid;
2103 }
2104 pcid_addr = GET_PCID_ADDR(new_vcid);
2105 }
2106 else {
2107 vcid_addr = GET_CID_ADDR(vcid);
2108 pcid_addr = vcid_addr;
2109 }
2110
2111 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2112 vcid_addr += (i << PHY_CTX_SHIFT);
2113 pcid_addr += (i << PHY_CTX_SHIFT);
2114
2115 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2116 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2117
2118 /* Zero out the context. */
2119 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2120 CTX_WR(bp, 0x00, offset, 0);
2121
2122 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2123 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2124 }
2125 }
2126 }
2127
2128 static int
2129 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2130 {
2131 u16 *good_mbuf;
2132 u32 good_mbuf_cnt;
2133 u32 val;
2134
2135 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2136 if (good_mbuf == NULL) {
2137 printk(KERN_ERR PFX "Failed to allocate memory in "
2138 "bnx2_alloc_bad_rbuf\n");
2139 return -ENOMEM;
2140 }
2141
2142 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2143 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2144
2145 good_mbuf_cnt = 0;
2146
2147 /* Allocate a bunch of mbufs and save the good ones in an array. */
2148 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2149 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2150 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2151
2152 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2153
2154 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2155
2156 /* The addresses with Bit 9 set are bad memory blocks. */
2157 if (!(val & (1 << 9))) {
2158 good_mbuf[good_mbuf_cnt] = (u16) val;
2159 good_mbuf_cnt++;
2160 }
2161
2162 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2163 }
2164
2165 /* Free the good ones back to the mbuf pool, thus discarding
2166 * all the bad ones. */
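	/* The free command carries the mbuf handle in both the upper
	 * (bit 9 and above) and lower bit fields, with bit 0 set; e.g.
	 * handle 0x12 is written as (0x12 << 9) | 0x12 | 1 = 0x2413.
	 */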
2167 while (good_mbuf_cnt) {
2168 good_mbuf_cnt--;
2169
2170 val = good_mbuf[good_mbuf_cnt];
2171 val = (val << 9) | val | 1;
2172
2173 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2174 }
2175 kfree(good_mbuf);
2176 return 0;
2177 }
2178
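/* Program the perfect-match filter: the first two bytes of the MAC address
 * go into MATCH0 and the remaining four into MATCH1, so 00:10:18:ab:cd:ef
 * yields MATCH0 = 0x00000010 and MATCH1 = 0x18abcdef.
 */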
2179 static void
2180 bnx2_set_mac_addr(struct bnx2 *bp)
2181 {
2182 u32 val;
2183 u8 *mac_addr = bp->dev->dev_addr;
2184
2185 val = (mac_addr[0] << 8) | mac_addr[1];
2186
2187 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2188
2189 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2190 (mac_addr[4] << 8) | mac_addr[5];
2191
2192 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2193 }
2194
2195 static inline int
2196 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2197 {
2198 struct sk_buff *skb;
2199 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2200 dma_addr_t mapping;
2201 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2202 unsigned long align;
2203
2204 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2205 if (skb == NULL) {
2206 return -ENOMEM;
2207 }
2208
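	/* netdev_alloc_skb() does not guarantee BNX2_RX_ALIGN alignment,
	 * so push skb->data up to the next boundary when needed; the
	 * extra headroom is included in rx_buf_size.
	 */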
2209 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2210 skb_reserve(skb, BNX2_RX_ALIGN - align);
2211
2212 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2213 PCI_DMA_FROMDEVICE);
2214
2215 rx_buf->skb = skb;
2216 pci_unmap_addr_set(rx_buf, mapping, mapping);
2217
2218 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2219 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2220
2221 bp->rx_prod_bseq += bp->rx_buf_use_size;
2222
2223 return 0;
2224 }
2225
2226 static int
2227 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2228 {
2229 struct status_block *sblk = bp->status_blk;
2230 u32 new_link_state, old_link_state;
2231 int is_set = 1;
2232
2233 new_link_state = sblk->status_attn_bits & event;
2234 old_link_state = sblk->status_attn_bits_ack & event;
2235 if (new_link_state != old_link_state) {
2236 if (new_link_state)
2237 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2238 else
2239 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2240 } else
2241 is_set = 0;
2242
2243 return is_set;
2244 }
2245
2246 static void
2247 bnx2_phy_int(struct bnx2 *bp)
2248 {
2249 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2250 spin_lock(&bp->phy_lock);
2251 bnx2_set_link(bp);
2252 spin_unlock(&bp->phy_lock);
2253 }
2254 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2255 bnx2_set_remote_link(bp);
2256
2257 }
2258
2259 static void
2260 bnx2_tx_int(struct bnx2 *bp)
2261 {
2262 struct status_block *sblk = bp->status_blk;
2263 u16 hw_cons, sw_cons, sw_ring_cons;
2264 int tx_free_bd = 0;
2265
2266 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
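	/* The last slot in each ring page holds a chain (next-page) BD
	 * that never generates a completion, so step past it when the
	 * consumer index lands there.
	 */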
2267 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2268 hw_cons++;
2269 }
2270 sw_cons = bp->tx_cons;
2271
2272 while (sw_cons != hw_cons) {
2273 struct sw_bd *tx_buf;
2274 struct sk_buff *skb;
2275 int i, last;
2276
2277 sw_ring_cons = TX_RING_IDX(sw_cons);
2278
2279 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2280 skb = tx_buf->skb;
2281
2282 /* partial BD completions possible with TSO packets */
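		/* last_idx is the index just past this packet's final BD;
		 * the signed 16-bit arithmetic below stays correct across
		 * index wraparound.  If the hardware has not consumed the
		 * final BD yet, stop and wait for the next completion.
		 */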
2283 if (skb_is_gso(skb)) {
2284 u16 last_idx, last_ring_idx;
2285
2286 last_idx = sw_cons +
2287 skb_shinfo(skb)->nr_frags + 1;
2288 last_ring_idx = sw_ring_cons +
2289 skb_shinfo(skb)->nr_frags + 1;
2290 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2291 last_idx++;
2292 }
2293 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2294 break;
2295 }
2296 }
2297
2298 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2299 skb_headlen(skb), PCI_DMA_TODEVICE);
2300
2301 tx_buf->skb = NULL;
2302 last = skb_shinfo(skb)->nr_frags;
2303
2304 for (i = 0; i < last; i++) {
2305 sw_cons = NEXT_TX_BD(sw_cons);
2306
2307 pci_unmap_page(bp->pdev,
2308 pci_unmap_addr(
2309 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2310 mapping),
2311 skb_shinfo(skb)->frags[i].size,
2312 PCI_DMA_TODEVICE);
2313 }
2314
2315 sw_cons = NEXT_TX_BD(sw_cons);
2316
2317 tx_free_bd += last + 1;
2318
2319 dev_kfree_skb(skb);
2320
2321 hw_cons = bp->hw_tx_cons =
2322 sblk->status_tx_quick_consumer_index0;
2323
2324 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2325 hw_cons++;
2326 }
2327 }
2328
2329 bp->tx_cons = sw_cons;
2330 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2331 * before checking for netif_queue_stopped(). Without the
2332 * memory barrier, there is a small possibility that bnx2_start_xmit()
2333 * will miss it and cause the queue to be stopped forever.
2334 */
2335 smp_mb();
2336
2337 if (unlikely(netif_queue_stopped(bp->dev)) &&
2338 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2339 netif_tx_lock(bp->dev);
2340 if ((netif_queue_stopped(bp->dev)) &&
2341 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2342 netif_wake_queue(bp->dev);
2343 netif_tx_unlock(bp->dev);
2344 }
2345 }
2346
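/* Recycle an rx buffer: move the skb and its DMA mapping from the consumer
 * slot back to the producer slot, so the ring keeps a full complement of
 * buffers when a packet is copied or a replacement allocation fails.
 */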
2347 static inline void
2348 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2349 u16 cons, u16 prod)
2350 {
2351 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2352 struct rx_bd *cons_bd, *prod_bd;
2353
2354 cons_rx_buf = &bp->rx_buf_ring[cons];
2355 prod_rx_buf = &bp->rx_buf_ring[prod];
2356
2357 pci_dma_sync_single_for_device(bp->pdev,
2358 pci_unmap_addr(cons_rx_buf, mapping),
2359 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2360
2361 bp->rx_prod_bseq += bp->rx_buf_use_size;
2362
2363 prod_rx_buf->skb = skb;
2364
2365 if (cons == prod)
2366 return;
2367
2368 pci_unmap_addr_set(prod_rx_buf, mapping,
2369 pci_unmap_addr(cons_rx_buf, mapping));
2370
2371 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2372 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2373 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2374 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2375 }
2376
2377 static int
2378 bnx2_rx_int(struct bnx2 *bp, int budget)
2379 {
2380 struct status_block *sblk = bp->status_blk;
2381 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2382 struct l2_fhdr *rx_hdr;
2383 int rx_pkt = 0;
2384
2385 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2386 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2387 hw_cons++;
2388 }
2389 sw_cons = bp->rx_cons;
2390 sw_prod = bp->rx_prod;
2391
2392 /* Memory barrier necessary as speculative reads of the rx
2393 * buffer can be ahead of the index in the status block
2394 */
2395 rmb();
2396 while (sw_cons != hw_cons) {
2397 unsigned int len;
2398 u32 status;
2399 struct sw_bd *rx_buf;
2400 struct sk_buff *skb;
2401 dma_addr_t dma_addr;
2402
2403 sw_ring_cons = RX_RING_IDX(sw_cons);
2404 sw_ring_prod = RX_RING_IDX(sw_prod);
2405
2406 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2407 skb = rx_buf->skb;
2408
2409 rx_buf->skb = NULL;
2410
2411 dma_addr = pci_unmap_addr(rx_buf, mapping);
2412
2413 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2414 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2415
2416 rx_hdr = (struct l2_fhdr *) skb->data;
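		/* l2_fhdr_pkt_len includes the 4-byte frame CRC appended
		 * by the MAC; it is not passed up the stack.
		 */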
2417 len = rx_hdr->l2_fhdr_pkt_len - 4;
2418
2419 if ((status = rx_hdr->l2_fhdr_status) &
2420 (L2_FHDR_ERRORS_BAD_CRC |
2421 L2_FHDR_ERRORS_PHY_DECODE |
2422 L2_FHDR_ERRORS_ALIGNMENT |
2423 L2_FHDR_ERRORS_TOO_SHORT |
2424 L2_FHDR_ERRORS_GIANT_FRAME)) {
2425
2426 goto reuse_rx;
2427 }
2428
2429 /* Since we don't have a jumbo ring, copy small packets
2430 * if mtu > 1500
2431 */
2432 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2433 struct sk_buff *new_skb;
2434
2435 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2436 if (new_skb == NULL)
2437 goto reuse_rx;
2438
2439 /* aligned copy */
2440 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2441 new_skb->data, len + 2);
2442 skb_reserve(new_skb, 2);
2443 skb_put(new_skb, len);
2444
2445 bnx2_reuse_rx_skb(bp, skb,
2446 sw_ring_cons, sw_ring_prod);
2447
2448 skb = new_skb;
2449 }
2450 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2451 pci_unmap_single(bp->pdev, dma_addr,
2452 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2453
2454 skb_reserve(skb, bp->rx_offset);
2455 skb_put(skb, len);
2456 }
2457 else {
2458 reuse_rx:
2459 bnx2_reuse_rx_skb(bp, skb,
2460 sw_ring_cons, sw_ring_prod);
2461 goto next_rx;
2462 }
2463
2464 skb->protocol = eth_type_trans(skb, bp->dev);
2465
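		/* Drop over-length frames unless they carry an 802.1Q tag
		 * (protocol 0x8100), which legitimately adds 4 bytes.
		 */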
2466 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2467 (ntohs(skb->protocol) != 0x8100)) {
2468
2469 dev_kfree_skb(skb);
2470 goto next_rx;
2471
2472 }
2473
2474 skb->ip_summed = CHECKSUM_NONE;
2475 if (bp->rx_csum &&
2476 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2477 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2478
2479 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2480 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2481 skb->ip_summed = CHECKSUM_UNNECESSARY;
2482 }
2483
2484 #ifdef BCM_VLAN
2485 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2486 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2487 rx_hdr->l2_fhdr_vlan_tag);
2488 }
2489 else
2490 #endif
2491 netif_receive_skb(skb);
2492
2493 bp->dev->last_rx = jiffies;
2494 rx_pkt++;
2495
2496 next_rx:
2497 sw_cons = NEXT_RX_BD(sw_cons);
2498 sw_prod = NEXT_RX_BD(sw_prod);
2499
2500 if (rx_pkt == budget)
2501 break;
2502
2503 /* Refresh hw_cons to see if there is new work */
2504 if (sw_cons == hw_cons) {
2505 hw_cons = bp->hw_rx_cons =
2506 sblk->status_rx_quick_consumer_index0;
2507 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2508 hw_cons++;
2509 rmb();
2510 }
2511 }
2512 bp->rx_cons = sw_cons;
2513 bp->rx_prod = sw_prod;
2514
2515 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2516
2517 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2518
2519 mmiowb();
2520
2521 return rx_pkt;
2522
2523 }
2524
2525 /* MSI ISR - The only difference between this and the INTx ISR
2526 * is that the MSI interrupt is always serviced.
2527 */
2528 static irqreturn_t
2529 bnx2_msi(int irq, void *dev_instance)
2530 {
2531 struct net_device *dev = dev_instance;
2532 struct bnx2 *bp = netdev_priv(dev);
2533
2534 prefetch(bp->status_blk);
2535 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2536 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2537 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2538
2539 /* Return here if interrupt is disabled. */
2540 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2541 return IRQ_HANDLED;
2542
2543 netif_rx_schedule(dev);
2544
2545 return IRQ_HANDLED;
2546 }
2547
2548 static irqreturn_t
2549 bnx2_msi_1shot(int irq, void *dev_instance)
2550 {
2551 struct net_device *dev = dev_instance;
2552 struct bnx2 *bp = netdev_priv(dev);
2553
2554 prefetch(bp->status_blk);
2555
2556 /* Return here if interrupt is disabled. */
2557 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2558 return IRQ_HANDLED;
2559
2560 netif_rx_schedule(dev);
2561
2562 return IRQ_HANDLED;
2563 }
2564
2565 static irqreturn_t
2566 bnx2_interrupt(int irq, void *dev_instance)
2567 {
2568 struct net_device *dev = dev_instance;
2569 struct bnx2 *bp = netdev_priv(dev);
2570 struct status_block *sblk = bp->status_blk;
2571
2572 /* When using INTx, it is possible for the interrupt to arrive
2573 * at the CPU before the status block write posted prior to the
2574 * interrupt. Reading a register will flush the status block.
2575 * When using MSI, the MSI message will always complete after
2576 * the status block write.
2577 */
2578 if ((sblk->status_idx == bp->last_status_idx) &&
2579 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2580 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2581 return IRQ_NONE;
2582
2583 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2584 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2585 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2586
2587 /* Read back to deassert IRQ immediately to avoid too many
2588 * spurious interrupts.
2589 */
2590 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2591
2592 /* Return here if interrupt is shared and is disabled. */
2593 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2594 return IRQ_HANDLED;
2595
2596 if (netif_rx_schedule_prep(dev)) {
2597 bp->last_status_idx = sblk->status_idx;
2598 __netif_rx_schedule(dev);
2599 }
2600
2601 return IRQ_HANDLED;
2602 }
2603
2604 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2605 STATUS_ATTN_BITS_TIMER_ABORT)
2606
2607 static inline int
2608 bnx2_has_work(struct bnx2 *bp)
2609 {
2610 struct status_block *sblk = bp->status_blk;
2611
2612 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2613 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2614 return 1;
2615
2616 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2617 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2618 return 1;
2619
2620 return 0;
2621 }
2622
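/* Old-style NAPI poll: return 1 while work remains so the device stays on
 * the poll list, or 0 once done, after re-arming interrupts through the
 * INT_ACK command register.
 */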
2623 static int
2624 bnx2_poll(struct net_device *dev, int *budget)
2625 {
2626 struct bnx2 *bp = netdev_priv(dev);
2627 struct status_block *sblk = bp->status_blk;
2628 u32 status_attn_bits = sblk->status_attn_bits;
2629 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2630
2631 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2632 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2633
2634 bnx2_phy_int(bp);
2635
2636 /* This is needed to take care of transient status
2637 * during link changes.
2638 */
2639 REG_WR(bp, BNX2_HC_COMMAND,
2640 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2641 REG_RD(bp, BNX2_HC_COMMAND);
2642 }
2643
2644 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2645 bnx2_tx_int(bp);
2646
2647 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2648 int orig_budget = *budget;
2649 int work_done;
2650
2651 if (orig_budget > dev->quota)
2652 orig_budget = dev->quota;
2653
2654 work_done = bnx2_rx_int(bp, orig_budget);
2655 *budget -= work_done;
2656 dev->quota -= work_done;
2657 }
2658
2659 bp->last_status_idx = bp->status_blk->status_idx;
2660 rmb();
2661
2662 if (!bnx2_has_work(bp)) {
2663 netif_rx_complete(dev);
2664 if (likely(bp->flags & USING_MSI_FLAG)) {
2665 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2666 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2667 bp->last_status_idx);
2668 return 0;
2669 }
2670 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2671 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2672 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2673 bp->last_status_idx);
2674
2675 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2676 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2677 bp->last_status_idx);
2678 return 0;
2679 }
2680
2681 return 1;
2682 }
2683
2684 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2685 * from set_multicast.
2686 */
2687 static void
2688 bnx2_set_rx_mode(struct net_device *dev)
2689 {
2690 struct bnx2 *bp = netdev_priv(dev);
2691 u32 rx_mode, sort_mode;
2692 int i;
2693
2694 spin_lock_bh(&bp->phy_lock);
2695
2696 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2697 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2698 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2699 #ifdef BCM_VLAN
2700 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2701 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2702 #else
2703 if (!(bp->flags & ASF_ENABLE_FLAG))
2704 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2705 #endif
2706 if (dev->flags & IFF_PROMISC) {
2707 /* Promiscuous mode. */
2708 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2709 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2710 BNX2_RPM_SORT_USER0_PROM_VLAN;
2711 }
2712 else if (dev->flags & IFF_ALLMULTI) {
2713 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2714 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2715 0xffffffff);
2716 }
2717 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2718 }
2719 else {
2720 /* Accept one or more multicast(s). */
2721 struct dev_mc_list *mclist;
2722 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2723 u32 regidx;
2724 u32 bit;
2725 u32 crc;
2726
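		/* 256-bin hash filter: the low byte of the CRC selects one
		 * bit spread across eight 32-bit registers; e.g. a low
		 * byte of 0x6d sets bit 13 of hash register 3.
		 */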
2727 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2728
2729 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2730 i++, mclist = mclist->next) {
2731
2732 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2733 bit = crc & 0xff;
2734 regidx = (bit & 0xe0) >> 5;
2735 bit &= 0x1f;
2736 mc_filter[regidx] |= (1 << bit);
2737 }
2738
2739 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2740 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2741 mc_filter[i]);
2742 }
2743
2744 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2745 }
2746
2747 if (rx_mode != bp->rx_mode) {
2748 bp->rx_mode = rx_mode;
2749 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2750 }
2751
2752 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2753 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2754 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2755
2756 spin_unlock_bh(&bp->phy_lock);
2757 }
2758
2759 #define FW_BUF_SIZE 0x8000
2760
2761 static int
2762 bnx2_gunzip_init(struct bnx2 *bp)
2763 {
2764 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2765 goto gunzip_nomem1;
2766
2767 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2768 goto gunzip_nomem2;
2769
2770 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2771 if (bp->strm->workspace == NULL)
2772 goto gunzip_nomem3;
2773
2774 return 0;
2775
2776 gunzip_nomem3:
2777 kfree(bp->strm);
2778 bp->strm = NULL;
2779
2780 gunzip_nomem2:
2781 vfree(bp->gunzip_buf);
2782 bp->gunzip_buf = NULL;
2783
2784 gunzip_nomem1:
2785 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2786 "decompression.\n", bp->dev->name);
2787 return -ENOMEM;
2788 }
2789
2790 static void
2791 bnx2_gunzip_end(struct bnx2 *bp)
2792 {
2793 kfree(bp->strm->workspace);
2794
2795 kfree(bp->strm);
2796 bp->strm = NULL;
2797
2798 if (bp->gunzip_buf) {
2799 vfree(bp->gunzip_buf);
2800 bp->gunzip_buf = NULL;
2801 }
2802 }
2803
2804 static int
2805 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2806 {
2807 int n, rc;
2808
2809 /* check gzip header */
2810 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2811 return -EINVAL;
2812
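	/* Skip the 10-byte fixed gzip header, plus the NUL-terminated
	 * file name field when FNAME is set.  The negative window-bits
	 * value passed to zlib_inflateInit2() below selects raw deflate
	 * decoding, since the gzip wrapper is parsed by hand here.
	 */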
2813 n = 10;
2814
2815 #define FNAME 0x8
2816 if (zbuf[3] & FNAME)
2817 while ((zbuf[n++] != 0) && (n < len));
2818
2819 bp->strm->next_in = zbuf + n;
2820 bp->strm->avail_in = len - n;
2821 bp->strm->next_out = bp->gunzip_buf;
2822 bp->strm->avail_out = FW_BUF_SIZE;
2823
2824 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2825 if (rc != Z_OK)
2826 return rc;
2827
2828 rc = zlib_inflate(bp->strm, Z_FINISH);
2829
2830 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2831 *outbuf = bp->gunzip_buf;
2832
2833 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2834 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2835 bp->dev->name, bp->strm->msg);
2836
2837 zlib_inflateEnd(bp->strm);
2838
2839 if (rc == Z_STREAM_END)
2840 return 0;
2841
2842 return rc;
2843 }
2844
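/* Each RV2P instruction is 64 bits: write the two halves to INSTR_HIGH and
 * INSTR_LOW, then commit them to instruction slot i / 8 through the chosen
 * processor's ADDR_CMD register.
 */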
2845 static void
2846 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2847 u32 rv2p_proc)
2848 {
2849 int i;
2850 u32 val;
2851
2852
2853 for (i = 0; i < rv2p_code_len; i += 8) {
2854 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2855 rv2p_code++;
2856 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2857 rv2p_code++;
2858
2859 if (rv2p_proc == RV2P_PROC1) {
2860 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2861 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2862 }
2863 else {
2864 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2865 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2866 }
2867 }
2868
2869 /* Reset the processor, un-stall is done later. */
2870 if (rv2p_proc == RV2P_PROC1) {
2871 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2872 }
2873 else {
2874 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2875 }
2876 }
2877
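/* Download a firmware image into one of the on-chip CPUs: halt the CPU,
 * copy each section into its scratchpad (translating MIPS-view addresses
 * to scratchpad offsets and decompressing the text section), then point
 * the program counter at the entry address and release the halt.
 */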
2878 static int
2879 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2880 {
2881 u32 offset;
2882 u32 val;
2883 int rc;
2884
2885 /* Halt the CPU. */
2886 val = REG_RD_IND(bp, cpu_reg->mode);
2887 val |= cpu_reg->mode_value_halt;
2888 REG_WR_IND(bp, cpu_reg->mode, val);
2889 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2890
2891 /* Load the Text area. */
2892 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2893 if (fw->gz_text) {
2894 u32 text_len;
2895 void *text;
2896
2897 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2898 &text_len);
2899 if (rc)
2900 return rc;
2901
2902 fw->text = text;
2903 }
2904 if (fw->gz_text) {
2905 int j;
2906
2907 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2908 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2909 }
2910 }
2911
2912 /* Load the Data area. */
2913 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2914 if (fw->data) {
2915 int j;
2916
2917 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2918 REG_WR_IND(bp, offset, fw->data[j]);
2919 }
2920 }
2921
2922 /* Load the SBSS area. */
2923 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2924 if (fw->sbss) {
2925 int j;
2926
2927 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2928 REG_WR_IND(bp, offset, fw->sbss[j]);
2929 }
2930 }
2931
2932 /* Load the BSS area. */
2933 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2934 if (fw->bss) {
2935 int j;
2936
2937 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2938 REG_WR_IND(bp, offset, fw->bss[j]);
2939 }
2940 }
2941
2942 /* Load the Read-Only area. */
2943 offset = cpu_reg->spad_base +
2944 (fw->rodata_addr - cpu_reg->mips_view_base);
2945 if (fw->rodata) {
2946 int j;
2947
2948 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2949 REG_WR_IND(bp, offset, fw->rodata[j]);
2950 }
2951 }
2952
2953 /* Clear the pre-fetch instruction. */
2954 REG_WR_IND(bp, cpu_reg->inst, 0);
2955 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2956
2957 /* Start the CPU. */
2958 val = REG_RD_IND(bp, cpu_reg->mode);
2959 val &= ~cpu_reg->mode_value_halt;
2960 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2961 REG_WR_IND(bp, cpu_reg->mode, val);
2962
2963 return 0;
2964 }
2965
2966 static int
2967 bnx2_init_cpus(struct bnx2 *bp)
2968 {
2969 struct cpu_reg cpu_reg;
2970 struct fw_info *fw;
2971 int rc = 0;
2972 void *text;
2973 u32 text_len;
2974
2975 if ((rc = bnx2_gunzip_init(bp)) != 0)
2976 return rc;
2977
2978 /* Initialize the RV2P processor. */
2979 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2980 &text_len);
2981 if (rc)
2982 goto init_cpu_err;
2983
2984 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2985
2986 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2987 &text_len);
2988 if (rc)
2989 goto init_cpu_err;
2990
2991 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2992
2993 /* Initialize the RX Processor. */
2994 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2995 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2996 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2997 cpu_reg.state = BNX2_RXP_CPU_STATE;
2998 cpu_reg.state_value_clear = 0xffffff;
2999 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3000 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3001 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3002 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3003 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3004 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3005 cpu_reg.mips_view_base = 0x8000000;
3006
3007 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3008 fw = &bnx2_rxp_fw_09;
3009 else
3010 fw = &bnx2_rxp_fw_06;
3011
3012 rc = load_cpu_fw(bp, &cpu_reg, fw);
3013 if (rc)
3014 goto init_cpu_err;
3015
3016 /* Initialize the TX Processor. */
3017 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3018 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3019 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3020 cpu_reg.state = BNX2_TXP_CPU_STATE;
3021 cpu_reg.state_value_clear = 0xffffff;
3022 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3023 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3024 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3025 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3026 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3027 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3028 cpu_reg.mips_view_base = 0x8000000;
3029
3030 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3031 fw = &bnx2_txp_fw_09;
3032 else
3033 fw = &bnx2_txp_fw_06;
3034
3035 rc = load_cpu_fw(bp, &cpu_reg, fw);
3036 if (rc)
3037 goto init_cpu_err;
3038
3039 /* Initialize the TX Patch-up Processor. */
3040 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3041 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3042 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3043 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3044 cpu_reg.state_value_clear = 0xffffff;
3045 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3046 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3047 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3048 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3049 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3050 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3051 cpu_reg.mips_view_base = 0x8000000;
3052
3053 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3054 fw = &bnx2_tpat_fw_09;
3055 else
3056 fw = &bnx2_tpat_fw_06;
3057
3058 rc = load_cpu_fw(bp, &cpu_reg, fw);
3059 if (rc)
3060 goto init_cpu_err;
3061
3062 /* Initialize the Completion Processor. */
3063 cpu_reg.mode = BNX2_COM_CPU_MODE;
3064 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3065 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3066 cpu_reg.state = BNX2_COM_CPU_STATE;
3067 cpu_reg.state_value_clear = 0xffffff;
3068 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3069 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3070 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3071 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3072 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3073 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3074 cpu_reg.mips_view_base = 0x8000000;
3075
3076 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3077 fw = &bnx2_com_fw_09;
3078 else
3079 fw = &bnx2_com_fw_06;
3080
3081 rc = load_cpu_fw(bp, &cpu_reg, fw);
3082 if (rc)
3083 goto init_cpu_err;
3084
3085 /* Initialize the Command Processor. */
3086 cpu_reg.mode = BNX2_CP_CPU_MODE;
3087 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3088 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3089 cpu_reg.state = BNX2_CP_CPU_STATE;
3090 cpu_reg.state_value_clear = 0xffffff;
3091 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3092 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3093 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3094 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3095 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3096 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3097 cpu_reg.mips_view_base = 0x8000000;
3098
3099 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3100 fw = &bnx2_cp_fw_09;
3101
3102 rc = load_cpu_fw(bp, &cpu_reg, fw);
3103 if (rc)
3104 goto init_cpu_err;
3105 }
3106 init_cpu_err:
3107 bnx2_gunzip_end(bp);
3108 return rc;
3109 }
3110
3111 static int
3112 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3113 {
3114 u16 pmcsr;
3115
3116 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3117
3118 switch (state) {
3119 case PCI_D0: {
3120 u32 val;
3121
3122 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3123 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3124 PCI_PM_CTRL_PME_STATUS);
3125
3126 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3127 /* delay required during transition out of D3hot */
3128 msleep(20);
3129
3130 val = REG_RD(bp, BNX2_EMAC_MODE);
3131 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3132 val &= ~BNX2_EMAC_MODE_MPKT;
3133 REG_WR(bp, BNX2_EMAC_MODE, val);
3134
3135 val = REG_RD(bp, BNX2_RPM_CONFIG);
3136 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3137 REG_WR(bp, BNX2_RPM_CONFIG, val);
3138 break;
3139 }
3140 case PCI_D3hot: {
3141 int i;
3142 u32 val, wol_msg;
3143
3144 if (bp->wol) {
3145 u32 advertising;
3146 u8 autoneg;
3147
3148 autoneg = bp->autoneg;
3149 advertising = bp->advertising;
3150
3151 bp->autoneg = AUTONEG_SPEED;
3152 bp->advertising = ADVERTISED_10baseT_Half |
3153 ADVERTISED_10baseT_Full |
3154 ADVERTISED_100baseT_Half |
3155 ADVERTISED_100baseT_Full |
3156 ADVERTISED_Autoneg;
3157
3158 bnx2_setup_copper_phy(bp);
3159
3160 bp->autoneg = autoneg;
3161 bp->advertising = advertising;
3162
3163 bnx2_set_mac_addr(bp);
3164
3165 val = REG_RD(bp, BNX2_EMAC_MODE);
3166
3167 /* Enable port mode. */
3168 val &= ~BNX2_EMAC_MODE_PORT;
3169 val |= BNX2_EMAC_MODE_PORT_MII |
3170 BNX2_EMAC_MODE_MPKT_RCVD |
3171 BNX2_EMAC_MODE_ACPI_RCVD |
3172 BNX2_EMAC_MODE_MPKT;
3173
3174 REG_WR(bp, BNX2_EMAC_MODE, val);
3175
3176 /* receive all multicast */
3177 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3178 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3179 0xffffffff);
3180 }
3181 REG_WR(bp, BNX2_EMAC_RX_MODE,
3182 BNX2_EMAC_RX_MODE_SORT_MODE);
3183
3184 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3185 BNX2_RPM_SORT_USER0_MC_EN;
3186 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3187 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3188 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3189 BNX2_RPM_SORT_USER0_ENA);
3190
3191 /* Need to enable EMAC and RPM for WOL. */
3192 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3193 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3194 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3195 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3196
3197 val = REG_RD(bp, BNX2_RPM_CONFIG);
3198 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3199 REG_WR(bp, BNX2_RPM_CONFIG, val);
3200
3201 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3202 }
3203 else {
3204 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3205 }
3206
3207 if (!(bp->flags & NO_WOL_FLAG))
3208 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3209
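		/* Set the PM state field to 3 (D3hot).  5706 A0/A1 parts
		 * are left in D0 here unless WOL is enabled.
		 */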
3210 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3211 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3212 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3213
3214 if (bp->wol)
3215 pmcsr |= 3;
3216 }
3217 else {
3218 pmcsr |= 3;
3219 }
3220 if (bp->wol) {
3221 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3222 }
3223 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3224 pmcsr);
3225
3226 /* No more memory access after this point until
3227 * the device is brought back to D0.
3228 */
3229 udelay(50);
3230 break;
3231 }
3232 default:
3233 return -EINVAL;
3234 }
3235 return 0;
3236 }
3237
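/* NVRAM access goes through a software arbiter with several request/grant
 * slots; the driver claims slot 2 (ARB_REQ_SET2) and polls for the
 * matching grant bit (ARB_ARB2).
 */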
3238 static int
3239 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3240 {
3241 u32 val;
3242 int j;
3243
3244 /* Request access to the flash interface. */
3245 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3246 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3247 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3248 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3249 break;
3250
3251 udelay(5);
3252 }
3253
3254 if (j >= NVRAM_TIMEOUT_COUNT)
3255 return -EBUSY;
3256
3257 return 0;
3258 }
3259
3260 static int
3261 bnx2_release_nvram_lock(struct bnx2 *bp)
3262 {
3263 int j;
3264 u32 val;
3265
3266 /* Relinquish nvram interface. */
3267 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3268
3269 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3270 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3271 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3272 break;
3273
3274 udelay(5);
3275 }
3276
3277 if (j >= NVRAM_TIMEOUT_COUNT)
3278 return -EBUSY;
3279
3280 return 0;
3281 }
3282
3283
3284 static int
3285 bnx2_enable_nvram_write(struct bnx2 *bp)
3286 {
3287 u32 val;
3288
3289 val = REG_RD(bp, BNX2_MISC_CFG);
3290 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3291
3292 if (!bp->flash_info->buffered) {
3293 int j;
3294
3295 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3296 REG_WR(bp, BNX2_NVM_COMMAND,
3297 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3298
3299 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3300 udelay(5);
3301
3302 val = REG_RD(bp, BNX2_NVM_COMMAND);
3303 if (val & BNX2_NVM_COMMAND_DONE)
3304 break;
3305 }
3306
3307 if (j >= NVRAM_TIMEOUT_COUNT)
3308 return -EBUSY;
3309 }
3310 return 0;
3311 }
3312
3313 static void
3314 bnx2_disable_nvram_write(struct bnx2 *bp)
3315 {
3316 u32 val;
3317
3318 val = REG_RD(bp, BNX2_MISC_CFG);
3319 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3320 }
3321
3322
3323 static void
3324 bnx2_enable_nvram_access(struct bnx2 *bp)
3325 {
3326 u32 val;
3327
3328 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3329 /* Enable both bits, even on read. */
3330 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3331 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3332 }
3333
3334 static void
3335 bnx2_disable_nvram_access(struct bnx2 *bp)
3336 {
3337 u32 val;
3338
3339 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3340 /* Disable both bits, even after read. */
3341 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3342 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3343 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3344 }
3345
3346 static int
3347 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3348 {
3349 u32 cmd;
3350 int j;
3351
3352 if (bp->flash_info->buffered)
3353 /* Buffered flash, no erase needed */
3354 return 0;
3355
3356 /* Build an erase command */
3357 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3358 BNX2_NVM_COMMAND_DOIT;
3359
3360 /* Need to clear DONE bit separately. */
3361 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3362
3363 /* Address of the NVRAM sector to erase. */
3364 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3365
3366 /* Issue an erase command. */
3367 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3368
3369 /* Wait for completion. */
3370 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3371 u32 val;
3372
3373 udelay(5);
3374
3375 val = REG_RD(bp, BNX2_NVM_COMMAND);
3376 if (val & BNX2_NVM_COMMAND_DONE)
3377 break;
3378 }
3379
3380 if (j >= NVRAM_TIMEOUT_COUNT)
3381 return -EBUSY;
3382
3383 return 0;
3384 }
3385
3386 static int
3387 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3388 {
3389 u32 cmd;
3390 int j;
3391
3392 /* Build the command word. */
3393 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3394
3395 /* Calculate an offset of a buffered flash. */
3396 if (bp->flash_info->buffered) {
3397 offset = ((offset / bp->flash_info->page_size) <<
3398 bp->flash_info->page_bits) +
3399 (offset % bp->flash_info->page_size);
3400 }
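	/* The translation above maps, e.g., linear offset 600 with a
	 * 264-byte page and page_bits of 9 to page 2, byte 72:
	 * (2 << 9) + 72 = 1096.  (Values illustrative.)
	 */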
3401
3402 /* Need to clear DONE bit separately. */
3403 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3404
3405 /* Address of the NVRAM to read from. */
3406 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3407
3408 /* Issue a read command. */
3409 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3410
3411 /* Wait for completion. */
3412 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3413 u32 val;
3414
3415 udelay(5);
3416
3417 val = REG_RD(bp, BNX2_NVM_COMMAND);
3418 if (val & BNX2_NVM_COMMAND_DONE) {
3419 val = REG_RD(bp, BNX2_NVM_READ);
3420
3421 val = be32_to_cpu(val);
3422 memcpy(ret_val, &val, 4);
3423 break;
3424 }
3425 }
3426 if (j >= NVRAM_TIMEOUT_COUNT)
3427 return -EBUSY;
3428
3429 return 0;
3430 }
3431
3432
3433 static int
3434 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3435 {
3436 u32 cmd, val32;
3437 int j;
3438
3439 /* Build the command word. */
3440 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3441
3442 /* Calculate an offset of a buffered flash. */
3443 if (bp->flash_info->buffered) {
3444 offset = ((offset / bp->flash_info->page_size) <<
3445 bp->flash_info->page_bits) +
3446 (offset % bp->flash_info->page_size);
3447 }
3448
3449 /* Need to clear DONE bit separately. */
3450 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3451
3452 memcpy(&val32, val, 4);
3453 val32 = cpu_to_be32(val32);
3454
3455 /* Write the data. */
3456 REG_WR(bp, BNX2_NVM_WRITE, val32);
3457
3458 /* Address of the NVRAM to write to. */
3459 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3460
3461 /* Issue the write command. */
3462 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3463
3464 /* Wait for completion. */
3465 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3466 udelay(5);
3467
3468 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3469 break;
3470 }
3471 if (j >= NVRAM_TIMEOUT_COUNT)
3472 return -EBUSY;
3473
3474 return 0;
3475 }
3476
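/* Identify the attached flash/EEPROM part: if the interface has already
 * been reconfigured (bit 30 of NVM_CFG1), match on the stored config;
 * otherwise match the strap pins against the flash table and program the
 * part's configuration registers.
 */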
3477 static int
3478 bnx2_init_nvram(struct bnx2 *bp)
3479 {
3480 u32 val;
3481 int j, entry_count, rc;
3482 struct flash_spec *flash;
3483
3484 /* Determine the selected interface. */
3485 val = REG_RD(bp, BNX2_NVM_CFG1);
3486
3487 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3488
3489 rc = 0;
3490 if (val & 0x40000000) {
3491
3492 /* Flash interface has been reconfigured */
3493 for (j = 0, flash = &flash_table[0]; j < entry_count;
3494 j++, flash++) {
3495 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3496 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3497 bp->flash_info = flash;
3498 break;
3499 }
3500 }
3501 }
3502 else {
3503 u32 mask;
3504 /* Not yet reconfigured */
3505
3506 if (val & (1 << 23))
3507 mask = FLASH_BACKUP_STRAP_MASK;
3508 else
3509 mask = FLASH_STRAP_MASK;
3510
3511 for (j = 0, flash = &flash_table[0]; j < entry_count;
3512 j++, flash++) {
3513
3514 if ((val & mask) == (flash->strapping & mask)) {
3515 bp->flash_info = flash;
3516
3517 /* Request access to the flash interface. */
3518 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3519 return rc;
3520
3521 /* Enable access to flash interface */
3522 bnx2_enable_nvram_access(bp);
3523
3524 /* Reconfigure the flash interface */
3525 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3526 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3527 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3528 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3529
3530 /* Disable access to flash interface */
3531 bnx2_disable_nvram_access(bp);
3532 bnx2_release_nvram_lock(bp);
3533
3534 break;
3535 }
3536 }
3537 } /* if (val & 0x40000000) */
3538
3539 if (j == entry_count) {
3540 bp->flash_info = NULL;
3541 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3542 return -ENODEV;
3543 }
3544
3545 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3546 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3547 if (val)
3548 bp->flash_size = val;
3549 else
3550 bp->flash_size = bp->flash_info->total_size;
3551
3552 return rc;
3553 }
3554
3555 static int
3556 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3557 int buf_size)
3558 {
3559 int rc = 0;
3560 u32 cmd_flags, offset32, len32, extra;
3561
3562 if (buf_size == 0)
3563 return 0;
3564
3565 /* Request access to the flash interface. */
3566 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3567 return rc;
3568
3569 /* Enable access to flash interface */
3570 bnx2_enable_nvram_access(bp);
3571
3572 len32 = buf_size;
3573 offset32 = offset;
3574 extra = 0;
3575
3576 cmd_flags = 0;
3577
3578 if (offset32 & 3) {
3579 u8 buf[4];
3580 u32 pre_len;
3581
3582 offset32 &= ~3;
3583 pre_len = 4 - (offset & 3);
3584
3585 if (pre_len >= len32) {
3586 pre_len = len32;
3587 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3588 BNX2_NVM_COMMAND_LAST;
3589 }
3590 else {
3591 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3592 }
3593
3594 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3595
3596 if (rc)
3597 return rc;
3598
3599 memcpy(ret_buf, buf + (offset & 3), pre_len);
3600
3601 offset32 += 4;
3602 ret_buf += pre_len;
3603 len32 -= pre_len;
3604 }
3605 if (len32 & 3) {
3606 extra = 4 - (len32 & 3);
3607 len32 = (len32 + 4) & ~3;
3608 }
3609
3610 if (len32 == 4) {
3611 u8 buf[4];
3612
3613 if (cmd_flags)
3614 cmd_flags = BNX2_NVM_COMMAND_LAST;
3615 else
3616 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3617 BNX2_NVM_COMMAND_LAST;
3618
3619 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3620
3621 memcpy(ret_buf, buf, 4 - extra);
3622 }
3623 else if (len32 > 0) {
3624 u8 buf[4];
3625
3626 /* Read the first word. */
3627 if (cmd_flags)
3628 cmd_flags = 0;
3629 else
3630 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3631
3632 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3633
3634 /* Advance to the next dword. */
3635 offset32 += 4;
3636 ret_buf += 4;
3637 len32 -= 4;
3638
3639 while (len32 > 4 && rc == 0) {
3640 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3641
3642 /* Advance to the next dword. */
3643 offset32 += 4;
3644 ret_buf += 4;
3645 len32 -= 4;
3646 }
3647
3648 if (rc)
3649 return rc;
3650
3651 cmd_flags = BNX2_NVM_COMMAND_LAST;
3652 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3653
3654 memcpy(ret_buf, buf, 4 - extra);
3655 }
3656
3657 /* Disable access to flash interface */
3658 bnx2_disable_nvram_access(bp);
3659
3660 bnx2_release_nvram_lock(bp);
3661
3662 return rc;
3663 }
3664
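/* NVRAM writes must be dword aligned, and non-buffered parts can only be
 * rewritten a whole erase page at a time.  Unaligned head/tail bytes are
 * merged with data read back from flash, and for non-buffered parts each
 * affected page is read out in full, erased, and rewritten with the new
 * bytes spliced in.
 */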
3665 static int
3666 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3667 int buf_size)
3668 {
3669 u32 written, offset32, len32;
3670 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3671 int rc = 0;
3672 int align_start, align_end;
3673
3674 buf = data_buf;
3675 offset32 = offset;
3676 len32 = buf_size;
3677 align_start = align_end = 0;
3678
3679 if ((align_start = (offset32 & 3))) {
3680 offset32 &= ~3;
3681 len32 += align_start;
3682 if (len32 < 4)
3683 len32 = 4;
3684 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3685 return rc;
3686 }
3687
3688 if (len32 & 3) {
3689 align_end = 4 - (len32 & 3);
3690 len32 += align_end;
3691 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3692 return rc;
3693 }
3694
3695 if (align_start || align_end) {
3696 align_buf = kmalloc(len32, GFP_KERNEL);
3697 if (align_buf == NULL)
3698 return -ENOMEM;
3699 if (align_start) {
3700 memcpy(align_buf, start, 4);
3701 }
3702 if (align_end) {
3703 memcpy(align_buf + len32 - 4, end, 4);
3704 }
3705 memcpy(align_buf + align_start, data_buf, buf_size);
3706 buf = align_buf;
3707 }
3708
3709 if (bp->flash_info->buffered == 0) {
3710 flash_buffer = kmalloc(264, GFP_KERNEL);
3711 if (flash_buffer == NULL) {
3712 rc = -ENOMEM;
3713 goto nvram_write_end;
3714 }
3715 }
3716
3717 written = 0;
3718 while ((written < len32) && (rc == 0)) {
3719 u32 page_start, page_end, data_start, data_end;
3720 u32 addr, cmd_flags;
3721 int i;
3722
3723 /* Find the page_start addr */
3724 page_start = offset32 + written;
3725 page_start -= (page_start % bp->flash_info->page_size);
3726 /* Find the page_end addr */
3727 page_end = page_start + bp->flash_info->page_size;
3728 /* Find the data_start addr */
3729 data_start = (written == 0) ? offset32 : page_start;
3730 /* Find the data_end addr */
3731 data_end = (page_end > offset32 + len32) ?
3732 (offset32 + len32) : page_end;
3733
3734 /* Request access to the flash interface. */
3735 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3736 goto nvram_write_end;
3737
3738 /* Enable access to flash interface */
3739 bnx2_enable_nvram_access(bp);
3740
3741 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3742 if (bp->flash_info->buffered == 0) {
3743 int j;
3744
3745 /* Read the whole page into the buffer
3746 * (non-buffered flash only) */
3747 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3748 if (j == (bp->flash_info->page_size - 4)) {
3749 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3750 }
3751 rc = bnx2_nvram_read_dword(bp,
3752 page_start + j,
3753 &flash_buffer[j],
3754 cmd_flags);
3755
3756 if (rc)
3757 goto nvram_write_end;
3758
3759 cmd_flags = 0;
3760 }
3761 }
3762
3763 /* Enable writes to flash interface (unlock write-protect) */
3764 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3765 goto nvram_write_end;
3766
3767 /* Loop to write back the buffer data from page_start to
3768 * data_start */
3769 i = 0;
3770 if (bp->flash_info->buffered == 0) {
3771 /* Erase the page */
3772 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3773 goto nvram_write_end;
3774
3775 /* Re-enable the write again for the actual write */
3776 bnx2_enable_nvram_write(bp);
3777
3778 for (addr = page_start; addr < data_start;
3779 addr += 4, i += 4) {
3780
3781 rc = bnx2_nvram_write_dword(bp, addr,
3782 &flash_buffer[i], cmd_flags);
3783
3784 if (rc != 0)
3785 goto nvram_write_end;
3786
3787 cmd_flags = 0;
3788 }
3789 }
3790
3791 /* Loop to write the new data from data_start to data_end */
3792 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3793 if ((addr == page_end - 4) ||
3794 ((bp->flash_info->buffered) &&
3795 (addr == data_end - 4))) {
3796
3797 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3798 }
3799 rc = bnx2_nvram_write_dword(bp, addr, buf,
3800 cmd_flags);
3801
3802 if (rc != 0)
3803 goto nvram_write_end;
3804
3805 cmd_flags = 0;
3806 buf += 4;
3807 }
3808
3809 /* Loop to write back the buffer data from data_end
3810 * to page_end */
3811 if (bp->flash_info->buffered == 0) {
3812 for (addr = data_end; addr < page_end;
3813 addr += 4, i += 4) {
3814
3815 if (addr == page_end-4) {
3816 cmd_flags = BNX2_NVM_COMMAND_LAST;
3817 }
3818 rc = bnx2_nvram_write_dword(bp, addr,
3819 &flash_buffer[i], cmd_flags);
3820
3821 if (rc != 0)
3822 goto nvram_write_end;
3823
3824 cmd_flags = 0;
3825 }
3826 }
3827
3828 /* Disable writes to flash interface (lock write-protect) */
3829 bnx2_disable_nvram_write(bp);
3830
3831 /* Disable access to flash interface */
3832 bnx2_disable_nvram_access(bp);
3833 bnx2_release_nvram_lock(bp);
3834
3835 /* Increment written */
3836 written += data_end - data_start;
3837 }
3838
3839 nvram_write_end:
3840 kfree(flash_buffer);
3841 kfree(align_buf);
3842 return rc;
3843 }
3844
3845 static void
3846 bnx2_init_remote_phy(struct bnx2 *bp)
3847 {
3848 u32 val;
3849
3850 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3851 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3852 return;
3853
3854 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3855 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3856 return;
3857
3858 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3859 if (netif_running(bp->dev)) {
3860 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3861 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3862 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3863 val);
3864 }
3865 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3866
3867 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3868 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3869 bp->phy_port = PORT_FIBRE;
3870 else
3871 bp->phy_port = PORT_TP;
3872 }
3873 }
3874
3875 static int
3876 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3877 {
3878 u32 val;
3879 int i, rc = 0;
3880
3881 /* Wait for the current PCI transaction to complete before
3882 * issuing a reset. */
3883 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3884 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3885 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3886 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3887 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3888 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3889 udelay(5);
3890
3891 /* Wait for the firmware to tell us it is ok to issue a reset. */
3892 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3893
3894 /* Deposit a driver reset signature so the firmware knows that
3895 * this is a soft reset. */
3896 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3897 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3898
3899 /* Do a dummy read to force the chip to complete all current transactions
3900 * before we issue a reset. */
3901 val = REG_RD(bp, BNX2_MISC_ID);
3902
3903 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3904 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3905 REG_RD(bp, BNX2_MISC_COMMAND);
3906 udelay(5);
3907
3908 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3909 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3910
3911 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3912
3913 } else {
3914 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3915 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3916 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3917
3918 /* Chip reset. */
3919 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3920
3921 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3922 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3923 current->state = TASK_UNINTERRUPTIBLE;
3924 schedule_timeout(HZ / 50);
3925 }
3926
3927 /* Reset takes approximately 30 usec */
3928 for (i = 0; i < 10; i++) {
3929 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3930 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3931 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3932 break;
3933 udelay(10);
3934 }
3935
3936 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3937 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3938 printk(KERN_ERR PFX "Chip reset did not complete\n");
3939 return -EBUSY;
3940 }
3941 }
3942
3943 /* Make sure byte swapping is properly configured. */
3944 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3945 if (val != 0x01020304) {
3946 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3947 return -ENODEV;
3948 }
3949
3950 /* Wait for the firmware to finish its initialization. */
3951 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3952 if (rc)
3953 return rc;
3954
3955 spin_lock_bh(&bp->phy_lock);
3956 bnx2_init_remote_phy(bp);
3957 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3958 bnx2_set_default_remote_link(bp);
3959 spin_unlock_bh(&bp->phy_lock);
3960
3961 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3962 /* Adjust the voltage regulator two steps lower. The default
3963 * of this register is 0x0000000e. */
3964 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3965
3966 /* Remove bad rbuf memory from the free pool. */
3967 rc = bnx2_alloc_bad_rbuf(bp);
3968 }
3969
3970 return rc;
3971 }
3972
3973 static int
3974 bnx2_init_chip(struct bnx2 *bp)
3975 {
3976 u32 val;
3977 int rc;
3978
3979 /* Make sure the interrupt is not active. */
3980 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3981
3982 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3983 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3984 #ifdef __BIG_ENDIAN
3985 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3986 #endif
3987 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3988 DMA_READ_CHANS << 12 |
3989 DMA_WRITE_CHANS << 16;
3990
3991 val |= (0x2 << 20) | (1 << 11);
3992
3993 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3994 val |= (1 << 23);
3995
3996 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3997 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3998 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3999
4000 REG_WR(bp, BNX2_DMA_CONFIG, val);
4001
4002 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4003 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4004 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4005 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4006 }
4007
4008 if (bp->flags & PCIX_FLAG) {
4009 u16 val16;
4010
4011 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4012 &val16);
4013 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4014 val16 & ~PCI_X_CMD_ERO);
4015 }
4016
4017 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4018 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4019 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4020 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4021
4022 /* Initialize context mapping and zero out the quick contexts. The
4023 * context block must have already been enabled. */
4024 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4025 rc = bnx2_init_5709_context(bp);
4026 if (rc)
4027 return rc;
4028 } else
4029 bnx2_init_context(bp);
4030
4031 if ((rc = bnx2_init_cpus(bp)) != 0)
4032 return rc;
4033
4034 bnx2_init_nvram(bp);
4035
4036 bnx2_set_mac_addr(bp);
4037
4038 val = REG_RD(bp, BNX2_MQ_CONFIG);
4039 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4040 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4041 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4042 val |= BNX2_MQ_CONFIG_HALT_DIS;
4043
4044 REG_WR(bp, BNX2_MQ_CONFIG, val);
4045
4046 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4047 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4048 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4049
4050 val = (BCM_PAGE_BITS - 8) << 24;
4051 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4052
4053 /* Configure page size. */
4054 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4055 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4056 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4057 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4058
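	/* Seed the transmit backoff generator from the MAC address,
	 * presumably so that stations on a shared segment pick
	 * different backoff slots.
	 */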
4059 val = bp->mac_addr[0] +
4060 (bp->mac_addr[1] << 8) +
4061 (bp->mac_addr[2] << 16) +
4062 bp->mac_addr[3] +
4063 (bp->mac_addr[4] << 8) +
4064 (bp->mac_addr[5] << 16);
4065 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4066
4067 /* Program the MTU. Also include 4 bytes for CRC32. */
4068 val = bp->dev->mtu + ETH_HLEN + 4;
4069 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4070 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4071 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4072
4073 bp->last_status_idx = 0;
4074 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4075
4076 /* Set up how to generate a link change interrupt. */
4077 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4078
4079 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4080 (u64) bp->status_blk_mapping & 0xffffffff);
4081 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4082
4083 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4084 (u64) bp->stats_blk_mapping & 0xffffffff);
4085 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4086 (u64) bp->stats_blk_mapping >> 32);
4087
4088 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4089 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4090
4091 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4092 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4093
4094 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4095 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4096
4097 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4098
4099 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4100
4101 REG_WR(bp, BNX2_HC_COM_TICKS,
4102 (bp->com_ticks_int << 16) | bp->com_ticks);
4103
4104 REG_WR(bp, BNX2_HC_CMD_TICKS,
4105 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4106
4107 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4108 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4109 else
4110 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
4111 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4112
4113 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4114 val = BNX2_HC_CONFIG_COLLECT_STATS;
4115 else {
4116 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4117 BNX2_HC_CONFIG_COLLECT_STATS;
4118 }
4119
4120 if (bp->flags & ONE_SHOT_MSI_FLAG)
4121 val |= BNX2_HC_CONFIG_ONE_SHOT;
4122
4123 REG_WR(bp, BNX2_HC_CONFIG, val);
4124
4125 /* Clear internal stats counters. */
4126 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4127
4128 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4129
4130 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
4131 BNX2_PORT_FEATURE_ASF_ENABLED)
4132 bp->flags |= ASF_ENABLE_FLAG;
4133
4134 /* Initialize the receive filter. */
4135 bnx2_set_rx_mode(bp->dev);
4136
4137 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4138 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4139 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4140 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4141 }
4142 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4143 0);
4144
4145 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4146 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4147
4148 udelay(20);
4149
4150 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4151
4152 return rc;
4153 }
4154
4155 static void
4156 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4157 {
4158 u32 val, offset0, offset1, offset2, offset3;
4159
4160 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4161 offset0 = BNX2_L2CTX_TYPE_XI;
4162 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4163 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4164 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4165 } else {
4166 offset0 = BNX2_L2CTX_TYPE;
4167 offset1 = BNX2_L2CTX_CMD_TYPE;
4168 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4169 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4170 }
4171 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4172 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4173
4174 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4175 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4176
4177 val = (u64) bp->tx_desc_mapping >> 32;
4178 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4179
4180 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4181 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4182 }
4183
4184 static void
4185 bnx2_init_tx_ring(struct bnx2 *bp)
4186 {
4187 struct tx_bd *txbd;
4188 u32 cid;
4189
4190 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4191
4192 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4193
4194 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4195 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4196
4197 bp->tx_prod = 0;
4198 bp->tx_cons = 0;
4199 bp->hw_tx_cons = 0;
4200 bp->tx_prod_bseq = 0;
4201
4202 cid = TX_CID;
4203 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4204 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4205
4206 bnx2_init_tx_context(bp, cid);
4207 }
4208
4209 static void
4210 bnx2_init_rx_ring(struct bnx2 *bp)
4211 {
4212 struct rx_bd *rxbd;
4213 int i;
4214 u16 prod, ring_prod;
4215 u32 val;
4216
4217 /* 8 for CRC and VLAN */
4218 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4219 /* hw alignment */
4220 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4221
4222 ring_prod = prod = bp->rx_prod = 0;
4223 bp->rx_cons = 0;
4224 bp->hw_rx_cons = 0;
4225 bp->rx_prod_bseq = 0;
4226
4227 for (i = 0; i < bp->rx_max_ring; i++) {
4228 int j;
4229
4230 rxbd = &bp->rx_desc_ring[i][0];
4231 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4232 rxbd->rx_bd_len = bp->rx_buf_use_size;
4233 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4234 }
4235 if (i == (bp->rx_max_ring - 1))
4236 j = 0;
4237 else
4238 j = i + 1;
4239 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4240 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4241 0xffffffff;
4242 }
4243
4244 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4245 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4246 val |= 0x02 << 8;
4247 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4248
4249 val = (u64) bp->rx_desc_mapping[0] >> 32;
4250 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4251
4252 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4253 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4254
4255 for (i = 0; i < bp->rx_ring_size; i++) {
4256 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4257 break;
4258 }
4259 prod = NEXT_RX_BD(prod);
4260 ring_prod = RX_RING_IDX(prod);
4261 }
4262 bp->rx_prod = prod;
4263
4264 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4265
4266 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4267 }
4268
4269 static void
4270 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4271 {
4272 u32 num_rings, max;
4273
4274 bp->rx_ring_size = size;
4275 num_rings = 1;
4276 while (size > MAX_RX_DESC_CNT) {
4277 size -= MAX_RX_DESC_CNT;
4278 num_rings++;
4279 }
4280 /* Round the number of ring pages up to the next power of 2. */
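/* E.g., assuming 256 rx_bd's per page (MAX_RX_DESC_CNT == 255) and
 * MAX_RX_RINGS == 4: a requested size of 600 gives num_rings = 3,
 * which the scan below rounds up to 4 pages.
 */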
4281 max = MAX_RX_RINGS;
4282 while ((max & num_rings) == 0)
4283 max >>= 1;
4284
4285 if (num_rings != max)
4286 max <<= 1;
4287
4288 bp->rx_max_ring = max;
4289 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4290 }
4291
4292 static void
4293 bnx2_free_tx_skbs(struct bnx2 *bp)
4294 {
4295 int i;
4296
4297 if (bp->tx_buf_ring == NULL)
4298 return;
4299
4300 for (i = 0; i < TX_DESC_CNT; ) {
4301 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4302 struct sk_buff *skb = tx_buf->skb;
4303 int j, last;
4304
4305 if (skb == NULL) {
4306 i++;
4307 continue;
4308 }
4309
4310 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4311 skb_headlen(skb), PCI_DMA_TODEVICE);
4312
4313 tx_buf->skb = NULL;
4314
4315 last = skb_shinfo(skb)->nr_frags;
4316 for (j = 0; j < last; j++) {
4317 tx_buf = &bp->tx_buf_ring[i + j + 1];
4318 pci_unmap_page(bp->pdev,
4319 pci_unmap_addr(tx_buf, mapping),
4320 skb_shinfo(skb)->frags[j].size,
4321 PCI_DMA_TODEVICE);
4322 }
4323 dev_kfree_skb(skb);
4324 i += j + 1;
4325 }
4326
4327 }
4328
4329 static void
4330 bnx2_free_rx_skbs(struct bnx2 *bp)
4331 {
4332 int i;
4333
4334 if (bp->rx_buf_ring == NULL)
4335 return;
4336
4337 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4338 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4339 struct sk_buff *skb = rx_buf->skb;
4340
4341 if (skb == NULL)
4342 continue;
4343
4344 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4345 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4346
4347 rx_buf->skb = NULL;
4348
4349 dev_kfree_skb(skb);
4350 }
4351 }
4352
4353 static void
4354 bnx2_free_skbs(struct bnx2 *bp)
4355 {
4356 bnx2_free_tx_skbs(bp);
4357 bnx2_free_rx_skbs(bp);
4358 }
4359
4360 static int
4361 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4362 {
4363 int rc;
4364
4365 rc = bnx2_reset_chip(bp, reset_code);
4366 bnx2_free_skbs(bp);
4367 if (rc)
4368 return rc;
4369
4370 if ((rc = bnx2_init_chip(bp)) != 0)
4371 return rc;
4372
4373 bnx2_init_tx_ring(bp);
4374 bnx2_init_rx_ring(bp);
4375 return 0;
4376 }
4377
4378 static int
4379 bnx2_init_nic(struct bnx2 *bp)
4380 {
4381 int rc;
4382
4383 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4384 return rc;
4385
4386 spin_lock_bh(&bp->phy_lock);
4387 bnx2_init_phy(bp);
4388 bnx2_set_link(bp);
4389 spin_unlock_bh(&bp->phy_lock);
4390 return 0;
4391 }
4392
4393 static int
4394 bnx2_test_registers(struct bnx2 *bp)
4395 {
4396 int ret;
4397 int i, is_5709;
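/* Each entry probes one register: bits set in rw_mask must be
 * read/write (both the zero and the all-ones write must read back in
 * those bits), while bits set in ro_mask must keep their original
 * value across both writes.
 */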
4398 static const struct {
4399 u16 offset;
4400 u16 flags;
4401 #define BNX2_FL_NOT_5709 1
4402 u32 rw_mask;
4403 u32 ro_mask;
4404 } reg_tbl[] = {
4405 { 0x006c, 0, 0x00000000, 0x0000003f },
4406 { 0x0090, 0, 0xffffffff, 0x00000000 },
4407 { 0x0094, 0, 0x00000000, 0x00000000 },
4408
4409 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4410 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4411 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4412 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4413 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4414 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4415 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4416 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4417 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4418
4419 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4420 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4421 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4422 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4423 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4424 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4425
4426 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4427 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4428 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4429
4430 { 0x1000, 0, 0x00000000, 0x00000001 },
4431 { 0x1004, 0, 0x00000000, 0x000f0001 },
4432
4433 { 0x1408, 0, 0x01c00800, 0x00000000 },
4434 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4435 { 0x14a8, 0, 0x00000000, 0x000001ff },
4436 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4437 { 0x14b0, 0, 0x00000002, 0x00000001 },
4438 { 0x14b8, 0, 0x00000000, 0x00000000 },
4439 { 0x14c0, 0, 0x00000000, 0x00000009 },
4440 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4441 { 0x14cc, 0, 0x00000000, 0x00000001 },
4442 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4443
4444 { 0x1800, 0, 0x00000000, 0x00000001 },
4445 { 0x1804, 0, 0x00000000, 0x00000003 },
4446
4447 { 0x2800, 0, 0x00000000, 0x00000001 },
4448 { 0x2804, 0, 0x00000000, 0x00003f01 },
4449 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4450 { 0x2810, 0, 0xffff0000, 0x00000000 },
4451 { 0x2814, 0, 0xffff0000, 0x00000000 },
4452 { 0x2818, 0, 0xffff0000, 0x00000000 },
4453 { 0x281c, 0, 0xffff0000, 0x00000000 },
4454 { 0x2834, 0, 0xffffffff, 0x00000000 },
4455 { 0x2840, 0, 0x00000000, 0xffffffff },
4456 { 0x2844, 0, 0x00000000, 0xffffffff },
4457 { 0x2848, 0, 0xffffffff, 0x00000000 },
4458 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4459
4460 { 0x2c00, 0, 0x00000000, 0x00000011 },
4461 { 0x2c04, 0, 0x00000000, 0x00030007 },
4462
4463 { 0x3c00, 0, 0x00000000, 0x00000001 },
4464 { 0x3c04, 0, 0x00000000, 0x00070000 },
4465 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4466 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4467 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4468 { 0x3c14, 0, 0x00000000, 0xffffffff },
4469 { 0x3c18, 0, 0x00000000, 0xffffffff },
4470 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4471 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4472
4473 { 0x5004, 0, 0x00000000, 0x0000007f },
4474 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4475
4476 { 0x5c00, 0, 0x00000000, 0x00000001 },
4477 { 0x5c04, 0, 0x00000000, 0x0003000f },
4478 { 0x5c08, 0, 0x00000003, 0x00000000 },
4479 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4480 { 0x5c10, 0, 0x00000000, 0xffffffff },
4481 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4482 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4483 { 0x5c88, 0, 0x00000000, 0x00077373 },
4484 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4485
4486 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4487 { 0x680c, 0, 0xffffffff, 0x00000000 },
4488 { 0x6810, 0, 0xffffffff, 0x00000000 },
4489 { 0x6814, 0, 0xffffffff, 0x00000000 },
4490 { 0x6818, 0, 0xffffffff, 0x00000000 },
4491 { 0x681c, 0, 0xffffffff, 0x00000000 },
4492 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4493 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4494 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4495 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4496 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4497 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4498 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4499 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4500 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4501 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4502 { 0x684c, 0, 0xffffffff, 0x00000000 },
4503 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4504 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4505 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4506 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4507 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4508 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4509
4510 { 0xffff, 0, 0x00000000, 0x00000000 },
4511 };
4512
4513 ret = 0;
4514 is_5709 = 0;
4515 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4516 is_5709 = 1;
4517
4518 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4519 u32 offset, rw_mask, ro_mask, save_val, val;
4520 u16 flags = reg_tbl[i].flags;
4521
4522 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4523 continue;
4524
4525 offset = (u32) reg_tbl[i].offset;
4526 rw_mask = reg_tbl[i].rw_mask;
4527 ro_mask = reg_tbl[i].ro_mask;
4528
4529 save_val = readl(bp->regview + offset);
4530
4531 writel(0, bp->regview + offset);
4532
4533 val = readl(bp->regview + offset);
4534 if ((val & rw_mask) != 0) {
4535 goto reg_test_err;
4536 }
4537
4538 if ((val & ro_mask) != (save_val & ro_mask)) {
4539 goto reg_test_err;
4540 }
4541
4542 writel(0xffffffff, bp->regview + offset);
4543
4544 val = readl(bp->regview + offset);
4545 if ((val & rw_mask) != rw_mask) {
4546 goto reg_test_err;
4547 }
4548
4549 if ((val & ro_mask) != (save_val & ro_mask)) {
4550 goto reg_test_err;
4551 }
4552
4553 writel(save_val, bp->regview + offset);
4554 continue;
4555
4556 reg_test_err:
4557 writel(save_val, bp->regview + offset);
4558 ret = -ENODEV;
4559 break;
4560 }
4561 return ret;
4562 }
4563
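/* Indirectly write each test pattern to every 32-bit word in
 * [start, start + size) and read it back.  The alternating 0x55/0xaa
 * patterns are intended to catch stuck and cross-coupled data bits.
 */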
4564 static int
4565 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4566 {
4567 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4568 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
4569 int i;
4570
4571 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4572 u32 offset;
4573
4574 for (offset = 0; offset < size; offset += 4) {
4575
4576 REG_WR_IND(bp, start + offset, test_pattern[i]);
4577
4578 if (REG_RD_IND(bp, start + offset) !=
4579 test_pattern[i]) {
4580 return -ENODEV;
4581 }
4582 }
4583 }
4584 return 0;
4585 }
4586
4587 static int
4588 bnx2_test_memory(struct bnx2 *bp)
4589 {
4590 int ret = 0;
4591 int i;
4592 static struct mem_entry {
4593 u32 offset;
4594 u32 len;
4595 } mem_tbl_5706[] = {
4596 { 0x60000, 0x4000 },
4597 { 0xa0000, 0x3000 },
4598 { 0xe0000, 0x4000 },
4599 { 0x120000, 0x4000 },
4600 { 0x1a0000, 0x4000 },
4601 { 0x160000, 0x4000 },
4602 { 0xffffffff, 0 },
4603 },
4604 mem_tbl_5709[] = {
4605 { 0x60000, 0x4000 },
4606 { 0xa0000, 0x3000 },
4607 { 0xe0000, 0x4000 },
4608 { 0x120000, 0x4000 },
4609 { 0x1a0000, 0x4000 },
4610 { 0xffffffff, 0 },
4611 };
4612 struct mem_entry *mem_tbl;
4613
4614 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4615 mem_tbl = mem_tbl_5709;
4616 else
4617 mem_tbl = mem_tbl_5706;
4618
4619 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4620 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4621 mem_tbl[i].len)) != 0) {
4622 return ret;
4623 }
4624 }
4625
4626 return ret;
4627 }
4628
4629 #define BNX2_MAC_LOOPBACK 0
4630 #define BNX2_PHY_LOOPBACK 1
4631
4632 static int
4633 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4634 {
4635 unsigned int pkt_size, num_pkts, i;
4636 struct sk_buff *skb, *rx_skb;
4637 unsigned char *packet;
4638 u16 rx_start_idx, rx_idx;
4639 dma_addr_t map;
4640 struct tx_bd *txbd;
4641 struct sw_bd *rx_buf;
4642 struct l2_fhdr *rx_hdr;
4643 int ret = -ENODEV;
4644
4645 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4646 bp->loopback = MAC_LOOPBACK;
4647 bnx2_set_mac_loopback(bp);
4648 }
4649 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4650 bp->loopback = PHY_LOOPBACK;
4651 bnx2_set_phy_loopback(bp);
4652 }
4653 else
4654 return -EINVAL;
4655
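/* Build a maximum-sized (1514-byte) frame addressed to ourselves,
 * post it as a single tx_bd, and kick the host coalescing block so
 * the status block indices update without an interrupt.  The frame
 * should then appear on the RX ring with its payload intact.
 */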
4656 pkt_size = 1514;
4657 skb = netdev_alloc_skb(bp->dev, pkt_size);
4658 if (!skb)
4659 return -ENOMEM;
4660 packet = skb_put(skb, pkt_size);
4661 memcpy(packet, bp->dev->dev_addr, 6);
4662 memset(packet + 6, 0x0, 8);
4663 for (i = 14; i < pkt_size; i++)
4664 packet[i] = (unsigned char) (i & 0xff);
4665
4666 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4667 PCI_DMA_TODEVICE);
4668
4669 REG_WR(bp, BNX2_HC_COMMAND,
4670 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4671
4672 REG_RD(bp, BNX2_HC_COMMAND);
4673
4674 udelay(5);
4675 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4676
4677 num_pkts = 0;
4678
4679 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4680
4681 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4682 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4683 txbd->tx_bd_mss_nbytes = pkt_size;
4684 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4685
4686 num_pkts++;
4687 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4688 bp->tx_prod_bseq += pkt_size;
4689
4690 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4691 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4692
4693 udelay(100);
4694
4695 REG_WR(bp, BNX2_HC_COMMAND,
4696 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4697
4698 REG_RD(bp, BNX2_HC_COMMAND);
4699
4700 udelay(5);
4701
4702 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4703 dev_kfree_skb(skb);
4704
4705 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4706 goto loopback_test_done;
4707 }
4708
4709 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4710 if (rx_idx != rx_start_idx + num_pkts) {
4711 goto loopback_test_done;
4712 }
4713
4714 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4715 rx_skb = rx_buf->skb;
4716
4717 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4718 skb_reserve(rx_skb, bp->rx_offset);
4719
4720 pci_dma_sync_single_for_cpu(bp->pdev,
4721 pci_unmap_addr(rx_buf, mapping),
4722 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4723
4724 if (rx_hdr->l2_fhdr_status &
4725 (L2_FHDR_ERRORS_BAD_CRC |
4726 L2_FHDR_ERRORS_PHY_DECODE |
4727 L2_FHDR_ERRORS_ALIGNMENT |
4728 L2_FHDR_ERRORS_TOO_SHORT |
4729 L2_FHDR_ERRORS_GIANT_FRAME)) {
4730
4731 goto loopback_test_done;
4732 }
4733
4734 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4735 goto loopback_test_done;
4736 }
4737
4738 for (i = 14; i < pkt_size; i++) {
4739 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4740 goto loopback_test_done;
4741 }
4742 }
4743
4744 ret = 0;
4745
4746 loopback_test_done:
4747 bp->loopback = 0;
4748 return ret;
4749 }
4750
4751 #define BNX2_MAC_LOOPBACK_FAILED 1
4752 #define BNX2_PHY_LOOPBACK_FAILED 2
4753 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4754 BNX2_PHY_LOOPBACK_FAILED)
4755
4756 static int
4757 bnx2_test_loopback(struct bnx2 *bp)
4758 {
4759 int rc = 0;
4760
4761 if (!netif_running(bp->dev))
4762 return BNX2_LOOPBACK_FAILED;
4763
4764 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4765 spin_lock_bh(&bp->phy_lock);
4766 bnx2_init_phy(bp);
4767 spin_unlock_bh(&bp->phy_lock);
4768 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4769 rc |= BNX2_MAC_LOOPBACK_FAILED;
4770 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4771 rc |= BNX2_PHY_LOOPBACK_FAILED;
4772 return rc;
4773 }
4774
4775 #define NVRAM_SIZE 0x200
4776 #define CRC32_RESIDUAL 0xdebb20e3
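/* Each 0x100-byte block checked below ends with its own little-endian
 * CRC32.  Running the CRC over a block that includes its trailing CRC
 * always yields the fixed residual above, so comparing against
 * CRC32_RESIDUAL validates the block.
 */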
4777
4778 static int
4779 bnx2_test_nvram(struct bnx2 *bp)
4780 {
4781 u32 buf[NVRAM_SIZE / 4];
4782 u8 *data = (u8 *) buf;
4783 int rc = 0;
4784 u32 magic, csum;
4785
4786 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4787 goto test_nvram_done;
4788
4789 magic = be32_to_cpu(buf[0]);
4790 if (magic != 0x669955aa) {
4791 rc = -ENODEV;
4792 goto test_nvram_done;
4793 }
4794
4795 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4796 goto test_nvram_done;
4797
4798 csum = ether_crc_le(0x100, data);
4799 if (csum != CRC32_RESIDUAL) {
4800 rc = -ENODEV;
4801 goto test_nvram_done;
4802 }
4803
4804 csum = ether_crc_le(0x100, data + 0x100);
4805 if (csum != CRC32_RESIDUAL) {
4806 rc = -ENODEV;
4807 }
4808
4809 test_nvram_done:
4810 return rc;
4811 }
4812
4813 static int
4814 bnx2_test_link(struct bnx2 *bp)
4815 {
4816 u32 bmsr;
4817
4818 spin_lock_bh(&bp->phy_lock);
4819 bnx2_enable_bmsr1(bp);
4820 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4821 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4822 bnx2_disable_bmsr1(bp);
4823 spin_unlock_bh(&bp->phy_lock);
4824
4825 if (bmsr & BMSR_LSTATUS) {
4826 return 0;
4827 }
4828 return -ENODEV;
4829 }
4830
4831 static int
4832 bnx2_test_intr(struct bnx2 *bp)
4833 {
4834 int i;
4835 u16 status_idx;
4836
4837 if (!netif_running(bp->dev))
4838 return -ENODEV;
4839
4840 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4841
4842 /* This register is not touched during run-time. */
4843 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4844 REG_RD(bp, BNX2_HC_COMMAND);
4845
4846 for (i = 0; i < 10; i++) {
4847 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4848 status_idx) {
4849
4850 break;
4851 }
4852
4853 msleep_interruptible(10);
4854 }
4855 if (i < 10)
4856 return 0;
4857
4858 return -ENODEV;
4859 }
4860
4861 static void
4862 bnx2_5706_serdes_timer(struct bnx2 *bp)
4863 {
4864 spin_lock(&bp->phy_lock);
4865 if (bp->serdes_an_pending)
4866 bp->serdes_an_pending--;
4867 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4868 u32 bmcr;
4869
4870 bp->current_interval = bp->timer_interval;
4871
4872 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4873
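/* Parallel detection: if we see signal on the wire but the link
 * partner never sends autoneg config words, assume a forced-speed
 * partner and force 1000 Mbps full duplex ourselves instead of
 * autonegotiating forever.
 */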
4874 if (bmcr & BMCR_ANENABLE) {
4875 u32 phy1, phy2;
4876
4877 bnx2_write_phy(bp, 0x1c, 0x7c00);
4878 bnx2_read_phy(bp, 0x1c, &phy1);
4879
4880 bnx2_write_phy(bp, 0x17, 0x0f01);
4881 bnx2_read_phy(bp, 0x15, &phy2);
4882 bnx2_write_phy(bp, 0x17, 0x0f01);
4883 bnx2_read_phy(bp, 0x15, &phy2);
4884
4885 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4886 !(phy2 & 0x20)) { /* no CONFIG */
4887
4888 bmcr &= ~BMCR_ANENABLE;
4889 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4890 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4891 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4892 }
4893 }
4894 }
4895 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4896 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4897 u32 phy2;
4898
4899 bnx2_write_phy(bp, 0x17, 0x0f01);
4900 bnx2_read_phy(bp, 0x15, &phy2);
4901 if (phy2 & 0x20) {
4902 u32 bmcr;
4903
4904 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4905 bmcr |= BMCR_ANENABLE;
4906 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4907
4908 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4909 }
4910 } else
4911 bp->current_interval = bp->timer_interval;
4912
4913 spin_unlock(&bp->phy_lock);
4914 }
4915
4916 static void
4917 bnx2_5708_serdes_timer(struct bnx2 *bp)
4918 {
4919 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4920 return;
4921
4922 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4923 bp->serdes_an_pending = 0;
4924 return;
4925 }
4926
4927 spin_lock(&bp->phy_lock);
4928 if (bp->serdes_an_pending)
4929 bp->serdes_an_pending--;
4930 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4931 u32 bmcr;
4932
4933 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4934 if (bmcr & BMCR_ANENABLE) {
4935 bnx2_enable_forced_2g5(bp);
4936 bp->current_interval = SERDES_FORCED_TIMEOUT;
4937 } else {
4938 bnx2_disable_forced_2g5(bp);
4939 bp->serdes_an_pending = 2;
4940 bp->current_interval = bp->timer_interval;
4941 }
4942
4943 } else
4944 bp->current_interval = bp->timer_interval;
4945
4946 spin_unlock(&bp->phy_lock);
4947 }
4948
4949 static void
4950 bnx2_timer(unsigned long data)
4951 {
4952 struct bnx2 *bp = (struct bnx2 *) data;
4953
4954 if (!netif_running(bp->dev))
4955 return;
4956
4957 if (atomic_read(&bp->intr_sem) != 0)
4958 goto bnx2_restart_timer;
4959
4960 bnx2_send_heart_beat(bp);
4961
4962 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4963
4964 /* Work around occasionally corrupted 5708 counters by forcing a statistics update */
4965 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4966 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4967 BNX2_HC_COMMAND_STATS_NOW);
4968
4969 if (bp->phy_flags & PHY_SERDES_FLAG) {
4970 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4971 bnx2_5706_serdes_timer(bp);
4972 else
4973 bnx2_5708_serdes_timer(bp);
4974 }
4975
4976 bnx2_restart_timer:
4977 mod_timer(&bp->timer, jiffies + bp->current_interval);
4978 }
4979
4980 static int
4981 bnx2_request_irq(struct bnx2 *bp)
4982 {
4983 struct net_device *dev = bp->dev;
4984 int rc = 0;
4985
4986 if (bp->flags & USING_MSI_FLAG) {
4987 irq_handler_t fn = bnx2_msi;
4988
4989 if (bp->flags & ONE_SHOT_MSI_FLAG)
4990 fn = bnx2_msi_1shot;
4991
4992 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4993 } else
4994 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4995 IRQF_SHARED, dev->name, dev);
4996 return rc;
4997 }
4998
4999 static void
5000 bnx2_free_irq(struct bnx2 *bp)
5001 {
5002 struct net_device *dev = bp->dev;
5003
5004 if (bp->flags & USING_MSI_FLAG) {
5005 free_irq(bp->pdev->irq, dev);
5006 pci_disable_msi(bp->pdev);
5007 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5008 } else
5009 free_irq(bp->pdev->irq, dev);
5010 }
5011
5012 /* Called with rtnl_lock */
5013 static int
5014 bnx2_open(struct net_device *dev)
5015 {
5016 struct bnx2 *bp = netdev_priv(dev);
5017 int rc;
5018
5019 netif_carrier_off(dev);
5020
5021 bnx2_set_power_state(bp, PCI_D0);
5022 bnx2_disable_int(bp);
5023
5024 rc = bnx2_alloc_mem(bp);
5025 if (rc)
5026 return rc;
5027
5028 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5029 if (pci_enable_msi(bp->pdev) == 0) {
5030 bp->flags |= USING_MSI_FLAG;
5031 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5032 bp->flags |= ONE_SHOT_MSI_FLAG;
5033 }
5034 }
5035 rc = bnx2_request_irq(bp);
5036
5037 if (rc) {
5038 bnx2_free_mem(bp);
5039 return rc;
5040 }
5041
5042 rc = bnx2_init_nic(bp);
5043
5044 if (rc) {
5045 bnx2_free_irq(bp);
5046 bnx2_free_skbs(bp);
5047 bnx2_free_mem(bp);
5048 return rc;
5049 }
5050
5051 mod_timer(&bp->timer, jiffies + bp->current_interval);
5052
5053 atomic_set(&bp->intr_sem, 0);
5054
5055 bnx2_enable_int(bp);
5056
5057 if (bp->flags & USING_MSI_FLAG) {
5058 /* Test MSI to make sure it is working.
5059 * If the MSI test fails, go back to INTx mode.
5060 */
5061 if (bnx2_test_intr(bp) != 0) {
5062 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5063 " using MSI, switching to INTx mode. Please"
5064 " report this failure to the PCI maintainer"
5065 " and include system chipset information.\n",
5066 bp->dev->name);
5067
5068 bnx2_disable_int(bp);
5069 bnx2_free_irq(bp);
5070
5071 rc = bnx2_init_nic(bp);
5072
5073 if (!rc)
5074 rc = bnx2_request_irq(bp);
5075
5076 if (rc) {
5077 bnx2_free_skbs(bp);
5078 bnx2_free_mem(bp);
5079 del_timer_sync(&bp->timer);
5080 return rc;
5081 }
5082 bnx2_enable_int(bp);
5083 }
5084 }
5085 if (bp->flags & USING_MSI_FLAG) {
5086 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5087 }
5088
5089 netif_start_queue(dev);
5090
5091 return 0;
5092 }
5093
5094 static void
5095 bnx2_reset_task(struct work_struct *work)
5096 {
5097 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5098
5099 if (!netif_running(bp->dev))
5100 return;
5101
5102 bp->in_reset_task = 1;
5103 bnx2_netif_stop(bp);
5104
5105 bnx2_init_nic(bp);
5106
5107 atomic_set(&bp->intr_sem, 1);
5108 bnx2_netif_start(bp);
5109 bp->in_reset_task = 0;
5110 }
5111
5112 static void
5113 bnx2_tx_timeout(struct net_device *dev)
5114 {
5115 struct bnx2 *bp = netdev_priv(dev);
5116
5117 /* This allows the netif to be shut down gracefully before resetting */
5118 schedule_work(&bp->reset_task);
5119 }
5120
5121 #ifdef BCM_VLAN
5122 /* Called with rtnl_lock */
5123 static void
5124 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5125 {
5126 struct bnx2 *bp = netdev_priv(dev);
5127
5128 bnx2_netif_stop(bp);
5129
5130 bp->vlgrp = vlgrp;
5131 bnx2_set_rx_mode(dev);
5132
5133 bnx2_netif_start(bp);
5134 }
5135 #endif
5136
5137 /* Called with netif_tx_lock.
5138 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5139 * netif_wake_queue().
5140 */
5141 static int
5142 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5143 {
5144 struct bnx2 *bp = netdev_priv(dev);
5145 dma_addr_t mapping;
5146 struct tx_bd *txbd;
5147 struct sw_bd *tx_buf;
5148 u32 len, vlan_tag_flags, last_frag, mss;
5149 u16 prod, ring_prod;
5150 int i;
5151
5152 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5153 netif_stop_queue(dev);
5154 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5155 dev->name);
5156
5157 return NETDEV_TX_BUSY;
5158 }
5159 len = skb_headlen(skb);
5160 prod = bp->tx_prod;
5161 ring_prod = TX_RING_IDX(prod);
5162
5163 vlan_tag_flags = 0;
5164 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5165 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5166 }
5167
5168 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5169 vlan_tag_flags |=
5170 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5171 }
5172 if ((mss = skb_shinfo(skb)->gso_size)) {
5173 u32 tcp_opt_len, ip_tcp_len;
5174 struct iphdr *iph;
5175
5176 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5177
5178 tcp_opt_len = tcp_optlen(skb);
5179
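/* For IPv6 the chip wants the TCP header offset expressed relative to
 * the end of a bare 40-byte IPv6 header, in 8-byte units, scattered
 * across three small bit-fields of the BD; extension headers make the
 * offset nonzero.
 */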
5180 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5181 u32 tcp_off = skb_transport_offset(skb) -
5182 sizeof(struct ipv6hdr) - ETH_HLEN;
5183
5184 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5185 TX_BD_FLAGS_SW_FLAGS;
5186 if (likely(tcp_off == 0))
5187 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5188 else {
5189 tcp_off >>= 3;
5190 vlan_tag_flags |= ((tcp_off & 0x3) <<
5191 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5192 ((tcp_off & 0x10) <<
5193 TX_BD_FLAGS_TCP6_OFF4_SHL);
5194 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5195 }
5196 } else {
5197 if (skb_header_cloned(skb) &&
5198 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5199 dev_kfree_skb(skb);
5200 return NETDEV_TX_OK;
5201 }
5202
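/* Rewrite the IP total length for the whole LSO frame and seed
 * tcp->check with just the pseudo-header sum (zero length); the
 * hardware fills in the real checksum for each segment it emits.
 */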
5203 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5204
5205 iph = ip_hdr(skb);
5206 iph->check = 0;
5207 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5208 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5209 iph->daddr, 0,
5210 IPPROTO_TCP,
5211 0);
5212 if (tcp_opt_len || (iph->ihl > 5)) {
5213 vlan_tag_flags |= ((iph->ihl - 5) +
5214 (tcp_opt_len >> 2)) << 8;
5215 }
5216 }
5217 } else
5218 mss = 0;
5219
5220 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5221
5222 tx_buf = &bp->tx_buf_ring[ring_prod];
5223 tx_buf->skb = skb;
5224 pci_unmap_addr_set(tx_buf, mapping, mapping);
5225
5226 txbd = &bp->tx_desc_ring[ring_prod];
5227
5228 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5229 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5230 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5231 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5232
5233 last_frag = skb_shinfo(skb)->nr_frags;
5234
5235 for (i = 0; i < last_frag; i++) {
5236 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5237
5238 prod = NEXT_TX_BD(prod);
5239 ring_prod = TX_RING_IDX(prod);
5240 txbd = &bp->tx_desc_ring[ring_prod];
5241
5242 len = frag->size;
5243 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5244 len, PCI_DMA_TODEVICE);
5245 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5246 mapping, mapping);
5247
5248 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5249 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5250 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5251 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5252
5253 }
5254 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5255
5256 prod = NEXT_TX_BD(prod);
5257 bp->tx_prod_bseq += skb->len;
5258
5259 REG_WR16(bp, bp->tx_bidx_addr, prod);
5260 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5261
5262 mmiowb();
5263
5264 bp->tx_prod = prod;
5265 dev->trans_start = jiffies;
5266
5267 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5268 netif_stop_queue(dev);
5269 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5270 netif_wake_queue(dev);
5271 }
5272
5273 return NETDEV_TX_OK;
5274 }
5275
5276 /* Called with rtnl_lock */
5277 static int
5278 bnx2_close(struct net_device *dev)
5279 {
5280 struct bnx2 *bp = netdev_priv(dev);
5281 u32 reset_code;
5282
5283 /* Calling flush_scheduled_work() here may deadlock: linkwatch_event()
5284 * may be sitting on the workqueue, and it will try to take the
5285 * rtnl_lock that we are already holding.
5286 */
5287 while (bp->in_reset_task)
5288 msleep(1);
5289
5290 bnx2_netif_stop(bp);
5291 del_timer_sync(&bp->timer);
5292 if (bp->flags & NO_WOL_FLAG)
5293 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5294 else if (bp->wol)
5295 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5296 else
5297 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5298 bnx2_reset_chip(bp, reset_code);
5299 bnx2_free_irq(bp);
5300 bnx2_free_skbs(bp);
5301 bnx2_free_mem(bp);
5302 bp->link_up = 0;
5303 netif_carrier_off(bp->dev);
5304 bnx2_set_power_state(bp, PCI_D3hot);
5305 return 0;
5306 }
5307
5308 #define GET_NET_STATS64(ctr) \
5309 ((unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5310 (unsigned long) (ctr##_lo))
5311
5312 #define GET_NET_STATS32(ctr) \
5313 (ctr##_lo)
5314
5315 #if (BITS_PER_LONG == 64)
5316 #define GET_NET_STATS GET_NET_STATS64
5317 #else
5318 #define GET_NET_STATS GET_NET_STATS32
5319 #endif
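/* On 64-bit hosts the full 64-bit hardware counter is reported; on
 * 32-bit hosts struct net_device_stats only holds unsigned long, so
 * just the low 32 bits of each counter fit.
 */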
5320
5321 static struct net_device_stats *
5322 bnx2_get_stats(struct net_device *dev)
5323 {
5324 struct bnx2 *bp = netdev_priv(dev);
5325 struct statistics_block *stats_blk = bp->stats_blk;
5326 struct net_device_stats *net_stats = &bp->net_stats;
5327
5328 if (bp->stats_blk == NULL) {
5329 return net_stats;
5330 }
5331 net_stats->rx_packets =
5332 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5333 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5334 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5335
5336 net_stats->tx_packets =
5337 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5338 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5339 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5340
5341 net_stats->rx_bytes =
5342 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5343
5344 net_stats->tx_bytes =
5345 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5346
5347 net_stats->multicast =
5348 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5349
5350 net_stats->collisions =
5351 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5352
5353 net_stats->rx_length_errors =
5354 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5355 stats_blk->stat_EtherStatsOverrsizePkts);
5356
5357 net_stats->rx_over_errors =
5358 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5359
5360 net_stats->rx_frame_errors =
5361 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5362
5363 net_stats->rx_crc_errors =
5364 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5365
5366 net_stats->rx_errors = net_stats->rx_length_errors +
5367 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5368 net_stats->rx_crc_errors;
5369
5370 net_stats->tx_aborted_errors =
5371 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5372 stats_blk->stat_Dot3StatsLateCollisions);
5373
5374 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5375 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5376 net_stats->tx_carrier_errors = 0;
5377 else {
5378 net_stats->tx_carrier_errors =
5379 (unsigned long)
5380 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5381 }
5382
5383 net_stats->tx_errors =
5384 (unsigned long)
5385 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5386 +
5387 net_stats->tx_aborted_errors +
5388 net_stats->tx_carrier_errors;
5389
5390 net_stats->rx_missed_errors =
5391 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5392 stats_blk->stat_FwRxDrop);
5393
5394 return net_stats;
5395 }
5396
5397 /* All ethtool functions called with rtnl_lock */
5398
5399 static int
5400 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5401 {
5402 struct bnx2 *bp = netdev_priv(dev);
5403 int support_serdes = 0, support_copper = 0;
5404
5405 cmd->supported = SUPPORTED_Autoneg;
5406 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5407 support_serdes = 1;
5408 support_copper = 1;
5409 } else if (bp->phy_port == PORT_FIBRE)
5410 support_serdes = 1;
5411 else
5412 support_copper = 1;
5413
5414 if (support_serdes) {
5415 cmd->supported |= SUPPORTED_1000baseT_Full |
5416 SUPPORTED_FIBRE;
5417 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5418 cmd->supported |= SUPPORTED_2500baseX_Full;
5419
5420 }
5421 if (support_copper) {
5422 cmd->supported |= SUPPORTED_10baseT_Half |
5423 SUPPORTED_10baseT_Full |
5424 SUPPORTED_100baseT_Half |
5425 SUPPORTED_100baseT_Full |
5426 SUPPORTED_1000baseT_Full |
5427 SUPPORTED_TP;
5428
5429 }
5430
5431 spin_lock_bh(&bp->phy_lock);
5432 cmd->port = bp->phy_port;
5433 cmd->advertising = bp->advertising;
5434
5435 if (bp->autoneg & AUTONEG_SPEED) {
5436 cmd->autoneg = AUTONEG_ENABLE;
5437 }
5438 else {
5439 cmd->autoneg = AUTONEG_DISABLE;
5440 }
5441
5442 if (netif_carrier_ok(dev)) {
5443 cmd->speed = bp->line_speed;
5444 cmd->duplex = bp->duplex;
5445 }
5446 else {
5447 cmd->speed = -1;
5448 cmd->duplex = -1;
5449 }
5450 spin_unlock_bh(&bp->phy_lock);
5451
5452 cmd->transceiver = XCVR_INTERNAL;
5453 cmd->phy_address = bp->phy_addr;
5454
5455 return 0;
5456 }
5457
5458 static int
5459 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5460 {
5461 struct bnx2 *bp = netdev_priv(dev);
5462 u8 autoneg = bp->autoneg;
5463 u8 req_duplex = bp->req_duplex;
5464 u16 req_line_speed = bp->req_line_speed;
5465 u32 advertising = bp->advertising;
5466 int err = -EINVAL;
5467
5468 spin_lock_bh(&bp->phy_lock);
5469
5470 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5471 goto err_out_unlock;
5472
5473 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5474 goto err_out_unlock;
5475
5476 if (cmd->autoneg == AUTONEG_ENABLE) {
5477 autoneg |= AUTONEG_SPEED;
5478
5479 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5480
5481 /* allow advertising a single speed */
5482 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5483 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5484 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5485 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5486
5487 if (cmd->port == PORT_FIBRE)
5488 goto err_out_unlock;
5489
5490 advertising = cmd->advertising;
5491
5492 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5493 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5494 (cmd->port == PORT_TP))
5495 goto err_out_unlock;
5496 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5497 advertising = cmd->advertising;
5498 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5499 goto err_out_unlock;
5500 else {
5501 if (cmd->port == PORT_FIBRE)
5502 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5503 else
5504 advertising = ETHTOOL_ALL_COPPER_SPEED;
5505 }
5506 advertising |= ADVERTISED_Autoneg;
5507 }
5508 else {
5509 if (cmd->port == PORT_FIBRE) {
5510 if ((cmd->speed != SPEED_1000 &&
5511 cmd->speed != SPEED_2500) ||
5512 (cmd->duplex != DUPLEX_FULL))
5513 goto err_out_unlock;
5514
5515 if (cmd->speed == SPEED_2500 &&
5516 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5517 goto err_out_unlock;
5518 }
5519 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5520 goto err_out_unlock;
5521
5522 autoneg &= ~AUTONEG_SPEED;
5523 req_line_speed = cmd->speed;
5524 req_duplex = cmd->duplex;
5525 advertising = 0;
5526 }
5527
5528 bp->autoneg = autoneg;
5529 bp->advertising = advertising;
5530 bp->req_line_speed = req_line_speed;
5531 bp->req_duplex = req_duplex;
5532
5533 err = bnx2_setup_phy(bp, cmd->port);
5534
5535 err_out_unlock:
5536 spin_unlock_bh(&bp->phy_lock);
5537
5538 return err;
5539 }
5540
5541 static void
5542 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5543 {
5544 struct bnx2 *bp = netdev_priv(dev);
5545
5546 strcpy(info->driver, DRV_MODULE_NAME);
5547 strcpy(info->version, DRV_MODULE_VERSION);
5548 strcpy(info->bus_info, pci_name(bp->pdev));
5549 strcpy(info->fw_version, bp->fw_version);
5550 }
5551
5552 #define BNX2_REGDUMP_LEN (32 * 1024)
5553
5554 static int
5555 bnx2_get_regs_len(struct net_device *dev)
5556 {
5557 return BNX2_REGDUMP_LEN;
5558 }
5559
5560 static void
5561 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5562 {
5563 u32 *p = _p, i, offset;
5564 u8 *orig_p = _p;
5565 struct bnx2 *bp = netdev_priv(dev);
5566 static const u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5567 0x0800, 0x0880, 0x0c00, 0x0c10,
5568 0x0c30, 0x0d08, 0x1000, 0x101c,
5569 0x1040, 0x1048, 0x1080, 0x10a4,
5570 0x1400, 0x1490, 0x1498, 0x14f0,
5571 0x1500, 0x155c, 0x1580, 0x15dc,
5572 0x1600, 0x1658, 0x1680, 0x16d8,
5573 0x1800, 0x1820, 0x1840, 0x1854,
5574 0x1880, 0x1894, 0x1900, 0x1984,
5575 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5576 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5577 0x2000, 0x2030, 0x23c0, 0x2400,
5578 0x2800, 0x2820, 0x2830, 0x2850,
5579 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5580 0x3c00, 0x3c94, 0x4000, 0x4010,
5581 0x4080, 0x4090, 0x43c0, 0x4458,
5582 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5583 0x4fc0, 0x5010, 0x53c0, 0x5444,
5584 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5585 0x5fc0, 0x6000, 0x6400, 0x6428,
5586 0x6800, 0x6848, 0x684c, 0x6860,
5587 0x6888, 0x6910, 0x8000 };
5588
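/* reg_boundaries[] holds (start, end) pairs of implemented register
 * ranges; the loop below reads only inside those windows and leaves
 * the unimplemented holes as the zeros from the memset().
 */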
5589 regs->version = 0;
5590
5591 memset(p, 0, BNX2_REGDUMP_LEN);
5592
5593 if (!netif_running(bp->dev))
5594 return;
5595
5596 i = 0;
5597 offset = reg_boundaries[0];
5598 p += offset;
5599 while (offset < BNX2_REGDUMP_LEN) {
5600 *p++ = REG_RD(bp, offset);
5601 offset += 4;
5602 if (offset == reg_boundaries[i + 1]) {
5603 offset = reg_boundaries[i + 2];
5604 p = (u32 *) (orig_p + offset);
5605 i += 2;
5606 }
5607 }
5608 }
5609
5610 static void
5611 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5612 {
5613 struct bnx2 *bp = netdev_priv(dev);
5614
5615 if (bp->flags & NO_WOL_FLAG) {
5616 wol->supported = 0;
5617 wol->wolopts = 0;
5618 }
5619 else {
5620 wol->supported = WAKE_MAGIC;
5621 if (bp->wol)
5622 wol->wolopts = WAKE_MAGIC;
5623 else
5624 wol->wolopts = 0;
5625 }
5626 memset(&wol->sopass, 0, sizeof(wol->sopass));
5627 }
5628
5629 static int
5630 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5631 {
5632 struct bnx2 *bp = netdev_priv(dev);
5633
5634 if (wol->wolopts & ~WAKE_MAGIC)
5635 return -EINVAL;
5636
5637 if (wol->wolopts & WAKE_MAGIC) {
5638 if (bp->flags & NO_WOL_FLAG)
5639 return -EINVAL;
5640
5641 bp->wol = 1;
5642 }
5643 else {
5644 bp->wol = 0;
5645 }
5646 return 0;
5647 }
5648
5649 static int
5650 bnx2_nway_reset(struct net_device *dev)
5651 {
5652 struct bnx2 *bp = netdev_priv(dev);
5653 u32 bmcr;
5654
5655 if (!(bp->autoneg & AUTONEG_SPEED)) {
5656 return -EINVAL;
5657 }
5658
5659 spin_lock_bh(&bp->phy_lock);
5660
5661 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5662 int rc;
5663
5664 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5665 spin_unlock_bh(&bp->phy_lock);
5666 return rc;
5667 }
5668
5669 /* Force a link down that is visible on the other side */
5670 if (bp->phy_flags & PHY_SERDES_FLAG) {
5671 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5672 spin_unlock_bh(&bp->phy_lock);
5673
5674 msleep(20);
5675
5676 spin_lock_bh(&bp->phy_lock);
5677
5678 bp->current_interval = SERDES_AN_TIMEOUT;
5679 bp->serdes_an_pending = 1;
5680 mod_timer(&bp->timer, jiffies + bp->current_interval);
5681 }
5682
5683 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5684 bmcr &= ~BMCR_LOOPBACK;
5685 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5686
5687 spin_unlock_bh(&bp->phy_lock);
5688
5689 return 0;
5690 }
5691
5692 static int
5693 bnx2_get_eeprom_len(struct net_device *dev)
5694 {
5695 struct bnx2 *bp = netdev_priv(dev);
5696
5697 if (bp->flash_info == NULL)
5698 return 0;
5699
5700 return (int) bp->flash_size;
5701 }
5702
5703 static int
5704 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5705 u8 *eebuf)
5706 {
5707 struct bnx2 *bp = netdev_priv(dev);
5708 int rc;
5709
5710 /* parameters already validated in ethtool_get_eeprom */
5711
5712 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5713
5714 return rc;
5715 }
5716
5717 static int
5718 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5719 u8 *eebuf)
5720 {
5721 struct bnx2 *bp = netdev_priv(dev);
5722 int rc;
5723
5724 /* parameters already validated in ethtool_set_eeprom */
5725
5726 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5727
5728 return rc;
5729 }
5730
5731 static int
5732 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5733 {
5734 struct bnx2 *bp = netdev_priv(dev);
5735
5736 memset(coal, 0, sizeof(struct ethtool_coalesce));
5737
5738 coal->rx_coalesce_usecs = bp->rx_ticks;
5739 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5740 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5741 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5742
5743 coal->tx_coalesce_usecs = bp->tx_ticks;
5744 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5745 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5746 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5747
5748 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5749
5750 return 0;
5751 }
5752
5753 static int
5754 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5755 {
5756 struct bnx2 *bp = netdev_priv(dev);
5757
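/* Clamp each value to the width of its host coalescing register
 * field: tick counts are 10-bit (max 0x3ff) and frame-count trip
 * points are 8-bit (max 0xff).
 */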
5758 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5759 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5760
5761 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5762 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5763
5764 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5765 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5766
5767 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5768 if (bp->rx_quick_cons_trip_int > 0xff)
5769 bp->rx_quick_cons_trip_int = 0xff;
5770
5771 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5772 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5773
5774 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5775 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5776
5777 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5778 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5779
5780 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5781 if (bp->tx_quick_cons_trip_int > 0xff)
5782 bp->tx_quick_cons_trip_int = 0xff;
5783
5784 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5785 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5786 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5787 bp->stats_ticks = USEC_PER_SEC;
5788 }
5789 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5790 bp->stats_ticks &= 0xffff00;
5791
5792 if (netif_running(bp->dev)) {
5793 bnx2_netif_stop(bp);
5794 bnx2_init_nic(bp);
5795 bnx2_netif_start(bp);
5796 }
5797
5798 return 0;
5799 }
5800
5801 static void
5802 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5803 {
5804 struct bnx2 *bp = netdev_priv(dev);
5805
5806 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5807 ering->rx_mini_max_pending = 0;
5808 ering->rx_jumbo_max_pending = 0;
5809
5810 ering->rx_pending = bp->rx_ring_size;
5811 ering->rx_mini_pending = 0;
5812 ering->rx_jumbo_pending = 0;
5813
5814 ering->tx_max_pending = MAX_TX_DESC_CNT;
5815 ering->tx_pending = bp->tx_ring_size;
5816 }
5817
5818 static int
5819 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5820 {
5821 struct bnx2 *bp = netdev_priv(dev);
5822
5823 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5824 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5825 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5826
5827 return -EINVAL;
5828 }
5829 if (netif_running(bp->dev)) {
5830 bnx2_netif_stop(bp);
5831 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5832 bnx2_free_skbs(bp);
5833 bnx2_free_mem(bp);
5834 }
5835
5836 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5837 bp->tx_ring_size = ering->tx_pending;
5838
5839 if (netif_running(bp->dev)) {
5840 int rc;
5841
5842 rc = bnx2_alloc_mem(bp);
5843 if (rc)
5844 return rc;
5845 bnx2_init_nic(bp);
5846 bnx2_netif_start(bp);
5847 }
5848
5849 return 0;
5850 }
5851
5852 static void
5853 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5854 {
5855 struct bnx2 *bp = netdev_priv(dev);
5856
5857 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5858 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5859 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5860 }
5861
5862 static int
5863 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5864 {
5865 struct bnx2 *bp = netdev_priv(dev);
5866
5867 bp->req_flow_ctrl = 0;
5868 if (epause->rx_pause)
5869 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5870 if (epause->tx_pause)
5871 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5872
5873 if (epause->autoneg) {
5874 bp->autoneg |= AUTONEG_FLOW_CTRL;
5875 }
5876 else {
5877 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5878 }
5879
5880 spin_lock_bh(&bp->phy_lock);
5881
5882 bnx2_setup_phy(bp, bp->phy_port);
5883
5884 spin_unlock_bh(&bp->phy_lock);
5885
5886 return 0;
5887 }
5888
5889 static u32
5890 bnx2_get_rx_csum(struct net_device *dev)
5891 {
5892 struct bnx2 *bp = netdev_priv(dev);
5893
5894 return bp->rx_csum;
5895 }
5896
5897 static int
5898 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5899 {
5900 struct bnx2 *bp = netdev_priv(dev);
5901
5902 bp->rx_csum = data;
5903 return 0;
5904 }
5905
5906 static int
5907 bnx2_set_tso(struct net_device *dev, u32 data)
5908 {
5909 struct bnx2 *bp = netdev_priv(dev);
5910
5911 if (data) {
5912 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5913 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5914 dev->features |= NETIF_F_TSO6;
5915 } else
5916 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5917 NETIF_F_TSO_ECN);
5918 return 0;
5919 }
5920
5921 #define BNX2_NUM_STATS 46
5922
5923 static struct {
5924 char string[ETH_GSTRING_LEN];
5925 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5926 { "rx_bytes" },
5927 { "rx_error_bytes" },
5928 { "tx_bytes" },
5929 { "tx_error_bytes" },
5930 { "rx_ucast_packets" },
5931 { "rx_mcast_packets" },
5932 { "rx_bcast_packets" },
5933 { "tx_ucast_packets" },
5934 { "tx_mcast_packets" },
5935 { "tx_bcast_packets" },
5936 { "tx_mac_errors" },
5937 { "tx_carrier_errors" },
5938 { "rx_crc_errors" },
5939 { "rx_align_errors" },
5940 { "tx_single_collisions" },
5941 { "tx_multi_collisions" },
5942 { "tx_deferred" },
5943 { "tx_excess_collisions" },
5944 { "tx_late_collisions" },
5945 { "tx_total_collisions" },
5946 { "rx_fragments" },
5947 { "rx_jabbers" },
5948 { "rx_undersize_packets" },
5949 { "rx_oversize_packets" },
5950 { "rx_64_byte_packets" },
5951 { "rx_65_to_127_byte_packets" },
5952 { "rx_128_to_255_byte_packets" },
5953 { "rx_256_to_511_byte_packets" },
5954 { "rx_512_to_1023_byte_packets" },
5955 { "rx_1024_to_1522_byte_packets" },
5956 { "rx_1523_to_9022_byte_packets" },
5957 { "tx_64_byte_packets" },
5958 { "tx_65_to_127_byte_packets" },
5959 { "tx_128_to_255_byte_packets" },
5960 { "tx_256_to_511_byte_packets" },
5961 { "tx_512_to_1023_byte_packets" },
5962 { "tx_1024_to_1522_byte_packets" },
5963 { "tx_1523_to_9022_byte_packets" },
5964 { "rx_xon_frames" },
5965 { "rx_xoff_frames" },
5966 { "tx_xon_frames" },
5967 { "tx_xoff_frames" },
5968 { "rx_mac_ctrl_frames" },
5969 { "rx_filtered_packets" },
5970 { "rx_discards" },
5971 { "rx_fw_discards" },
5972 };
5973
5974 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5975
5976 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5977 STATS_OFFSET32(stat_IfHCInOctets_hi),
5978 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5979 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5980 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5981 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5982 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5983 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5984 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5985 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5986 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5987 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5988 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5989 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5990 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5991 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5992 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5993 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5994 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5995 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5996 STATS_OFFSET32(stat_EtherStatsCollisions),
5997 STATS_OFFSET32(stat_EtherStatsFragments),
5998 STATS_OFFSET32(stat_EtherStatsJabbers),
5999 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6000 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6001 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6002 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6003 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6004 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6005 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6006 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6007 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6008 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6009 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6010 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6011 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6012 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6013 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6014 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6015 STATS_OFFSET32(stat_XonPauseFramesReceived),
6016 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6017 STATS_OFFSET32(stat_OutXonSent),
6018 STATS_OFFSET32(stat_OutXoffSent),
6019 STATS_OFFSET32(stat_MacControlFramesReceived),
6020 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6021 STATS_OFFSET32(stat_IfInMBUFDiscards),
6022 STATS_OFFSET32(stat_FwRxDrop),
6023 };
6024
6025 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6026 * skipped because of errata.
6027 */
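/* Width in bytes of each counter in bnx2_stats_offset_arr[]; a 0
 * entry marks a counter that is unreadable on that chip and is
 * reported as zero.
 */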
6028 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6029 8,0,8,8,8,8,8,8,8,8,
6030 4,0,4,4,4,4,4,4,4,4,
6031 4,4,4,4,4,4,4,4,4,4,
6032 4,4,4,4,4,4,4,4,4,4,
6033 4,4,4,4,4,4,
6034 };
6035
6036 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6037 8,0,8,8,8,8,8,8,8,8,
6038 4,4,4,4,4,4,4,4,4,4,
6039 4,4,4,4,4,4,4,4,4,4,
6040 4,4,4,4,4,4,4,4,4,4,
6041 4,4,4,4,4,4,
6042 };
6043
6044 #define BNX2_NUM_TESTS 6
6045
6046 static struct {
6047 char string[ETH_GSTRING_LEN];
6048 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6049 { "register_test (offline)" },
6050 { "memory_test (offline)" },
6051 { "loopback_test (offline)" },
6052 { "nvram_test (online)" },
6053 { "interrupt_test (online)" },
6054 { "link_test (online)" },
6055 };
6056
6057 static int
6058 bnx2_self_test_count(struct net_device *dev)
6059 {
6060 return BNX2_NUM_TESTS;
6061 }
6062
6063 static void
6064 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6065 {
6066 struct bnx2 *bp = netdev_priv(dev);
6067
6068 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6069 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6070 int i;
6071
6072 bnx2_netif_stop(bp);
6073 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6074 bnx2_free_skbs(bp);
6075
6076 if (bnx2_test_registers(bp) != 0) {
6077 buf[0] = 1;
6078 etest->flags |= ETH_TEST_FL_FAILED;
6079 }
6080 if (bnx2_test_memory(bp) != 0) {
6081 buf[1] = 1;
6082 etest->flags |= ETH_TEST_FL_FAILED;
6083 }
6084 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6085 etest->flags |= ETH_TEST_FL_FAILED;
6086
6087 if (!netif_running(bp->dev)) {
6088 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6089 }
6090 else {
6091 bnx2_init_nic(bp);
6092 bnx2_netif_start(bp);
6093 }
6094
6095 /* wait for link up */
6096 for (i = 0; i < 7; i++) {
6097 if (bp->link_up)
6098 break;
6099 msleep_interruptible(1000);
6100 }
6101 }
6102
6103 if (bnx2_test_nvram(bp) != 0) {
6104 buf[3] = 1;
6105 etest->flags |= ETH_TEST_FL_FAILED;
6106 }
6107 if (bnx2_test_intr(bp) != 0) {
6108 buf[4] = 1;
6109 etest->flags |= ETH_TEST_FL_FAILED;
6110 }
6111
6112 if (bnx2_test_link(bp) != 0) {
6113 buf[5] = 1;
6114 etest->flags |= ETH_TEST_FL_FAILED;
6115
6116 }
6117 }
6118
6119 static void
6120 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6121 {
6122 switch (stringset) {
6123 case ETH_SS_STATS:
6124 memcpy(buf, bnx2_stats_str_arr,
6125 sizeof(bnx2_stats_str_arr));
6126 break;
6127 case ETH_SS_TEST:
6128 memcpy(buf, bnx2_tests_str_arr,
6129 sizeof(bnx2_tests_str_arr));
6130 break;
6131 }
6132 }
6133
6134 static int
6135 bnx2_get_stats_count(struct net_device *dev)
6136 {
6137 return BNX2_NUM_STATS;
6138 }
6139
6140 static void
6141 bnx2_get_ethtool_stats(struct net_device *dev,
6142 struct ethtool_stats *stats, u64 *buf)
6143 {
6144 struct bnx2 *bp = netdev_priv(dev);
6145 int i;
6146 u32 *hw_stats = (u32 *) bp->stats_blk;
6147 u8 *stats_len_arr = NULL;
6148
6149 if (hw_stats == NULL) {
6150 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6151 return;
6152 }
6153
6154 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6155 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6156 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6157 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6158 stats_len_arr = bnx2_5706_stats_len_arr;
6159 else
6160 stats_len_arr = bnx2_5708_stats_len_arr;
6161
6162 for (i = 0; i < BNX2_NUM_STATS; i++) {
6163 if (stats_len_arr[i] == 0) {
6164 /* skip this counter */
6165 buf[i] = 0;
6166 continue;
6167 }
6168 if (stats_len_arr[i] == 4) {
6169 /* 4-byte counter */
6170 buf[i] = (u64)
6171 *(hw_stats + bnx2_stats_offset_arr[i]);
6172 continue;
6173 }
6174 /* 8-byte counter */
6175 buf[i] = (((u64) *(hw_stats +
6176 bnx2_stats_offset_arr[i])) << 32) +
6177 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6178 }
6179 }
6180
6181 static int
6182 bnx2_phys_id(struct net_device *dev, u32 data)
6183 {
6184 struct bnx2 *bp = netdev_priv(dev);
6185 int i;
6186 u32 save;
6187
6188 if (data == 0)
6189 data = 2;
6190
6191 save = REG_RD(bp, BNX2_MISC_CFG);
6192 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6193
6194 for (i = 0; i < (data * 2); i++) {
6195 if ((i % 2) == 0) {
6196 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6197 }
6198 else {
6199 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6200 BNX2_EMAC_LED_1000MB_OVERRIDE |
6201 BNX2_EMAC_LED_100MB_OVERRIDE |
6202 BNX2_EMAC_LED_10MB_OVERRIDE |
6203 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6204 BNX2_EMAC_LED_TRAFFIC);
6205 }
6206 msleep_interruptible(500);
6207 if (signal_pending(current))
6208 break;
6209 }
6210 REG_WR(bp, BNX2_EMAC_LED, 0);
6211 REG_WR(bp, BNX2_MISC_CFG, save);
6212 return 0;
6213 }
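/* bnx2_phys_id() implements ethtool's locate-adapter blink: data is the
 * number of seconds to blink, with 0 treated here as a 2-second default
 * rather than "blink until interrupted". Every half second the LED
 * alternates between the bare override (off) and all speed/traffic
 * overrides lit, and the saved BNX2_MISC_CFG value is restored on exit.
 */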
6214
6215 static int
6216 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6217 {
6218 struct bnx2 *bp = netdev_priv(dev);
6219
6220 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
6221 		return ethtool_op_set_tx_ipv6_csum(dev, data);
6222 	else
6223 		return ethtool_op_set_tx_csum(dev, data);
6224 }
6225
6226 static const struct ethtool_ops bnx2_ethtool_ops = {
6227 .get_settings = bnx2_get_settings,
6228 .set_settings = bnx2_set_settings,
6229 .get_drvinfo = bnx2_get_drvinfo,
6230 .get_regs_len = bnx2_get_regs_len,
6231 .get_regs = bnx2_get_regs,
6232 .get_wol = bnx2_get_wol,
6233 .set_wol = bnx2_set_wol,
6234 .nway_reset = bnx2_nway_reset,
6235 .get_link = ethtool_op_get_link,
6236 .get_eeprom_len = bnx2_get_eeprom_len,
6237 .get_eeprom = bnx2_get_eeprom,
6238 .set_eeprom = bnx2_set_eeprom,
6239 .get_coalesce = bnx2_get_coalesce,
6240 .set_coalesce = bnx2_set_coalesce,
6241 .get_ringparam = bnx2_get_ringparam,
6242 .set_ringparam = bnx2_set_ringparam,
6243 .get_pauseparam = bnx2_get_pauseparam,
6244 .set_pauseparam = bnx2_set_pauseparam,
6245 .get_rx_csum = bnx2_get_rx_csum,
6246 .set_rx_csum = bnx2_set_rx_csum,
6247 .get_tx_csum = ethtool_op_get_tx_csum,
6248 .set_tx_csum = bnx2_set_tx_csum,
6249 .get_sg = ethtool_op_get_sg,
6250 .set_sg = ethtool_op_set_sg,
6251 .get_tso = ethtool_op_get_tso,
6252 .set_tso = bnx2_set_tso,
6253 .self_test_count = bnx2_self_test_count,
6254 .self_test = bnx2_self_test,
6255 .get_strings = bnx2_get_strings,
6256 .phys_id = bnx2_phys_id,
6257 .get_stats_count = bnx2_get_stats_count,
6258 .get_ethtool_stats = bnx2_get_ethtool_stats,
6259 .get_perm_addr = ethtool_op_get_perm_addr,
6260 };
6261
6262 /* Called with rtnl_lock */
6263 static int
6264 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6265 {
6266 struct mii_ioctl_data *data = if_mii(ifr);
6267 struct bnx2 *bp = netdev_priv(dev);
6268 int err;
6269
6270 	switch (cmd) {
6271 case SIOCGMIIPHY:
6272 data->phy_id = bp->phy_addr;
6273
6274 /* fallthru */
6275 case SIOCGMIIREG: {
6276 u32 mii_regval;
6277
6278 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6279 return -EOPNOTSUPP;
6280
6281 if (!netif_running(dev))
6282 return -EAGAIN;
6283
6284 spin_lock_bh(&bp->phy_lock);
6285 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6286 spin_unlock_bh(&bp->phy_lock);
6287
6288 data->val_out = mii_regval;
6289
6290 return err;
6291 }
6292
6293 case SIOCSMIIREG:
6294 if (!capable(CAP_NET_ADMIN))
6295 return -EPERM;
6296
6297 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6298 return -EOPNOTSUPP;
6299
6300 if (!netif_running(dev))
6301 return -EAGAIN;
6302
6303 spin_lock_bh(&bp->phy_lock);
6304 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6305 spin_unlock_bh(&bp->phy_lock);
6306
6307 return err;
6308
6309 default:
6310 /* do nothing */
6311 break;
6312 }
6313 return -EOPNOTSUPP;
6314 }
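/* MII ioctl notes: SIOCGMIIPHY deliberately falls through so a single call
 * returns both the PHY address and a register read. Register numbers are
 * masked to the 5-bit MII address space (& 0x1f). When REMOTE_PHY_CAP_FLAG
 * is set the PHY is owned by the management firmware, so raw MDIO access is
 * refused with -EOPNOTSUPP, presumably to avoid conflicting with firmware
 * accesses.
 */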
6315
6316 /* Called with rtnl_lock */
6317 static int
6318 bnx2_change_mac_addr(struct net_device *dev, void *p)
6319 {
6320 struct sockaddr *addr = p;
6321 struct bnx2 *bp = netdev_priv(dev);
6322
6323 if (!is_valid_ether_addr(addr->sa_data))
6324 return -EINVAL;
6325
6326 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6327 if (netif_running(dev))
6328 bnx2_set_mac_addr(bp);
6329
6330 return 0;
6331 }
6332
6333 /* Called with rtnl_lock */
6334 static int
6335 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6336 {
6337 struct bnx2 *bp = netdev_priv(dev);
6338
6339 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6340 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6341 return -EINVAL;
6342
6343 dev->mtu = new_mtu;
6344 if (netif_running(dev)) {
6345 bnx2_netif_stop(bp);
6346
6347 bnx2_init_nic(bp);
6348
6349 bnx2_netif_start(bp);
6350 }
6351 return 0;
6352 }
6353
6354 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6355 static void
6356 poll_bnx2(struct net_device *dev)
6357 {
6358 struct bnx2 *bp = netdev_priv(dev);
6359
6360 disable_irq(bp->pdev->irq);
6361 bnx2_interrupt(bp->pdev->irq, dev);
6362 enable_irq(bp->pdev->irq);
6363 }
6364 #endif
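/* Netpoll path (e.g. netconsole): with normal interrupt delivery
 * unavailable, the device's IRQ is masked and the regular interrupt
 * handler is invoked synchronously to drain the rings.
 */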
6365
6366 static void __devinit
6367 bnx2_get_5709_media(struct bnx2 *bp)
6368 {
6369 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6370 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6371 u32 strap;
6372
6373 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6374 return;
6375 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6376 bp->phy_flags |= PHY_SERDES_FLAG;
6377 return;
6378 }
6379
6380 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6381 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6382 else
6383 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6384
6385 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6386 switch (strap) {
6387 case 0x4:
6388 case 0x5:
6389 case 0x6:
6390 bp->phy_flags |= PHY_SERDES_FLAG;
6391 return;
6392 }
6393 } else {
6394 switch (strap) {
6395 case 0x1:
6396 case 0x2:
6397 case 0x4:
6398 bp->phy_flags |= PHY_SERDES_FLAG;
6399 return;
6400 }
6401 }
6402 }
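/* Media detection on the dual-media 5709: bond ID "C" denotes a copper
 * part and "S" a SerDes part. For other bond IDs the media comes from the
 * PHY strap pins -- the PHY_CTRL override field (shifted down by 21) when
 * the strap-override bit is set, otherwise the raw strap field (shifted
 * down by 8) -- with strap values 4-6 meaning SerDes on PCI function 0,
 * and 1, 2 or 4 meaning SerDes on the other function.
 */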
6403
6404 static void __devinit
6405 bnx2_get_pci_speed(struct bnx2 *bp)
6406 {
6407 u32 reg;
6408
6409 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6410 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6411 u32 clkreg;
6412
6413 bp->flags |= PCIX_FLAG;
6414
6415 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6416
6417 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6418 switch (clkreg) {
6419 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6420 bp->bus_speed_mhz = 133;
6421 break;
6422
6423 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6424 bp->bus_speed_mhz = 100;
6425 break;
6426
6427 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6428 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6429 bp->bus_speed_mhz = 66;
6430 break;
6431
6432 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6433 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6434 bp->bus_speed_mhz = 50;
6435 break;
6436
6437 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6438 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6439 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6440 bp->bus_speed_mhz = 33;
6441 break;
6442 }
6443 }
6444 else {
6445 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6446 bp->bus_speed_mhz = 66;
6447 else
6448 bp->bus_speed_mhz = 33;
6449 }
6450
6451 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6452 bp->flags |= PCI_32BIT_FLAG;
6453
6454 }
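/* The clock-detect field is not an exact frequency; the switch above
 * buckets each detected value into the nominal bus speed reported to the
 * user (a 95 MHz detect is reported as 100 MHz, an 80 MHz detect as
 * 66 MHz, and so on). On plain PCI, only the M66EN bit distinguishes
 * 66 MHz from 33 MHz.
 */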
6455
6456 static int __devinit
6457 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6458 {
6459 struct bnx2 *bp;
6460 unsigned long mem_len;
6461 int rc, i, j;
6462 u32 reg;
6463 u64 dma_mask, persist_dma_mask;
6464
6465 SET_MODULE_OWNER(dev);
6466 SET_NETDEV_DEV(dev, &pdev->dev);
6467 bp = netdev_priv(dev);
6468
6469 bp->flags = 0;
6470 bp->phy_flags = 0;
6471
6472 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6473 rc = pci_enable_device(pdev);
6474 if (rc) {
6475 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6476 goto err_out;
6477 }
6478
6479 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6480 dev_err(&pdev->dev,
6481 "Cannot find PCI device base address, aborting.\n");
6482 rc = -ENODEV;
6483 goto err_out_disable;
6484 }
6485
6486 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6487 if (rc) {
6488 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6489 goto err_out_disable;
6490 }
6491
6492 pci_set_master(pdev);
6493
6494 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6495 if (bp->pm_cap == 0) {
6496 dev_err(&pdev->dev,
6497 "Cannot find power management capability, aborting.\n");
6498 rc = -EIO;
6499 goto err_out_release;
6500 }
6501
6502 bp->dev = dev;
6503 bp->pdev = pdev;
6504
6505 spin_lock_init(&bp->phy_lock);
6506 spin_lock_init(&bp->indirect_lock);
6507 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6508
6509 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6510 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6511 dev->mem_end = dev->mem_start + mem_len;
6512 dev->irq = pdev->irq;
6513
6514 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6515
6516 if (!bp->regview) {
6517 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6518 rc = -ENOMEM;
6519 goto err_out_release;
6520 }
6521
6522 	/* Configure byte swap and enable write to the reg_window registers.
6523 	 * Rely on the CPU to do target byte swapping on big endian systems;
6524 	 * the chip's target access swapping will not swap all accesses.
6525 	 */
6526 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6527 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6528 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6529
6530 bnx2_set_power_state(bp, PCI_D0);
6531
6532 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6533
6534 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6535 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6536 dev_err(&pdev->dev,
6537 "Cannot find PCIE capability, aborting.\n");
6538 rc = -EIO;
6539 goto err_out_unmap;
6540 }
6541 bp->flags |= PCIE_FLAG;
6542 } else {
6543 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6544 if (bp->pcix_cap == 0) {
6545 dev_err(&pdev->dev,
6546 "Cannot find PCIX capability, aborting.\n");
6547 rc = -EIO;
6548 goto err_out_unmap;
6549 }
6550 }
6551
6552 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6553 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6554 bp->flags |= MSI_CAP_FLAG;
6555 }
6556
6557 /* 5708 cannot support DMA addresses > 40-bit. */
6558 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6559 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6560 else
6561 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6562
6563 /* Configure DMA attributes. */
6564 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6565 dev->features |= NETIF_F_HIGHDMA;
6566 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6567 if (rc) {
6568 dev_err(&pdev->dev,
6569 "pci_set_consistent_dma_mask failed, aborting.\n");
6570 goto err_out_unmap;
6571 }
6572 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6573 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6574 goto err_out_unmap;
6575 }
6576
6577 if (!(bp->flags & PCIE_FLAG))
6578 bnx2_get_pci_speed(bp);
6579
6580 /* 5706A0 may falsely detect SERR and PERR. */
6581 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6582 reg = REG_RD(bp, PCI_COMMAND);
6583 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6584 REG_WR(bp, PCI_COMMAND, reg);
6585 }
6586 	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6587 		!(bp->flags & PCIX_FLAG)) {
6588 		dev_err(&pdev->dev,
6589 			"5706 A1 can only be used in a PCIX bus, aborting.\n");
6590 		rc = -EPERM;
6591 		goto err_out_unmap;
6592 	}
6593
6594 bnx2_init_nvram(bp);
6595
6596 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6597
6598 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6599 BNX2_SHM_HDR_SIGNATURE_SIG) {
6600 u32 off = PCI_FUNC(pdev->devfn) << 2;
6601
6602 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6603 } else
6604 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6605
6606 /* Get the permanent MAC address. First we need to make sure the
6607 * firmware is actually running.
6608 */
6609 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6610
6611 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6612 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6613 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6614 rc = -ENODEV;
6615 goto err_out_unmap;
6616 }
6617
6618 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6619 for (i = 0, j = 0; i < 3; i++) {
6620 u8 num, k, skip0;
6621
6622 num = (u8) (reg >> (24 - (i * 8)));
6623 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6624 if (num >= k || !skip0 || k == 1) {
6625 bp->fw_version[j++] = (num / k) + '0';
6626 skip0 = 0;
6627 }
6628 }
6629 if (i != 2)
6630 bp->fw_version[j++] = '.';
6631 }
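/* Worked example of the bootcode version decode above (example register
 * value, for illustration only): the top three bytes of
 * BNX2_DEV_INFO_BC_REV hold the major/minor/fix numbers in binary, so
 * reg = 0x01020300 yields "1.2.3". The inner loop converts each byte to
 * decimal while suppressing leading zeros (k counts down 100, 10, 1), and
 * the k == 1 case guarantees at least one digit per field.
 */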
6632 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6633 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6634 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6635 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6636 int i;
6637 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6638
6639 bp->fw_version[j++] = ' ';
6640 for (i = 0; i < 3; i++) {
6641 reg = REG_RD_IND(bp, addr + i * 4);
6642 reg = swab32(reg);
6643 memcpy(&bp->fw_version[j], &reg, 4);
6644 j += 4;
6645 }
6646 }
6647
6648 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6649 bp->mac_addr[0] = (u8) (reg >> 8);
6650 bp->mac_addr[1] = (u8) reg;
6651
6652 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6653 bp->mac_addr[2] = (u8) (reg >> 24);
6654 bp->mac_addr[3] = (u8) (reg >> 16);
6655 bp->mac_addr[4] = (u8) (reg >> 8);
6656 bp->mac_addr[5] = (u8) reg;
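/* MAC address layout in shared memory: MAC_UPPER carries bytes 0-1 of the
 * station address in its low 16 bits and MAC_LOWER carries bytes 2-5, both
 * most-significant byte first. For example (illustrative values),
 * MAC_UPPER = 0x0000001a and MAC_LOWER = 0x2b3c4d5e decode to
 * 00:1a:2b:3c:4d:5e.
 */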
6657
6658 bp->tx_ring_size = MAX_TX_DESC_CNT;
6659 bnx2_set_rx_ring_size(bp, 255);
6660
6661 bp->rx_csum = 1;
6662
6663 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6664
6665 bp->tx_quick_cons_trip_int = 20;
6666 bp->tx_quick_cons_trip = 20;
6667 bp->tx_ticks_int = 80;
6668 bp->tx_ticks = 80;
6669
6670 bp->rx_quick_cons_trip_int = 6;
6671 bp->rx_quick_cons_trip = 6;
6672 bp->rx_ticks_int = 18;
6673 bp->rx_ticks = 18;
6674
6675 bp->stats_ticks = 1000000 & 0xffff00;
6676
6677 bp->timer_interval = HZ;
6678 bp->current_interval = HZ;
6679
6680 bp->phy_addr = 1;
6681
6682 /* Disable WOL support if we are running on a SERDES chip. */
6683 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6684 bnx2_get_5709_media(bp);
6685 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6686 bp->phy_flags |= PHY_SERDES_FLAG;
6687
6688 bp->phy_port = PORT_TP;
6689 if (bp->phy_flags & PHY_SERDES_FLAG) {
6690 bp->phy_port = PORT_FIBRE;
6691 bp->flags |= NO_WOL_FLAG;
6692 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6693 bp->phy_addr = 2;
6694 reg = REG_RD_IND(bp, bp->shmem_base +
6695 BNX2_SHARED_HW_CFG_CONFIG);
6696 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6697 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6698 }
6699 bnx2_init_remote_phy(bp);
6700
6701 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6702 CHIP_NUM(bp) == CHIP_NUM_5708)
6703 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6704 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6705 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6706
6707 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6708 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6709 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6710 bp->flags |= NO_WOL_FLAG;
6711
6712 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6713 bp->tx_quick_cons_trip_int =
6714 bp->tx_quick_cons_trip;
6715 bp->tx_ticks_int = bp->tx_ticks;
6716 bp->rx_quick_cons_trip_int =
6717 bp->rx_quick_cons_trip;
6718 bp->rx_ticks_int = bp->rx_ticks;
6719 bp->comp_prod_trip_int = bp->comp_prod_trip;
6720 bp->com_ticks_int = bp->com_ticks;
6721 bp->cmd_ticks_int = bp->cmd_ticks;
6722 }
6723
6724 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6725 *
6726 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6727 * with byte enables disabled on the unused 32-bit word. This is legal
6728 * but causes problems on the AMD 8132 which will eventually stop
6729 * responding after a while.
6730 *
6731 * AMD believes this incompatibility is unique to the 5706, and
6732 * prefers to locally disable MSI rather than globally disabling it.
6733 */
6734 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6735 struct pci_dev *amd_8132 = NULL;
6736
6737 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6738 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6739 amd_8132))) {
6740
6741 if (amd_8132->revision >= 0x10 &&
6742 amd_8132->revision <= 0x13) {
6743 disable_msi = 1;
6744 pci_dev_put(amd_8132);
6745 break;
6746 }
6747 }
6748 }
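/* Reference-count note on the loop above: pci_get_device() drops the
 * reference on the device passed in and returns the next match with a
 * fresh reference, so iterating until NULL leaks nothing; only the early
 * break has to release its match with pci_dev_put() explicitly.
 */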
6749
6750 bnx2_set_default_link(bp);
6751 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6752
6753 init_timer(&bp->timer);
6754 bp->timer.expires = RUN_AT(bp->timer_interval);
6755 bp->timer.data = (unsigned long) bp;
6756 bp->timer.function = bnx2_timer;
6757
6758 return 0;
6759
6760 err_out_unmap:
6761 if (bp->regview) {
6762 iounmap(bp->regview);
6763 bp->regview = NULL;
6764 }
6765
6766 err_out_release:
6767 pci_release_regions(pdev);
6768
6769 err_out_disable:
6770 pci_disable_device(pdev);
6771 pci_set_drvdata(pdev, NULL);
6772
6773 err_out:
6774 return rc;
6775 }
6776
6777 static char * __devinit
6778 bnx2_bus_string(struct bnx2 *bp, char *str)
6779 {
6780 char *s = str;
6781
6782 if (bp->flags & PCIE_FLAG) {
6783 s += sprintf(s, "PCI Express");
6784 } else {
6785 s += sprintf(s, "PCI");
6786 if (bp->flags & PCIX_FLAG)
6787 s += sprintf(s, "-X");
6788 if (bp->flags & PCI_32BIT_FLAG)
6789 s += sprintf(s, " 32-bit");
6790 else
6791 s += sprintf(s, " 64-bit");
6792 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6793 }
6794 return str;
6795 }
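/* bnx2_bus_string() assumes the caller's buffer can hold the longest
 * form, "PCI-X 64-bit 133MHz" (20 bytes including the terminator);
 * bnx2_init_one() below passes a 40-byte buffer, leaving ample headroom.
 */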
6796
6797 static int __devinit
6798 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6799 {
6800 static int version_printed = 0;
6801 struct net_device *dev = NULL;
6802 struct bnx2 *bp;
6803 int rc, i;
6804 char str[40];
6805
6806 if (version_printed++ == 0)
6807 printk(KERN_INFO "%s", version);
6808
6809 	/* dev and its private area are zeroed by alloc_etherdev */
6810 dev = alloc_etherdev(sizeof(*bp));
6811
6812 if (!dev)
6813 return -ENOMEM;
6814
6815 rc = bnx2_init_board(pdev, dev);
6816 if (rc < 0) {
6817 free_netdev(dev);
6818 return rc;
6819 }
6820
6821 dev->open = bnx2_open;
6822 dev->hard_start_xmit = bnx2_start_xmit;
6823 dev->stop = bnx2_close;
6824 dev->get_stats = bnx2_get_stats;
6825 dev->set_multicast_list = bnx2_set_rx_mode;
6826 dev->do_ioctl = bnx2_ioctl;
6827 dev->set_mac_address = bnx2_change_mac_addr;
6828 dev->change_mtu = bnx2_change_mtu;
6829 dev->tx_timeout = bnx2_tx_timeout;
6830 dev->watchdog_timeo = TX_TIMEOUT;
6831 #ifdef BCM_VLAN
6832 dev->vlan_rx_register = bnx2_vlan_rx_register;
6833 #endif
6834 dev->poll = bnx2_poll;
6835 dev->ethtool_ops = &bnx2_ethtool_ops;
6836 dev->weight = 64;
6837
6838 bp = netdev_priv(dev);
6839
6840 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6841 dev->poll_controller = poll_bnx2;
6842 #endif
6843
6844 pci_set_drvdata(pdev, dev);
6845
6846 memcpy(dev->dev_addr, bp->mac_addr, 6);
6847 memcpy(dev->perm_addr, bp->mac_addr, 6);
6848 bp->name = board_info[ent->driver_data].name;
6849
6850 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6851 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6852 dev->features |= NETIF_F_IPV6_CSUM;
6853
6854 #ifdef BCM_VLAN
6855 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6856 #endif
6857 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6858 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6859 dev->features |= NETIF_F_TSO6;
6860
6861 if ((rc = register_netdev(dev))) {
6862 dev_err(&pdev->dev, "Cannot register net device\n");
6863 if (bp->regview)
6864 iounmap(bp->regview);
6865 pci_release_regions(pdev);
6866 pci_disable_device(pdev);
6867 pci_set_drvdata(pdev, NULL);
6868 free_netdev(dev);
6869 return rc;
6870 }
6871
6872 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6873 "IRQ %d, ",
6874 dev->name,
6875 bp->name,
6876 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6877 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6878 bnx2_bus_string(bp, str),
6879 dev->base_addr,
6880 bp->pdev->irq);
6881
6882 printk("node addr ");
6883 for (i = 0; i < 6; i++)
6884 printk("%2.2x", dev->dev_addr[i]);
6885 printk("\n");
6886
6887 return 0;
6888 }
6889
6890 static void __devexit
6891 bnx2_remove_one(struct pci_dev *pdev)
6892 {
6893 struct net_device *dev = pci_get_drvdata(pdev);
6894 struct bnx2 *bp = netdev_priv(dev);
6895
6896 flush_scheduled_work();
6897
6898 unregister_netdev(dev);
6899
6900 if (bp->regview)
6901 iounmap(bp->regview);
6902
6903 free_netdev(dev);
6904 pci_release_regions(pdev);
6905 pci_disable_device(pdev);
6906 pci_set_drvdata(pdev, NULL);
6907 }
6908
6909 static int
6910 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6911 {
6912 struct net_device *dev = pci_get_drvdata(pdev);
6913 struct bnx2 *bp = netdev_priv(dev);
6914 u32 reset_code;
6915
6916 if (!netif_running(dev))
6917 return 0;
6918
6919 flush_scheduled_work();
6920 bnx2_netif_stop(bp);
6921 netif_device_detach(dev);
6922 del_timer_sync(&bp->timer);
6923 if (bp->flags & NO_WOL_FLAG)
6924 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6925 else if (bp->wol)
6926 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6927 else
6928 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6929 bnx2_reset_chip(bp, reset_code);
6930 bnx2_free_skbs(bp);
6931 pci_save_state(pdev);
6932 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6933 return 0;
6934 }
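/* The reset code sent to the bootcode in bnx2_suspend() encodes the
 * desired low-power behavior: UNLOAD_LNK_DN when the part cannot do WoL
 * (the link may drop), SUSPEND_WOL to keep the MAC/PHY alive for wake-up
 * packets, and SUSPEND_NO_WOL otherwise. The interpretation is the
 * firmware's; the driver only reports intent here.
 */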
6935
6936 static int
6937 bnx2_resume(struct pci_dev *pdev)
6938 {
6939 struct net_device *dev = pci_get_drvdata(pdev);
6940 struct bnx2 *bp = netdev_priv(dev);
6941
6942 if (!netif_running(dev))
6943 return 0;
6944
6945 pci_restore_state(pdev);
6946 bnx2_set_power_state(bp, PCI_D0);
6947 netif_device_attach(dev);
6948 bnx2_init_nic(bp);
6949 bnx2_netif_start(bp);
6950 return 0;
6951 }
6952
6953 static struct pci_driver bnx2_pci_driver = {
6954 .name = DRV_MODULE_NAME,
6955 .id_table = bnx2_pci_tbl,
6956 .probe = bnx2_init_one,
6957 .remove = __devexit_p(bnx2_remove_one),
6958 .suspend = bnx2_suspend,
6959 .resume = bnx2_resume,
6960 };
6961
6962 static int __init bnx2_init(void)
6963 {
6964 return pci_register_driver(&bnx2_pci_driver);
6965 }
6966
6967 static void __exit bnx2_cleanup(void)
6968 {
6969 pci_unregister_driver(&bnx2_pci_driver);
6970 }
6971
6972 module_init(bnx2_init);
6973 module_exit(bnx2_cleanup);
6974
6975
6976