/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.0.18"
#define DRV_MODULE_RELDATE	"Oct 7, 2010"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.0.15.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.0.17.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

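/* Return the number of TX descriptors still available on the ring. */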
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

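/* Indirect register access: reads and writes go through the PCICFG
 * register window, serialized by indirect_lock.
 */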
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

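/* Write one word of context memory.  On the 5709 the write is posted
 * through CTX_CTX_CTRL and briefly polled until the WRITE_REQ bit
 * clears; older chips take the address/data pair directly.
 */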
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

#ifdef BCM_CNIC
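/* Hooks used by the companion cnic offload driver (see cnic_if.h) to
 * register with and control this device.
 */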
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

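/* MDIO access to the PHY.  Auto-polling is temporarily turned off
 * around each read and write and restored afterwards.
 */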
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

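/* Mask interrupts on every vector; the final read flushes the posted
 * writes.
 */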
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

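/* Quiesce the interface: optionally stop CNIC, disable NAPI and TX,
 * then mask and synchronize all interrupt vectors.
 */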
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

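/* Allocate the status block(s), statistics block, 5709 context pages
 * and the TX/RX rings.  bnx2_free_mem() unwinds everything on failure.
 */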
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

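/* Report the current link state to the bootcode through shared memory. */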
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

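/* Resolve TX/RX pause from the local and remote advertisements, or from
 * the forced settings when flow control autoneg is off.
 */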
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

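/* Per-chip link-up helpers: latch the negotiated speed and duplex from
 * the PHY into bp->line_speed and bp->duplex.
 */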
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

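/* Program the EMAC for the resolved speed, duplex and pause settings. */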
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

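/* Enable 2.5G advertisement on capable SerDes PHYs.  Returns 1 if it
 * was already advertised, 0 if it had to be turned on.
 */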
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

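/* Force the SerDes to 2.5G using the chip-specific sequence for the
 * 5709 or 5708.
 */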
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

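/* Re-evaluate the link state from the PHY and update the MAC to match. */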
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

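/* With a remote PHY, link configuration is requested from the bootcode
 * through shared memory rather than by programming the PHY directly.
 */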
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

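/* Handle a link event reported by the bootcode for a remote PHY. */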
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}

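/* Configure a copper PHY for autonegotiation or forced speed/duplex. */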
2044 static int
2045 bnx2_setup_copper_phy(struct bnx2 *bp)
2046 __releases(&bp->phy_lock)
2047 __acquires(&bp->phy_lock)
2048 {
2049 u32 bmcr;
2050 u32 new_bmcr;
2051
2052 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2053
2054 if (bp->autoneg & AUTONEG_SPEED) {
2055 u32 adv_reg, adv1000_reg;
2056 u32 new_adv_reg = 0;
2057 u32 new_adv1000_reg = 0;
2058
2059 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2060 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2061 ADVERTISE_PAUSE_ASYM);
2062
2063 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2064 adv1000_reg &= PHY_ALL_1000_SPEED;
2065
2066 if (bp->advertising & ADVERTISED_10baseT_Half)
2067 new_adv_reg |= ADVERTISE_10HALF;
2068 if (bp->advertising & ADVERTISED_10baseT_Full)
2069 new_adv_reg |= ADVERTISE_10FULL;
2070 if (bp->advertising & ADVERTISED_100baseT_Half)
2071 new_adv_reg |= ADVERTISE_100HALF;
2072 if (bp->advertising & ADVERTISED_100baseT_Full)
2073 new_adv_reg |= ADVERTISE_100FULL;
2074 if (bp->advertising & ADVERTISED_1000baseT_Full)
2075 new_adv1000_reg |= ADVERTISE_1000FULL;
2076
2077 new_adv_reg |= ADVERTISE_CSMA;
2078
2079 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2080
2081 if ((adv1000_reg != new_adv1000_reg) ||
2082 (adv_reg != new_adv_reg) ||
2083 ((bmcr & BMCR_ANENABLE) == 0)) {
2084
2085 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2086 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2087 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2088 BMCR_ANENABLE);
2089 }
2090 else if (bp->link_up) {
2091 /* Flow ctrl may have changed from auto to forced
2092  * or vice-versa. */
2093
2094 bnx2_resolve_flow_ctrl(bp);
2095 bnx2_set_mac_link(bp);
2096 }
2097 return 0;
2098 }
2099
2100 new_bmcr = 0;
2101 if (bp->req_line_speed == SPEED_100) {
2102 new_bmcr |= BMCR_SPEED100;
2103 }
2104 if (bp->req_duplex == DUPLEX_FULL) {
2105 new_bmcr |= BMCR_FULLDPLX;
2106 }
2107 if (new_bmcr != bmcr) {
2108 u32 bmsr;
2109
2110 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2111 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2112
2113 if (bmsr & BMSR_LSTATUS) {
2114 /* Force link down */
2115 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2116 spin_unlock_bh(&bp->phy_lock);
2117 msleep(50);
2118 spin_lock_bh(&bp->phy_lock);
2119
2120 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2121 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2122 }
2123
2124 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2125
2126 /* Normally, the new speed is set up after the link has
2127  * gone down and come back up.  In some cases the link will not
2128  * go down, so we need to set up the new speed here.
2129  */
2130 if (bmsr & BMSR_LSTATUS) {
2131 bp->line_speed = bp->req_line_speed;
2132 bp->duplex = bp->req_duplex;
2133 bnx2_resolve_flow_ctrl(bp);
2134 bnx2_set_mac_link(bp);
2135 }
2136 } else {
2137 bnx2_resolve_flow_ctrl(bp);
2138 bnx2_set_mac_link(bp);
2139 }
2140 return 0;
2141 }
2142
2143 static int
2144 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2145 __releases(&bp->phy_lock)
2146 __acquires(&bp->phy_lock)
2147 {
2148 if (bp->loopback == MAC_LOOPBACK)
2149 return 0;
2150
2151 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2152 return bnx2_setup_serdes_phy(bp, port);
2153 }
2154 else {
2155 return bnx2_setup_copper_phy(bp);
2156 }
2157 }
2158
2159 static int
2160 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2161 {
2162 u32 val;
2163
2164 bp->mii_bmcr = MII_BMCR + 0x10;
2165 bp->mii_bmsr = MII_BMSR + 0x10;
2166 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2167 bp->mii_adv = MII_ADVERTISE + 0x10;
2168 bp->mii_lpa = MII_LPA + 0x10;
2169 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2170
2171 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2172 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2173
2174 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2175 if (reset_phy)
2176 bnx2_reset_phy(bp);
2177
2178 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2179
2180 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2181 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2182 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2183 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2184
2185 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2186 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2187 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2188 val |= BCM5708S_UP1_2G5;
2189 else
2190 val &= ~BCM5708S_UP1_2G5;
2191 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2192
2193 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2194 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2195 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2196 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2197
2198 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2199
2200 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2201 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2202 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2203
2204 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2205
2206 return 0;
2207 }
2208
2209 static int
2210 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2211 {
2212 u32 val;
2213
2214 if (reset_phy)
2215 bnx2_reset_phy(bp);
2216
2217 bp->mii_up1 = BCM5708S_UP1;
2218
2219 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2220 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2221 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2222
2223 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2224 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2225 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2226
2227 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2228 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2229 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2230
2231 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2232 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2233 val |= BCM5708S_UP1_2G5;
2234 bnx2_write_phy(bp, BCM5708S_UP1, val);
2235 }
2236
2237 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2238 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2239 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2240 /* increase tx signal amplitude */
2241 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2242 BCM5708S_BLK_ADDR_TX_MISC);
2243 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2244 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2245 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2246 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2247 }
2248
2249 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2250 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2251
2252 if (val) {
2253 u32 is_backplane;
2254
2255 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2256 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2257 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2258 BCM5708S_BLK_ADDR_TX_MISC);
2259 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2260 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2261 BCM5708S_BLK_ADDR_DIG);
2262 }
2263 }
2264 return 0;
2265 }
2266
2267 static int
2268 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2269 {
2270 if (reset_phy)
2271 bnx2_reset_phy(bp);
2272
2273 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2274
2275 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2276 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2277
2278 if (bp->dev->mtu > 1500) {
2279 u32 val;
2280
2281 /* Set extended packet length bit */
2282 bnx2_write_phy(bp, 0x18, 0x7);
2283 bnx2_read_phy(bp, 0x18, &val);
2284 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2285
2286 bnx2_write_phy(bp, 0x1c, 0x6c00);
2287 bnx2_read_phy(bp, 0x1c, &val);
2288 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2289 }
2290 else {
2291 u32 val;
2292
2293 bnx2_write_phy(bp, 0x18, 0x7);
2294 bnx2_read_phy(bp, 0x18, &val);
2295 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2296
2297 bnx2_write_phy(bp, 0x1c, 0x6c00);
2298 bnx2_read_phy(bp, 0x1c, &val);
2299 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2300 }
2301
2302 return 0;
2303 }
2304
2305 static int
2306 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2307 {
2308 u32 val;
2309
2310 if (reset_phy)
2311 bnx2_reset_phy(bp);
2312
2313 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2314 bnx2_write_phy(bp, 0x18, 0x0c00);
2315 bnx2_write_phy(bp, 0x17, 0x000a);
2316 bnx2_write_phy(bp, 0x15, 0x310b);
2317 bnx2_write_phy(bp, 0x17, 0x201f);
2318 bnx2_write_phy(bp, 0x15, 0x9506);
2319 bnx2_write_phy(bp, 0x17, 0x401f);
2320 bnx2_write_phy(bp, 0x15, 0x14e2);
2321 bnx2_write_phy(bp, 0x18, 0x0400);
2322 }
2323
2324 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2325 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2326 MII_BNX2_DSP_EXPAND_REG | 0x8);
2327 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2328 val &= ~(1 << 8);
2329 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2330 }
2331
2332 if (bp->dev->mtu > 1500) {
2333 /* Set extended packet length bit */
2334 bnx2_write_phy(bp, 0x18, 0x7);
2335 bnx2_read_phy(bp, 0x18, &val);
2336 bnx2_write_phy(bp, 0x18, val | 0x4000);
2337
2338 bnx2_read_phy(bp, 0x10, &val);
2339 bnx2_write_phy(bp, 0x10, val | 0x1);
2340 }
2341 else {
2342 bnx2_write_phy(bp, 0x18, 0x7);
2343 bnx2_read_phy(bp, 0x18, &val);
2344 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2345
2346 bnx2_read_phy(bp, 0x10, &val);
2347 bnx2_write_phy(bp, 0x10, val & ~0x1);
2348 }
2349
2350 /* ethernet@wirespeed */
2351 bnx2_write_phy(bp, 0x18, 0x7007);
2352 bnx2_read_phy(bp, 0x18, &val);
2353 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2354 return 0;
2355 }
2356
2357
2358 static int
2359 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2360 __releases(&bp->phy_lock)
2361 __acquires(&bp->phy_lock)
2362 {
2363 u32 val;
2364 int rc = 0;
2365
2366 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2367 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2368
2369 bp->mii_bmcr = MII_BMCR;
2370 bp->mii_bmsr = MII_BMSR;
2371 bp->mii_bmsr1 = MII_BMSR;
2372 bp->mii_adv = MII_ADVERTISE;
2373 bp->mii_lpa = MII_LPA;
2374
2375 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2376
2377 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2378 goto setup_phy;
2379
2380 bnx2_read_phy(bp, MII_PHYSID1, &val);
2381 bp->phy_id = val << 16;
2382 bnx2_read_phy(bp, MII_PHYSID2, &val);
2383 bp->phy_id |= val & 0xffff;
2384
2385 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2386 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2387 rc = bnx2_init_5706s_phy(bp, reset_phy);
2388 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2389 rc = bnx2_init_5708s_phy(bp, reset_phy);
2390 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2391 rc = bnx2_init_5709s_phy(bp, reset_phy);
2392 }
2393 else {
2394 rc = bnx2_init_copper_phy(bp, reset_phy);
2395 }
2396
2397 setup_phy:
2398 if (!rc)
2399 rc = bnx2_setup_phy(bp, bp->phy_port);
2400
2401 return rc;
2402 }
2403
2404 static int
2405 bnx2_set_mac_loopback(struct bnx2 *bp)
2406 {
2407 u32 mac_mode;
2408
2409 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2410 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2411 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2412 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2413 bp->link_up = 1;
2414 return 0;
2415 }
2416
2417 static int bnx2_test_link(struct bnx2 *);
2418
2419 static int
2420 bnx2_set_phy_loopback(struct bnx2 *bp)
2421 {
2422 u32 mac_mode;
2423 int rc, i;
2424
2425 spin_lock_bh(&bp->phy_lock);
2426 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2427 BMCR_SPEED1000);
2428 spin_unlock_bh(&bp->phy_lock);
2429 if (rc)
2430 return rc;
2431
2432 for (i = 0; i < 10; i++) {
2433 if (bnx2_test_link(bp) == 0)
2434 break;
2435 msleep(100);
2436 }
2437
2438 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2439 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2440 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2441 BNX2_EMAC_MODE_25G_MODE);
2442
2443 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2444 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2445 bp->link_up = 1;
2446 return 0;
2447 }
2448
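/* Post a message to the bootcode through the driver mailbox and, if
 * 'ack' is set, poll the firmware mailbox for a matching sequence
 * number.  Returns -EBUSY on timeout and -EIO if the firmware reports
 * a non-OK status.
 */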
2449 static int
2450 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2451 {
2452 int i;
2453 u32 val;
2454
2455 bp->fw_wr_seq++;
2456 msg_data |= bp->fw_wr_seq;
2457
2458 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2459
2460 if (!ack)
2461 return 0;
2462
2463 /* wait for an acknowledgement. */
2464 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2465 msleep(10);
2466
2467 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2468
2469 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2470 break;
2471 }
2472 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2473 return 0;
2474
2475 /* If we timed out, inform the firmware that this is the case. */
2476 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2477 if (!silent)
2478 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2479
2480 msg_data &= ~BNX2_DRV_MSG_CODE;
2481 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2482
2483 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2484
2485 return -EBUSY;
2486 }
2487
2488 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2489 return -EIO;
2490
2491 return 0;
2492 }
2493
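/* Initialize 5709 context memory: kick off the internal memory init,
 * then program the host page table with the DMA address of each
 * pre-allocated context block, waiting for every write request to
 * complete.
 */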
2494 static int
2495 bnx2_init_5709_context(struct bnx2 *bp)
2496 {
2497 int i, ret = 0;
2498 u32 val;
2499
2500 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2501 val |= (BCM_PAGE_BITS - 8) << 16;
2502 REG_WR(bp, BNX2_CTX_COMMAND, val);
2503 for (i = 0; i < 10; i++) {
2504 val = REG_RD(bp, BNX2_CTX_COMMAND);
2505 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2506 break;
2507 udelay(2);
2508 }
2509 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2510 return -EBUSY;
2511
2512 for (i = 0; i < bp->ctx_pages; i++) {
2513 int j;
2514
2515 if (bp->ctx_blk[i])
2516 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2517 else
2518 return -ENOMEM;
2519
2520 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2521 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2522 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2523 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2524 (u64) bp->ctx_blk_mapping[i] >> 32);
2525 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2526 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2527 for (j = 0; j < 10; j++) {
2528
2529 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2530 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2531 break;
2532 udelay(5);
2533 }
2534 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2535 ret = -EBUSY;
2536 break;
2537 }
2538 }
2539 return ret;
2540 }
2541
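/* Zero the on-chip context for all 96 connection IDs.  On the 5706 A0,
 * part of the CID range is remapped to a different physical CID before
 * being cleared.
 */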
2542 static void
2543 bnx2_init_context(struct bnx2 *bp)
2544 {
2545 u32 vcid;
2546
2547 vcid = 96;
2548 while (vcid) {
2549 u32 vcid_addr, pcid_addr, offset;
2550 int i;
2551
2552 vcid--;
2553
2554 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2555 u32 new_vcid;
2556
2557 vcid_addr = GET_PCID_ADDR(vcid);
2558 if (vcid & 0x8) {
2559 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2560 }
2561 else {
2562 new_vcid = vcid;
2563 }
2564 pcid_addr = GET_PCID_ADDR(new_vcid);
2565 }
2566 else {
2567 vcid_addr = GET_CID_ADDR(vcid);
2568 pcid_addr = vcid_addr;
2569 }
2570
2571 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2572 vcid_addr += (i << PHY_CTX_SHIFT);
2573 pcid_addr += (i << PHY_CTX_SHIFT);
2574
2575 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2576 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2577
2578 /* Zero out the context. */
2579 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2580 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2581 }
2582 }
2583 }
2584
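/* Quarantine bad RX buffer memory: allocate every mbuf from the chip,
 * record the good ones (bit 9 clear), and free only those back so the
 * bad blocks stay permanently allocated.
 */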
2585 static int
2586 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2587 {
2588 u16 *good_mbuf;
2589 u32 good_mbuf_cnt;
2590 u32 val;
2591
2592 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2593 if (good_mbuf == NULL) {
2594 pr_err("Failed to allocate memory in %s\n", __func__);
2595 return -ENOMEM;
2596 }
2597
2598 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2599 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2600
2601 good_mbuf_cnt = 0;
2602
2603 /* Allocate a bunch of mbufs and save the good ones in an array. */
2604 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2605 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2606 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2607 BNX2_RBUF_COMMAND_ALLOC_REQ);
2608
2609 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2610
2611 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2612
2613 /* The addresses with Bit 9 set are bad memory blocks. */
2614 if (!(val & (1 << 9))) {
2615 good_mbuf[good_mbuf_cnt] = (u16) val;
2616 good_mbuf_cnt++;
2617 }
2618
2619 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2620 }
2621
2622 /* Free the good ones back to the mbuf pool, thus discarding
2623  * all the bad ones. */
2624 while (good_mbuf_cnt) {
2625 good_mbuf_cnt--;
2626
2627 val = good_mbuf[good_mbuf_cnt];
2628 val = (val << 9) | val | 1;
2629
2630 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2631 }
2632 kfree(good_mbuf);
2633 return 0;
2634 }
2635
2636 static void
2637 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2638 {
2639 u32 val;
2640
2641 val = (mac_addr[0] << 8) | mac_addr[1];
2642
2643 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2644
2645 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2646 (mac_addr[4] << 8) | mac_addr[5];
2647
2648 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2649 }
2650
2651 static inline int
2652 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2653 {
2654 dma_addr_t mapping;
2655 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2656 struct rx_bd *rxbd =
2657 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2658 struct page *page = alloc_page(gfp);
2659
2660 if (!page)
2661 return -ENOMEM;
2662 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2663 PCI_DMA_FROMDEVICE);
2664 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2665 __free_page(page);
2666 return -EIO;
2667 }
2668
2669 rx_pg->page = page;
2670 dma_unmap_addr_set(rx_pg, mapping, mapping);
2671 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2672 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2673 return 0;
2674 }
2675
2676 static void
2677 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2678 {
2679 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2680 struct page *page = rx_pg->page;
2681
2682 if (!page)
2683 return;
2684
2685 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2686 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2687
2688 __free_page(page);
2689 rx_pg->page = NULL;
2690 }
2691
2692 static inline int
2693 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2694 {
2695 struct sk_buff *skb;
2696 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2697 dma_addr_t mapping;
2698 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2699 unsigned long align;
2700
2701 skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2702 if (skb == NULL) {
2703 return -ENOMEM;
2704 }
2705
2706 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2707 skb_reserve(skb, BNX2_RX_ALIGN - align);
2708
2709 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2710 PCI_DMA_FROMDEVICE);
2711 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2712 dev_kfree_skb(skb);
2713 return -EIO;
2714 }
2715
2716 rx_buf->skb = skb;
2717 rx_buf->desc = (struct l2_fhdr *) skb->data;
2718 dma_unmap_addr_set(rx_buf, mapping, mapping);
2719
2720 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2721 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2722
2723 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2724
2725 return 0;
2726 }
2727
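/* Return 1 if the given attention event is pending (the raw attention
 * bit differs from its acknowledged copy) and acknowledge it by
 * setting or clearing the bit through PCICFG.
 */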
2728 static int
2729 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2730 {
2731 struct status_block *sblk = bnapi->status_blk.msi;
2732 u32 new_link_state, old_link_state;
2733 int is_set = 1;
2734
2735 new_link_state = sblk->status_attn_bits & event;
2736 old_link_state = sblk->status_attn_bits_ack & event;
2737 if (new_link_state != old_link_state) {
2738 if (new_link_state)
2739 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2740 else
2741 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2742 } else
2743 is_set = 0;
2744
2745 return is_set;
2746 }
2747
2748 static void
2749 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2750 {
2751 spin_lock(&bp->phy_lock);
2752
2753 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2754 bnx2_set_link(bp);
2755 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2756 bnx2_set_remote_link(bp);
2757
2758 spin_unlock(&bp->phy_lock);
2759
2760 }
2761
2762 static inline u16
2763 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2764 {
2765 u16 cons;
2766
2767 /* Tell compiler that status block fields can change. */
2768 barrier();
2769 cons = *bnapi->hw_tx_cons_ptr;
2770 barrier();
2771 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2772 cons++;
2773 return cons;
2774 }
2775
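/* Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap and free the associated skbs, then wake the TX queue if it was
 * stopped and enough descriptors are available again.
 */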
2776 static int
2777 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2778 {
2779 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2780 u16 hw_cons, sw_cons, sw_ring_cons;
2781 int tx_pkt = 0, index;
2782 struct netdev_queue *txq;
2783
2784 index = (bnapi - bp->bnx2_napi);
2785 txq = netdev_get_tx_queue(bp->dev, index);
2786
2787 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2788 sw_cons = txr->tx_cons;
2789
2790 while (sw_cons != hw_cons) {
2791 struct sw_tx_bd *tx_buf;
2792 struct sk_buff *skb;
2793 int i, last;
2794
2795 sw_ring_cons = TX_RING_IDX(sw_cons);
2796
2797 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2798 skb = tx_buf->skb;
2799
2800 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2801 prefetch(&skb->end);
2802
2803 /* partial BD completions possible with TSO packets */
2804 if (tx_buf->is_gso) {
2805 u16 last_idx, last_ring_idx;
2806
2807 last_idx = sw_cons + tx_buf->nr_frags + 1;
2808 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2809 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2810 last_idx++;
2811 }
2812 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2813 break;
2814 }
2815 }
2816
2817 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2818 skb_headlen(skb), PCI_DMA_TODEVICE);
2819
2820 tx_buf->skb = NULL;
2821 last = tx_buf->nr_frags;
2822
2823 for (i = 0; i < last; i++) {
2824 sw_cons = NEXT_TX_BD(sw_cons);
2825
2826 dma_unmap_page(&bp->pdev->dev,
2827 dma_unmap_addr(
2828 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2829 mapping),
2830 skb_shinfo(skb)->frags[i].size,
2831 PCI_DMA_TODEVICE);
2832 }
2833
2834 sw_cons = NEXT_TX_BD(sw_cons);
2835
2836 dev_kfree_skb(skb);
2837 tx_pkt++;
2838 if (tx_pkt == budget)
2839 break;
2840
2841 if (hw_cons == sw_cons)
2842 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2843 }
2844
2845 txr->hw_tx_cons = hw_cons;
2846 txr->tx_cons = sw_cons;
2847
2848 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2849 * before checking for netif_tx_queue_stopped(). Without the
2850 * memory barrier, there is a small possibility that bnx2_start_xmit()
2851 * will miss it and cause the queue to be stopped forever.
2852 */
2853 smp_mb();
2854
2855 if (unlikely(netif_tx_queue_stopped(txq)) &&
2856 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2857 __netif_tx_lock(txq, smp_processor_id());
2858 if ((netif_tx_queue_stopped(txq)) &&
2859 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2860 netif_tx_wake_queue(txq);
2861 __netif_tx_unlock(txq);
2862 }
2863
2864 return tx_pkt;
2865 }
2866
2867 static void
2868 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2869 struct sk_buff *skb, int count)
2870 {
2871 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2872 struct rx_bd *cons_bd, *prod_bd;
2873 int i;
2874 u16 hw_prod, prod;
2875 u16 cons = rxr->rx_pg_cons;
2876
2877 cons_rx_pg = &rxr->rx_pg_ring[cons];
2878
2879 /* The caller was unable to allocate a new page to replace the
2880 * last one in the frags array, so we need to recycle that page
2881 * and then free the skb.
2882 */
2883 if (skb) {
2884 struct page *page;
2885 struct skb_shared_info *shinfo;
2886
2887 shinfo = skb_shinfo(skb);
2888 shinfo->nr_frags--;
2889 page = shinfo->frags[shinfo->nr_frags].page;
2890 shinfo->frags[shinfo->nr_frags].page = NULL;
2891
2892 cons_rx_pg->page = page;
2893 dev_kfree_skb(skb);
2894 }
2895
2896 hw_prod = rxr->rx_pg_prod;
2897
2898 for (i = 0; i < count; i++) {
2899 prod = RX_PG_RING_IDX(hw_prod);
2900
2901 prod_rx_pg = &rxr->rx_pg_ring[prod];
2902 cons_rx_pg = &rxr->rx_pg_ring[cons];
2903 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2904 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2905
2906 if (prod != cons) {
2907 prod_rx_pg->page = cons_rx_pg->page;
2908 cons_rx_pg->page = NULL;
2909 dma_unmap_addr_set(prod_rx_pg, mapping,
2910 dma_unmap_addr(cons_rx_pg, mapping));
2911
2912 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2913 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2914
2915 }
2916 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2917 hw_prod = NEXT_RX_BD(hw_prod);
2918 }
2919 rxr->rx_pg_prod = hw_prod;
2920 rxr->rx_pg_cons = cons;
2921 }
2922
2923 static inline void
2924 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2925 struct sk_buff *skb, u16 cons, u16 prod)
2926 {
2927 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2928 struct rx_bd *cons_bd, *prod_bd;
2929
2930 cons_rx_buf = &rxr->rx_buf_ring[cons];
2931 prod_rx_buf = &rxr->rx_buf_ring[prod];
2932
2933 dma_sync_single_for_device(&bp->pdev->dev,
2934 dma_unmap_addr(cons_rx_buf, mapping),
2935 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2936
2937 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2938
2939 prod_rx_buf->skb = skb;
2940 prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2941
2942 if (cons == prod)
2943 return;
2944
2945 dma_unmap_addr_set(prod_rx_buf, mapping,
2946 dma_unmap_addr(cons_rx_buf, mapping));
2947
2948 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2949 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2950 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2951 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2952 }
2953
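/* Complete a received skb.  If a replacement buffer cannot be
 * allocated, the old buffer is recycled and an error returned.  For
 * split or jumbo frames the header stays in the linear area and the
 * remaining data is attached as page fragments from the page ring.
 */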
2954 static int
2955 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2956 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2957 u32 ring_idx)
2958 {
2959 int err;
2960 u16 prod = ring_idx & 0xffff;
2961
2962 err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
2963 if (unlikely(err)) {
2964 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2965 if (hdr_len) {
2966 unsigned int raw_len = len + 4;
2967 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2968
2969 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2970 }
2971 return err;
2972 }
2973
2974 skb_reserve(skb, BNX2_RX_OFFSET);
2975 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
2976 PCI_DMA_FROMDEVICE);
2977
2978 if (hdr_len == 0) {
2979 skb_put(skb, len);
2980 return 0;
2981 } else {
2982 unsigned int i, frag_len, frag_size, pages;
2983 struct sw_pg *rx_pg;
2984 u16 pg_cons = rxr->rx_pg_cons;
2985 u16 pg_prod = rxr->rx_pg_prod;
2986
2987 frag_size = len + 4 - hdr_len;
2988 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2989 skb_put(skb, hdr_len);
2990
2991 for (i = 0; i < pages; i++) {
2992 dma_addr_t mapping_old;
2993
2994 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2995 if (unlikely(frag_len <= 4)) {
2996 unsigned int tail = 4 - frag_len;
2997
2998 rxr->rx_pg_cons = pg_cons;
2999 rxr->rx_pg_prod = pg_prod;
3000 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3001 pages - i);
3002 skb->len -= tail;
3003 if (i == 0) {
3004 skb->tail -= tail;
3005 } else {
3006 skb_frag_t *frag =
3007 &skb_shinfo(skb)->frags[i - 1];
3008 frag->size -= tail;
3009 skb->data_len -= tail;
3010 skb->truesize -= tail;
3011 }
3012 return 0;
3013 }
3014 rx_pg = &rxr->rx_pg_ring[pg_cons];
3015
3016 /* Don't unmap yet. If we're unable to allocate a new
3017 * page, we need to recycle the page and the DMA addr.
3018 */
3019 mapping_old = dma_unmap_addr(rx_pg, mapping);
3020 if (i == pages - 1)
3021 frag_len -= 4;
3022
3023 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3024 rx_pg->page = NULL;
3025
3026 err = bnx2_alloc_rx_page(bp, rxr,
3027 RX_PG_RING_IDX(pg_prod),
3028 GFP_ATOMIC);
3029 if (unlikely(err)) {
3030 rxr->rx_pg_cons = pg_cons;
3031 rxr->rx_pg_prod = pg_prod;
3032 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3033 pages - i);
3034 return err;
3035 }
3036
3037 dma_unmap_page(&bp->pdev->dev, mapping_old,
3038 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3039
3040 frag_size -= frag_len;
3041 skb->data_len += frag_len;
3042 skb->truesize += frag_len;
3043 skb->len += frag_len;
3044
3045 pg_prod = NEXT_RX_BD(pg_prod);
3046 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3047 }
3048 rxr->rx_pg_prod = pg_prod;
3049 rxr->rx_pg_cons = pg_cons;
3050 }
3051 return 0;
3052 }
3053
3054 static inline u16
3055 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3056 {
3057 u16 cons;
3058
3059 /* Tell compiler that status block fields can change. */
3060 barrier();
3061 cons = *bnapi->hw_rx_cons_ptr;
3062 barrier();
3063 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3064 cons++;
3065 return cons;
3066 }
3067
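/* RX completion loop: walk from the software consumer to the hardware
 * consumer index, drop bad frames, copy small frames into a fresh skb,
 * handle VLAN tags and checksum results, and pass packets up via GRO.
 */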
3068 static int
3069 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3070 {
3071 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3072 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3073 struct l2_fhdr *rx_hdr;
3074 int rx_pkt = 0, pg_ring_used = 0;
3075
3076 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3077 sw_cons = rxr->rx_cons;
3078 sw_prod = rxr->rx_prod;
3079
3080 /* Memory barrier necessary as speculative reads of the rx
3081 * buffer can be ahead of the index in the status block
3082 */
3083 rmb();
3084 while (sw_cons != hw_cons) {
3085 unsigned int len, hdr_len;
3086 u32 status;
3087 struct sw_bd *rx_buf, *next_rx_buf;
3088 struct sk_buff *skb;
3089 dma_addr_t dma_addr;
3090 u16 vtag = 0;
3091 int hw_vlan __maybe_unused = 0;
3092
3093 sw_ring_cons = RX_RING_IDX(sw_cons);
3094 sw_ring_prod = RX_RING_IDX(sw_prod);
3095
3096 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3097 skb = rx_buf->skb;
3098 prefetchw(skb);
3099
3100 next_rx_buf =
3101 &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3102 prefetch(next_rx_buf->desc);
3103
3104 rx_buf->skb = NULL;
3105
3106 dma_addr = dma_unmap_addr(rx_buf, mapping);
3107
3108 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3109 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3110 PCI_DMA_FROMDEVICE);
3111
3112 rx_hdr = rx_buf->desc;
3113 len = rx_hdr->l2_fhdr_pkt_len;
3114 status = rx_hdr->l2_fhdr_status;
3115
3116 hdr_len = 0;
3117 if (status & L2_FHDR_STATUS_SPLIT) {
3118 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3119 pg_ring_used = 1;
3120 } else if (len > bp->rx_jumbo_thresh) {
3121 hdr_len = bp->rx_jumbo_thresh;
3122 pg_ring_used = 1;
3123 }
3124
3125 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3126 L2_FHDR_ERRORS_PHY_DECODE |
3127 L2_FHDR_ERRORS_ALIGNMENT |
3128 L2_FHDR_ERRORS_TOO_SHORT |
3129 L2_FHDR_ERRORS_GIANT_FRAME))) {
3130
3131 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3132 sw_ring_prod);
3133 if (pg_ring_used) {
3134 int pages;
3135
3136 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3137
3138 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3139 }
3140 goto next_rx;
3141 }
3142
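/* The length reported in the l2_fhdr includes the 4-byte frame CRC;
 * strip it before handing the data to the stack.
 */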
3143 len -= 4;
3144
3145 if (len <= bp->rx_copy_thresh) {
3146 struct sk_buff *new_skb;
3147
3148 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3149 if (new_skb == NULL) {
3150 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3151 sw_ring_prod);
3152 goto next_rx;
3153 }
3154
3155 /* aligned copy */
3156 skb_copy_from_linear_data_offset(skb,
3157 BNX2_RX_OFFSET - 6,
3158 new_skb->data, len + 6);
3159 skb_reserve(new_skb, 6);
3160 skb_put(new_skb, len);
3161
3162 bnx2_reuse_rx_skb(bp, rxr, skb,
3163 sw_ring_cons, sw_ring_prod);
3164
3165 skb = new_skb;
3166 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3167 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3168 goto next_rx;
3169
3170 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3171 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3172 vtag = rx_hdr->l2_fhdr_vlan_tag;
3173 #ifdef BCM_VLAN
3174 if (bp->vlgrp)
3175 hw_vlan = 1;
3176 else
3177 #endif
3178 {
3179 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3180 __skb_push(skb, 4);
3181
3182 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3183 ve->h_vlan_proto = htons(ETH_P_8021Q);
3184 ve->h_vlan_TCI = htons(vtag);
3185 len += 4;
3186 }
3187 }
3188
3189 skb->protocol = eth_type_trans(skb, bp->dev);
3190
3191 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3192 (ntohs(skb->protocol) != 0x8100)) {
3193
3194 dev_kfree_skb(skb);
3195 goto next_rx;
3196
3197 }
3198
3199 skb_checksum_none_assert(skb);
3200 if (bp->rx_csum &&
3201 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3202 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3203
3204 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3205 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3206 skb->ip_summed = CHECKSUM_UNNECESSARY;
3207 }
3208 if ((bp->dev->features & NETIF_F_RXHASH) &&
3209 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3210 L2_FHDR_STATUS_USE_RXHASH))
3211 skb->rxhash = rx_hdr->l2_fhdr_hash;
3212
3213 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3214
3215 #ifdef BCM_VLAN
3216 if (hw_vlan)
3217 vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
3218 else
3219 #endif
3220 napi_gro_receive(&bnapi->napi, skb);
3221
3222 rx_pkt++;
3223
3224 next_rx:
3225 sw_cons = NEXT_RX_BD(sw_cons);
3226 sw_prod = NEXT_RX_BD(sw_prod);
3227
3228 if (rx_pkt == budget)
3229 break;
3230
3231 /* Refresh hw_cons to see if there is new work */
3232 if (sw_cons == hw_cons) {
3233 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3234 rmb();
3235 }
3236 }
3237 rxr->rx_cons = sw_cons;
3238 rxr->rx_prod = sw_prod;
3239
3240 if (pg_ring_used)
3241 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3242
3243 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3244
3245 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3246
3247 mmiowb();
3248
3249 return rx_pkt;
3250
3251 }
3252
3253 /* MSI ISR - The only difference between this and the INTx ISR
3254 * is that the MSI interrupt is always serviced.
3255 */
3256 static irqreturn_t
3257 bnx2_msi(int irq, void *dev_instance)
3258 {
3259 struct bnx2_napi *bnapi = dev_instance;
3260 struct bnx2 *bp = bnapi->bp;
3261
3262 prefetch(bnapi->status_blk.msi);
3263 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3264 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3265 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3266
3267 /* Return here if interrupt is disabled. */
3268 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3269 return IRQ_HANDLED;
3270
3271 napi_schedule(&bnapi->napi);
3272
3273 return IRQ_HANDLED;
3274 }
3275
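/* One-shot MSI handler: identical to bnx2_msi() except that no
 * mask/ack write to BNX2_PCICFG_INT_ACK_CMD is needed before
 * scheduling NAPI.
 */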
3276 static irqreturn_t
3277 bnx2_msi_1shot(int irq, void *dev_instance)
3278 {
3279 struct bnx2_napi *bnapi = dev_instance;
3280 struct bnx2 *bp = bnapi->bp;
3281
3282 prefetch(bnapi->status_blk.msi);
3283
3284 /* Return here if interrupt is disabled. */
3285 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3286 return IRQ_HANDLED;
3287
3288 napi_schedule(&bnapi->napi);
3289
3290 return IRQ_HANDLED;
3291 }
3292
3293 static irqreturn_t
3294 bnx2_interrupt(int irq, void *dev_instance)
3295 {
3296 struct bnx2_napi *bnapi = dev_instance;
3297 struct bnx2 *bp = bnapi->bp;
3298 struct status_block *sblk = bnapi->status_blk.msi;
3299
3300 /* When using INTx, it is possible for the interrupt to arrive
3301 * at the CPU before the status block posted prior to the
3302 * interrupt. Reading a register will flush the status block.
3303 * When using MSI, the MSI message will always complete after
3304 * the status block write.
3305 */
3306 if ((sblk->status_idx == bnapi->last_status_idx) &&
3307 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3308 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3309 return IRQ_NONE;
3310
3311 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3312 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3313 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3314
3315 /* Read back to deassert IRQ immediately to avoid too many
3316 * spurious interrupts.
3317 */
3318 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3319
3320 /* Return here if interrupt is shared and is disabled. */
3321 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3322 return IRQ_HANDLED;
3323
3324 if (napi_schedule_prep(&bnapi->napi)) {
3325 bnapi->last_status_idx = sblk->status_idx;
3326 __napi_schedule(&bnapi->napi);
3327 }
3328
3329 return IRQ_HANDLED;
3330 }
3331
3332 static inline int
3333 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3334 {
3335 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3336 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3337
3338 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3339 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3340 return 1;
3341 return 0;
3342 }
3343
3344 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3345 STATUS_ATTN_BITS_TIMER_ABORT)
3346
3347 static inline int
3348 bnx2_has_work(struct bnx2_napi *bnapi)
3349 {
3350 struct status_block *sblk = bnapi->status_blk.msi;
3351
3352 if (bnx2_has_fast_work(bnapi))
3353 return 1;
3354
3355 #ifdef BCM_CNIC
3356 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3357 return 1;
3358 #endif
3359
3360 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3361 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3362 return 1;
3363
3364 return 0;
3365 }
3366
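/* Work around a missed MSI: if there is pending work but the status
 * index has not advanced since the last idle check, toggle the MSI
 * enable bit and invoke the MSI handler directly.
 */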
3367 static void
3368 bnx2_chk_missed_msi(struct bnx2 *bp)
3369 {
3370 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3371 u32 msi_ctrl;
3372
3373 if (bnx2_has_work(bnapi)) {
3374 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3375 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3376 return;
3377
3378 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3379 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3380 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3381 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3382 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3383 }
3384 }
3385
3386 bp->idle_chk_status_idx = bnapi->last_status_idx;
3387 }
3388
3389 #ifdef BCM_CNIC
3390 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3391 {
3392 struct cnic_ops *c_ops;
3393
3394 if (!bnapi->cnic_present)
3395 return;
3396
3397 rcu_read_lock();
3398 c_ops = rcu_dereference(bp->cnic_ops);
3399 if (c_ops)
3400 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3401 bnapi->status_blk.msi);
3402 rcu_read_unlock();
3403 }
3404 #endif
3405
3406 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3407 {
3408 struct status_block *sblk = bnapi->status_blk.msi;
3409 u32 status_attn_bits = sblk->status_attn_bits;
3410 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3411
3412 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3413 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3414
3415 bnx2_phy_int(bp, bnapi);
3416
3417 /* This is needed to take care of transient status
3418 * during link changes.
3419 */
3420 REG_WR(bp, BNX2_HC_COMMAND,
3421 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3422 REG_RD(bp, BNX2_HC_COMMAND);
3423 }
3424 }
3425
3426 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3427 int work_done, int budget)
3428 {
3429 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3430 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3431
3432 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3433 bnx2_tx_int(bp, bnapi, 0);
3434
3435 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3436 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3437
3438 return work_done;
3439 }
3440
3441 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3442 {
3443 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3444 struct bnx2 *bp = bnapi->bp;
3445 int work_done = 0;
3446 struct status_block_msix *sblk = bnapi->status_blk.msix;
3447
3448 while (1) {
3449 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3450 if (unlikely(work_done >= budget))
3451 break;
3452
3453 bnapi->last_status_idx = sblk->status_idx;
3454 /* status idx must be read before checking for more work. */
3455 rmb();
3456 if (likely(!bnx2_has_fast_work(bnapi))) {
3457
3458 napi_complete(napi);
3459 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3460 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3461 bnapi->last_status_idx);
3462 break;
3463 }
3464 }
3465 return work_done;
3466 }
3467
3468 static int bnx2_poll(struct napi_struct *napi, int budget)
3469 {
3470 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3471 struct bnx2 *bp = bnapi->bp;
3472 int work_done = 0;
3473 struct status_block *sblk = bnapi->status_blk.msi;
3474
3475 while (1) {
3476 bnx2_poll_link(bp, bnapi);
3477
3478 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3479
3480 #ifdef BCM_CNIC
3481 bnx2_poll_cnic(bp, bnapi);
3482 #endif
3483
3484 /* bnapi->last_status_idx is used below to tell the hw how
3485 * much work has been processed, so we must read it before
3486 * checking for more work.
3487 */
3488 bnapi->last_status_idx = sblk->status_idx;
3489
3490 if (unlikely(work_done >= budget))
3491 break;
3492
3493 rmb();
3494 if (likely(!bnx2_has_work(bnapi))) {
3495 napi_complete(napi);
3496 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3497 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3498 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3499 bnapi->last_status_idx);
3500 break;
3501 }
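/* INTx or shared interrupt: write the new index with the interrupt
 * still masked, then a second write without MASK_INT re-enables it.
 */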
3502 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3503 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3504 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3505 bnapi->last_status_idx);
3506
3507 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3508 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3509 bnapi->last_status_idx);
3510 break;
3511 }
3512 }
3513
3514 return work_done;
3515 }
3516
3517 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3518 * from set_multicast.
3519 */
3520 static void
3521 bnx2_set_rx_mode(struct net_device *dev)
3522 {
3523 struct bnx2 *bp = netdev_priv(dev);
3524 u32 rx_mode, sort_mode;
3525 struct netdev_hw_addr *ha;
3526 int i;
3527
3528 if (!netif_running(dev))
3529 return;
3530
3531 spin_lock_bh(&bp->phy_lock);
3532
3533 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3534 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3535 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3536 #ifdef BCM_VLAN
3537 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3538 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3539 #else
3540 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3541 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3542 #endif
3543 if (dev->flags & IFF_PROMISC) {
3544 /* Promiscuous mode. */
3545 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3546 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3547 BNX2_RPM_SORT_USER0_PROM_VLAN;
3548 }
3549 else if (dev->flags & IFF_ALLMULTI) {
3550 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3551 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3552 0xffffffff);
3553 }
3554 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3555 }
3556 else {
3557 /* Accept one or more multicast(s). */
3558 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3559 u32 regidx;
3560 u32 bit;
3561 u32 crc;
3562
3563 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3564
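/* Hash on the low byte of the little-endian CRC of the address:
 * bits 7:5 select the hash register, bits 4:0 the bit within it.
 */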
3565 netdev_for_each_mc_addr(ha, dev) {
3566 crc = ether_crc_le(ETH_ALEN, ha->addr);
3567 bit = crc & 0xff;
3568 regidx = (bit & 0xe0) >> 5;
3569 bit &= 0x1f;
3570 mc_filter[regidx] |= (1 << bit);
3571 }
3572
3573 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3574 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3575 mc_filter[i]);
3576 }
3577
3578 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3579 }
3580
3581 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3582 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3583 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3584 BNX2_RPM_SORT_USER0_PROM_VLAN;
3585 } else if (!(dev->flags & IFF_PROMISC)) {
3586 /* Add all entries to the match filter list */
3587 i = 0;
3588 netdev_for_each_uc_addr(ha, dev) {
3589 bnx2_set_mac_addr(bp, ha->addr,
3590 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3591 sort_mode |= (1 <<
3592 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3593 i++;
3594 }
3595
3596 }
3597
3598 if (rx_mode != bp->rx_mode) {
3599 bp->rx_mode = rx_mode;
3600 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3601 }
3602
3603 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3604 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3605 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3606
3607 spin_unlock_bh(&bp->phy_lock);
3608 }
3609
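/* Sanity-check one firmware file section header: the offset and length
 * must lie within the file and respect the required alignment.
 */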
3610 static int __devinit
3611 check_fw_section(const struct firmware *fw,
3612 const struct bnx2_fw_file_section *section,
3613 u32 alignment, bool non_empty)
3614 {
3615 u32 offset = be32_to_cpu(section->offset);
3616 u32 len = be32_to_cpu(section->len);
3617
3618 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3619 return -EINVAL;
3620 if ((non_empty && len == 0) || len > fw->size - offset ||
3621 len & (alignment - 1))
3622 return -EINVAL;
3623 return 0;
3624 }
3625
3626 static int __devinit
3627 check_mips_fw_entry(const struct firmware *fw,
3628 const struct bnx2_mips_fw_file_entry *entry)
3629 {
3630 if (check_fw_section(fw, &entry->text, 4, true) ||
3631 check_fw_section(fw, &entry->data, 4, false) ||
3632 check_fw_section(fw, &entry->rodata, 4, false))
3633 return -EINVAL;
3634 return 0;
3635 }
3636
3637 static int __devinit
3638 bnx2_request_firmware(struct bnx2 *bp)
3639 {
3640 const char *mips_fw_file, *rv2p_fw_file;
3641 const struct bnx2_mips_fw_file *mips_fw;
3642 const struct bnx2_rv2p_fw_file *rv2p_fw;
3643 int rc;
3644
3645 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3646 mips_fw_file = FW_MIPS_FILE_09;
3647 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3648 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3649 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3650 else
3651 rv2p_fw_file = FW_RV2P_FILE_09;
3652 } else {
3653 mips_fw_file = FW_MIPS_FILE_06;
3654 rv2p_fw_file = FW_RV2P_FILE_06;
3655 }
3656
3657 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3658 if (rc) {
3659 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3660 return rc;
3661 }
3662
3663 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3664 if (rc) {
3665 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3666 return rc;
3667 }
3668 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3669 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3670 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3671 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3672 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3673 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3674 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3675 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3676 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3677 return -EINVAL;
3678 }
3679 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3680 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3681 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3682 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3683 return -EINVAL;
3684 }
3685
3686 return 0;
3687 }
3688
3689 static u32
3690 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3691 {
3692 switch (idx) {
3693 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3694 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3695 rv2p_code |= RV2P_BD_PAGE_SIZE;
3696 break;
3697 }
3698 return rv2p_code;
3699 }
3700
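/* Download one RV2P firmware image: write the instructions two words
 * at a time through the instruction registers, apply the fixup table,
 * then reset the target RV2P processor (it is un-stalled later).
 */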
3701 static int
3702 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3703 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3704 {
3705 u32 rv2p_code_len, file_offset;
3706 __be32 *rv2p_code;
3707 int i;
3708 u32 val, cmd, addr;
3709
3710 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3711 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3712
3713 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3714
3715 if (rv2p_proc == RV2P_PROC1) {
3716 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3717 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3718 } else {
3719 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3720 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3721 }
3722
3723 for (i = 0; i < rv2p_code_len; i += 8) {
3724 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3725 rv2p_code++;
3726 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3727 rv2p_code++;
3728
3729 val = (i / 8) | cmd;
3730 REG_WR(bp, addr, val);
3731 }
3732
3733 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3734 for (i = 0; i < 8; i++) {
3735 u32 loc, code;
3736
3737 loc = be32_to_cpu(fw_entry->fixup[i]);
3738 if (loc && ((loc * 4) < rv2p_code_len)) {
3739 code = be32_to_cpu(*(rv2p_code + loc - 1));
3740 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3741 code = be32_to_cpu(*(rv2p_code + loc));
3742 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3743 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3744
3745 val = (loc / 2) | cmd;
3746 REG_WR(bp, addr, val);
3747 }
3748 }
3749
3750 /* Reset the processor, un-stall is done later. */
3751 if (rv2p_proc == RV2P_PROC1) {
3752 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3753 }
3754 else {
3755 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3756 }
3757
3758 return 0;
3759 }
3760
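/* Download firmware to one of the on-chip MIPS CPUs: halt it, copy the
 * text, data and read-only sections into its scratchpad view, set the
 * program counter to the entry point, and restart it.
 */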
3761 static int
3762 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3763 const struct bnx2_mips_fw_file_entry *fw_entry)
3764 {
3765 u32 addr, len, file_offset;
3766 __be32 *data;
3767 u32 offset;
3768 u32 val;
3769
3770 /* Halt the CPU. */
3771 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3772 val |= cpu_reg->mode_value_halt;
3773 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3774 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3775
3776 /* Load the Text area. */
3777 addr = be32_to_cpu(fw_entry->text.addr);
3778 len = be32_to_cpu(fw_entry->text.len);
3779 file_offset = be32_to_cpu(fw_entry->text.offset);
3780 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3781
3782 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3783 if (len) {
3784 int j;
3785
3786 for (j = 0; j < (len / 4); j++, offset += 4)
3787 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3788 }
3789
3790 /* Load the Data area. */
3791 addr = be32_to_cpu(fw_entry->data.addr);
3792 len = be32_to_cpu(fw_entry->data.len);
3793 file_offset = be32_to_cpu(fw_entry->data.offset);
3794 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3795
3796 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3797 if (len) {
3798 int j;
3799
3800 for (j = 0; j < (len / 4); j++, offset += 4)
3801 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3802 }
3803
3804 /* Load the Read-Only area. */
3805 addr = be32_to_cpu(fw_entry->rodata.addr);
3806 len = be32_to_cpu(fw_entry->rodata.len);
3807 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3808 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3809
3810 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3811 if (len) {
3812 int j;
3813
3814 for (j = 0; j < (len / 4); j++, offset += 4)
3815 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3816 }
3817
3818 /* Clear the pre-fetch instruction. */
3819 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3820
3821 val = be32_to_cpu(fw_entry->start_addr);
3822 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3823
3824 /* Start the CPU. */
3825 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3826 val &= ~cpu_reg->mode_value_halt;
3827 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3828 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3829
3830 return 0;
3831 }
3832
3833 static int
3834 bnx2_init_cpus(struct bnx2 *bp)
3835 {
3836 const struct bnx2_mips_fw_file *mips_fw =
3837 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3838 const struct bnx2_rv2p_fw_file *rv2p_fw =
3839 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3840 int rc;
3841
3842 /* Initialize the RV2P processor. */
3843 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3844 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3845
3846 /* Initialize the RX Processor. */
3847 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3848 if (rc)
3849 goto init_cpu_err;
3850
3851 /* Initialize the TX Processor. */
3852 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3853 if (rc)
3854 goto init_cpu_err;
3855
3856 /* Initialize the TX Patch-up Processor. */
3857 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3858 if (rc)
3859 goto init_cpu_err;
3860
3861 /* Initialize the Completion Processor. */
3862 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3863 if (rc)
3864 goto init_cpu_err;
3865
3866 /* Initialize the Command Processor. */
3867 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3868
3869 init_cpu_err:
3870 return rc;
3871 }
3872
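/* Move the device between D0 and D3hot.  When entering D3hot with WoL
 * enabled, the MAC is reprogrammed to receive magic/ACPI packets and
 * the bootcode is told whether to keep the WoL logic powered.
 */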
3873 static int
3874 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3875 {
3876 u16 pmcsr;
3877
3878 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3879
3880 switch (state) {
3881 case PCI_D0: {
3882 u32 val;
3883
3884 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3885 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3886 PCI_PM_CTRL_PME_STATUS);
3887
3888 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3889 /* delay required during transition out of D3hot */
3890 msleep(20);
3891
3892 val = REG_RD(bp, BNX2_EMAC_MODE);
3893 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3894 val &= ~BNX2_EMAC_MODE_MPKT;
3895 REG_WR(bp, BNX2_EMAC_MODE, val);
3896
3897 val = REG_RD(bp, BNX2_RPM_CONFIG);
3898 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3899 REG_WR(bp, BNX2_RPM_CONFIG, val);
3900 break;
3901 }
3902 case PCI_D3hot: {
3903 int i;
3904 u32 val, wol_msg;
3905
3906 if (bp->wol) {
3907 u32 advertising;
3908 u8 autoneg;
3909
3910 autoneg = bp->autoneg;
3911 advertising = bp->advertising;
3912
3913 if (bp->phy_port == PORT_TP) {
3914 bp->autoneg = AUTONEG_SPEED;
3915 bp->advertising = ADVERTISED_10baseT_Half |
3916 ADVERTISED_10baseT_Full |
3917 ADVERTISED_100baseT_Half |
3918 ADVERTISED_100baseT_Full |
3919 ADVERTISED_Autoneg;
3920 }
3921
3922 spin_lock_bh(&bp->phy_lock);
3923 bnx2_setup_phy(bp, bp->phy_port);
3924 spin_unlock_bh(&bp->phy_lock);
3925
3926 bp->autoneg = autoneg;
3927 bp->advertising = advertising;
3928
3929 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3930
3931 val = REG_RD(bp, BNX2_EMAC_MODE);
3932
3933 /* Enable port mode. */
3934 val &= ~BNX2_EMAC_MODE_PORT;
3935 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3936 BNX2_EMAC_MODE_ACPI_RCVD |
3937 BNX2_EMAC_MODE_MPKT;
3938 if (bp->phy_port == PORT_TP)
3939 val |= BNX2_EMAC_MODE_PORT_MII;
3940 else {
3941 val |= BNX2_EMAC_MODE_PORT_GMII;
3942 if (bp->line_speed == SPEED_2500)
3943 val |= BNX2_EMAC_MODE_25G_MODE;
3944 }
3945
3946 REG_WR(bp, BNX2_EMAC_MODE, val);
3947
3948 /* receive all multicast */
3949 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3950 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3951 0xffffffff);
3952 }
3953 REG_WR(bp, BNX2_EMAC_RX_MODE,
3954 BNX2_EMAC_RX_MODE_SORT_MODE);
3955
3956 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3957 BNX2_RPM_SORT_USER0_MC_EN;
3958 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3959 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3960 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3961 BNX2_RPM_SORT_USER0_ENA);
3962
3963 /* Need to enable EMAC and RPM for WOL. */
3964 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3965 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3966 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3967 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3968
3969 val = REG_RD(bp, BNX2_RPM_CONFIG);
3970 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3971 REG_WR(bp, BNX2_RPM_CONFIG, val);
3972
3973 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3974 }
3975 else {
3976 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3977 }
3978
3979 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3980 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3981 1, 0);
3982
3983 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3984 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3985 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3986
3987 if (bp->wol)
3988 pmcsr |= 3;
3989 }
3990 else {
3991 pmcsr |= 3;
3992 }
3993 if (bp->wol) {
3994 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3995 }
3996 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3997 pmcsr);
3998
3999 /* No more memory access after this point until
4000 * device is brought back to D0.
4001 */
4002 udelay(50);
4003 break;
4004 }
4005 default:
4006 return -EINVAL;
4007 }
4008 return 0;
4009 }
4010
4011 static int
4012 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4013 {
4014 u32 val;
4015 int j;
4016
4017 /* Request access to the flash interface. */
4018 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4019 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4020 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4021 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4022 break;
4023
4024 udelay(5);
4025 }
4026
4027 if (j >= NVRAM_TIMEOUT_COUNT)
4028 return -EBUSY;
4029
4030 return 0;
4031 }
4032
4033 static int
4034 bnx2_release_nvram_lock(struct bnx2 *bp)
4035 {
4036 int j;
4037 u32 val;
4038
4039 /* Relinquish nvram interface. */
4040 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4041
4042 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4043 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4044 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4045 break;
4046
4047 udelay(5);
4048 }
4049
4050 if (j >= NVRAM_TIMEOUT_COUNT)
4051 return -EBUSY;
4052
4053 return 0;
4054 }
4055
4056
4057 static int
4058 bnx2_enable_nvram_write(struct bnx2 *bp)
4059 {
4060 u32 val;
4061
4062 val = REG_RD(bp, BNX2_MISC_CFG);
4063 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4064
4065 if (bp->flash_info->flags & BNX2_NV_WREN) {
4066 int j;
4067
4068 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4069 REG_WR(bp, BNX2_NVM_COMMAND,
4070 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4071
4072 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4073 udelay(5);
4074
4075 val = REG_RD(bp, BNX2_NVM_COMMAND);
4076 if (val & BNX2_NVM_COMMAND_DONE)
4077 break;
4078 }
4079
4080 if (j >= NVRAM_TIMEOUT_COUNT)
4081 return -EBUSY;
4082 }
4083 return 0;
4084 }
4085
4086 static void
4087 bnx2_disable_nvram_write(struct bnx2 *bp)
4088 {
4089 u32 val;
4090
4091 val = REG_RD(bp, BNX2_MISC_CFG);
4092 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4093 }
4094
4095
4096 static void
4097 bnx2_enable_nvram_access(struct bnx2 *bp)
4098 {
4099 u32 val;
4100
4101 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4102 /* Enable both bits, even on read. */
4103 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4104 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4105 }
4106
4107 static void
4108 bnx2_disable_nvram_access(struct bnx2 *bp)
4109 {
4110 u32 val;
4111
4112 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4113 /* Disable both bits, even after read. */
4114 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4115 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4116 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4117 }
4118
4119 static int
4120 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4121 {
4122 u32 cmd;
4123 int j;
4124
4125 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4126 /* Buffered flash, no erase needed */
4127 return 0;
4128
4129 /* Build an erase command */
4130 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4131 BNX2_NVM_COMMAND_DOIT;
4132
4133 /* Need to clear DONE bit separately. */
4134 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4135
4136 /* Address of the NVRAM page to erase. */
4137 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4138
4139 /* Issue an erase command. */
4140 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4141
4142 /* Wait for completion. */
4143 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4144 u32 val;
4145
4146 udelay(5);
4147
4148 val = REG_RD(bp, BNX2_NVM_COMMAND);
4149 if (val & BNX2_NVM_COMMAND_DONE)
4150 break;
4151 }
4152
4153 if (j >= NVRAM_TIMEOUT_COUNT)
4154 return -EBUSY;
4155
4156 return 0;
4157 }
4158
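/* Dword access protocol used by the read/write/erase helpers: clear the
 * DONE latch, program BNX2_NVM_ADDR (plus BNX2_NVM_WRITE for writes),
 * kick BNX2_NVM_COMMAND with DOIT and the caller's FIRST/LAST framing
 * flags, then poll for DONE.  Parts flagged BNX2_NV_TRANSLATE address
 * pages rather than a flat array, so a linear offset is first mapped to
 * (page << page_bits) + byte_in_page.  Worked example, assuming a
 * flash_table entry with page_size == 264 and page_bits == 9:
 * offset 600 -> page 2, byte 72 -> (2 << 9) + 72 = 1096.  When
 * page_size == (1 << page_bits) the mapping is the identity.
 */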
4159 static int
4160 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4161 {
4162 u32 cmd;
4163 int j;
4164
4165 /* Build the command word. */
4166 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4167
4168 /* Translate the offset for page-addressed flash; not needed on the 5709. */
4169 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4170 offset = ((offset / bp->flash_info->page_size) <<
4171 bp->flash_info->page_bits) +
4172 (offset % bp->flash_info->page_size);
4173 }
4174
4175 /* Need to clear DONE bit separately. */
4176 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4177
4178 /* Address of the NVRAM to read from. */
4179 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4180
4181 /* Issue a read command. */
4182 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4183
4184 /* Wait for completion. */
4185 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4186 u32 val;
4187
4188 udelay(5);
4189
4190 val = REG_RD(bp, BNX2_NVM_COMMAND);
4191 if (val & BNX2_NVM_COMMAND_DONE) {
4192 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4193 memcpy(ret_val, &v, 4);
4194 break;
4195 }
4196 }
4197 if (j >= NVRAM_TIMEOUT_COUNT)
4198 return -EBUSY;
4199
4200 return 0;
4201 }
4202
4203
4204 static int
4205 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4206 {
4207 u32 cmd;
4208 __be32 val32;
4209 int j;
4210
4211 /* Build the command word. */
4212 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4213
4214 /* Translate the offset for page-addressed flash; not needed on the 5709. */
4215 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4216 offset = ((offset / bp->flash_info->page_size) <<
4217 bp->flash_info->page_bits) +
4218 (offset % bp->flash_info->page_size);
4219 }
4220
4221 /* Need to clear DONE bit separately. */
4222 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4223
4224 memcpy(&val32, val, 4);
4225
4226 /* Write the data. */
4227 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4228
4229 /* Address of the NVRAM to write to. */
4230 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4231
4232 /* Issue the write command. */
4233 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4234
4235 /* Wait for completion. */
4236 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4237 udelay(5);
4238
4239 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4240 break;
4241 }
4242 if (j >= NVRAM_TIMEOUT_COUNT)
4243 return -EBUSY;
4244
4245 return 0;
4246 }
4247
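/* Flash detection, roughly: the 5709 always uses flash_5709; older chips
 * are identified from the BNX2_NVM_CFG1 strapping bits.  If bit 30
 * (0x40000000) is set the interface was already reconfigured and only
 * the backup-strap bits are compared; otherwise the raw straps are
 * matched against flash_table[] and the winning entry's CFG1-CFG3 and
 * WRITE1 values are programmed (bit meanings inferred from the masks
 * used below).
 */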
4248 static int
4249 bnx2_init_nvram(struct bnx2 *bp)
4250 {
4251 u32 val;
4252 int j, entry_count, rc = 0;
4253 const struct flash_spec *flash;
4254
4255 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4256 bp->flash_info = &flash_5709;
4257 goto get_flash_size;
4258 }
4259
4260 /* Determine the selected interface. */
4261 val = REG_RD(bp, BNX2_NVM_CFG1);
4262
4263 entry_count = ARRAY_SIZE(flash_table);
4264
4265 if (val & 0x40000000) {
4266
4267 /* Flash interface has been reconfigured */
4268 for (j = 0, flash = &flash_table[0]; j < entry_count;
4269 j++, flash++) {
4270 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4271 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4272 bp->flash_info = flash;
4273 break;
4274 }
4275 }
4276 }
4277 else {
4278 u32 mask;
4279 /* Not yet reconfigured */
4280
4281 if (val & (1 << 23))
4282 mask = FLASH_BACKUP_STRAP_MASK;
4283 else
4284 mask = FLASH_STRAP_MASK;
4285
4286 for (j = 0, flash = &flash_table[0]; j < entry_count;
4287 j++, flash++) {
4288
4289 if ((val & mask) == (flash->strapping & mask)) {
4290 bp->flash_info = flash;
4291
4292 /* Request access to the flash interface. */
4293 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4294 return rc;
4295
4296 /* Enable access to flash interface */
4297 bnx2_enable_nvram_access(bp);
4298
4299 /* Reconfigure the flash interface */
4300 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4301 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4302 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4303 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4304
4305 /* Disable access to flash interface */
4306 bnx2_disable_nvram_access(bp);
4307 bnx2_release_nvram_lock(bp);
4308
4309 break;
4310 }
4311 }
4312 } /* if (val & 0x40000000) */
4313
4314 if (j == entry_count) {
4315 bp->flash_info = NULL;
4316 pr_alert("Unknown flash/EEPROM type\n");
4317 return -ENODEV;
4318 }
4319
4320 get_flash_size:
4321 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4322 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4323 if (val)
4324 bp->flash_size = val;
4325 else
4326 bp->flash_size = bp->flash_info->total_size;
4327
4328 return rc;
4329 }
4330
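/* The flash is only addressable a dword at a time, so an arbitrary
 * (offset, buf_size) read is assembled from an unaligned head, an
 * aligned middle and a padded tail.  Worked example with illustrative
 * values offset = 6, buf_size = 5: the dword at 4 is read with the
 * FIRST flag and bytes 2-3 of it kept (pre_len = 2), then the dword at
 * 8 is read with the LAST flag and 3 of its 4 bytes kept (extra = 1 pad
 * byte dropped), yielding the 5 bytes at offsets 6..10.
 */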
4331 static int
4332 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4333 int buf_size)
4334 {
4335 int rc = 0;
4336 u32 cmd_flags, offset32, len32, extra;
4337
4338 if (buf_size == 0)
4339 return 0;
4340
4341 /* Request access to the flash interface. */
4342 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4343 return rc;
4344
4345 /* Enable access to flash interface */
4346 bnx2_enable_nvram_access(bp);
4347
4348 len32 = buf_size;
4349 offset32 = offset;
4350 extra = 0;
4351
4352 cmd_flags = 0;
4353
4354 if (offset32 & 3) {
4355 u8 buf[4];
4356 u32 pre_len;
4357
4358 offset32 &= ~3;
4359 pre_len = 4 - (offset & 3);
4360
4361 if (pre_len >= len32) {
4362 pre_len = len32;
4363 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4364 BNX2_NVM_COMMAND_LAST;
4365 }
4366 else {
4367 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4368 }
4369
4370 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4371
4372 if (rc)
4373 return rc;
4374
4375 memcpy(ret_buf, buf + (offset & 3), pre_len);
4376
4377 offset32 += 4;
4378 ret_buf += pre_len;
4379 len32 -= pre_len;
4380 }
4381 if (len32 & 3) {
4382 extra = 4 - (len32 & 3);
4383 len32 = (len32 + 4) & ~3;
4384 }
4385
4386 if (len32 == 4) {
4387 u8 buf[4];
4388
4389 if (cmd_flags)
4390 cmd_flags = BNX2_NVM_COMMAND_LAST;
4391 else
4392 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4393 BNX2_NVM_COMMAND_LAST;
4394
4395 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4396
4397 memcpy(ret_buf, buf, 4 - extra);
4398 }
4399 else if (len32 > 0) {
4400 u8 buf[4];
4401
4402 /* Read the first word. */
4403 if (cmd_flags)
4404 cmd_flags = 0;
4405 else
4406 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4407
4408 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4409
4410 /* Advance to the next dword. */
4411 offset32 += 4;
4412 ret_buf += 4;
4413 len32 -= 4;
4414
4415 while (len32 > 4 && rc == 0) {
4416 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4417
4418 /* Advance to the next dword. */
4419 offset32 += 4;
4420 ret_buf += 4;
4421 len32 -= 4;
4422 }
4423
4424 if (rc)
4425 return rc;
4426
4427 cmd_flags = BNX2_NVM_COMMAND_LAST;
4428 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4429
4430 memcpy(ret_buf, buf, 4 - extra);
4431 }
4432
4433 /* Disable access to flash interface */
4434 bnx2_disable_nvram_access(bp);
4435
4436 bnx2_release_nvram_lock(bp);
4437
4438 return rc;
4439 }
4440
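/* Writes are read-modify-write at two granularities.  The request is
 * first widened to dword alignment, reading back the clipped head/tail
 * dwords so neighbouring bytes survive.  Then, on non-buffered parts,
 * every affected page is read in full into flash_buffer (the 264-byte
 * kmalloc below appears sized for the largest page in flash_table),
 * erased, and rewritten as preserved head + new data + preserved tail.
 * Buffered parts skip the page read and erase entirely.
 */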
4441 static int
4442 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4443 int buf_size)
4444 {
4445 u32 written, offset32, len32;
4446 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4447 int rc = 0;
4448 int align_start, align_end;
4449
4450 buf = data_buf;
4451 offset32 = offset;
4452 len32 = buf_size;
4453 align_start = align_end = 0;
4454
4455 if ((align_start = (offset32 & 3))) {
4456 offset32 &= ~3;
4457 len32 += align_start;
4458 if (len32 < 4)
4459 len32 = 4;
4460 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4461 return rc;
4462 }
4463
4464 if (len32 & 3) {
4465 align_end = 4 - (len32 & 3);
4466 len32 += align_end;
4467 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4468 return rc;
4469 }
4470
4471 if (align_start || align_end) {
4472 align_buf = kmalloc(len32, GFP_KERNEL);
4473 if (align_buf == NULL)
4474 return -ENOMEM;
4475 if (align_start) {
4476 memcpy(align_buf, start, 4);
4477 }
4478 if (align_end) {
4479 memcpy(align_buf + len32 - 4, end, 4);
4480 }
4481 memcpy(align_buf + align_start, data_buf, buf_size);
4482 buf = align_buf;
4483 }
4484
4485 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4486 flash_buffer = kmalloc(264, GFP_KERNEL);
4487 if (flash_buffer == NULL) {
4488 rc = -ENOMEM;
4489 goto nvram_write_end;
4490 }
4491 }
4492
4493 written = 0;
4494 while ((written < len32) && (rc == 0)) {
4495 u32 page_start, page_end, data_start, data_end;
4496 u32 addr, cmd_flags;
4497 int i;
4498
4499 /* Find the page_start addr */
4500 page_start = offset32 + written;
4501 page_start -= (page_start % bp->flash_info->page_size);
4502 /* Find the page_end addr */
4503 page_end = page_start + bp->flash_info->page_size;
4504 /* Find the data_start addr */
4505 data_start = (written == 0) ? offset32 : page_start;
4506 /* Find the data_end addr */
4507 data_end = (page_end > offset32 + len32) ?
4508 (offset32 + len32) : page_end;
4509
4510 /* Request access to the flash interface. */
4511 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4512 goto nvram_write_end;
4513
4514 /* Enable access to flash interface */
4515 bnx2_enable_nvram_access(bp);
4516
4517 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4518 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4519 int j;
4520
4521 /* Read the whole page into the buffer
4522 * (non-buffered flash only) */
4523 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4524 if (j == (bp->flash_info->page_size - 4)) {
4525 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4526 }
4527 rc = bnx2_nvram_read_dword(bp,
4528 page_start + j,
4529 &flash_buffer[j],
4530 cmd_flags);
4531
4532 if (rc)
4533 goto nvram_write_end;
4534
4535 cmd_flags = 0;
4536 }
4537 }
4538
4539 /* Enable writes to flash interface (unlock write-protect) */
4540 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4541 goto nvram_write_end;
4542
4543 /* Loop to write back the buffer data from page_start to
4544 * data_start */
4545 i = 0;
4546 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4547 /* Erase the page */
4548 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4549 goto nvram_write_end;
4550
4551 /* Re-enable writes for the actual data write */
4552 bnx2_enable_nvram_write(bp);
4553
4554 for (addr = page_start; addr < data_start;
4555 addr += 4, i += 4) {
4556
4557 rc = bnx2_nvram_write_dword(bp, addr,
4558 &flash_buffer[i], cmd_flags);
4559
4560 if (rc != 0)
4561 goto nvram_write_end;
4562
4563 cmd_flags = 0;
4564 }
4565 }
4566
4567 /* Loop to write the new data from data_start to data_end */
4568 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4569 if ((addr == page_end - 4) ||
4570 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4571 (addr == data_end - 4))) {
4572
4573 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4574 }
4575 rc = bnx2_nvram_write_dword(bp, addr, buf,
4576 cmd_flags);
4577
4578 if (rc != 0)
4579 goto nvram_write_end;
4580
4581 cmd_flags = 0;
4582 buf += 4;
4583 }
4584
4585 /* Loop to write back the buffer data from data_end
4586 * to page_end */
4587 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4588 for (addr = data_end; addr < page_end;
4589 addr += 4, i += 4) {
4590
4591 if (addr == page_end-4) {
4592 cmd_flags = BNX2_NVM_COMMAND_LAST;
4593 }
4594 rc = bnx2_nvram_write_dword(bp, addr,
4595 &flash_buffer[i], cmd_flags);
4596
4597 if (rc != 0)
4598 goto nvram_write_end;
4599
4600 cmd_flags = 0;
4601 }
4602 }
4603
4604 /* Disable writes to flash interface (lock write-protect) */
4605 bnx2_disable_nvram_write(bp);
4606
4607 /* Disable access to flash interface */
4608 bnx2_disable_nvram_access(bp);
4609 bnx2_release_nvram_lock(bp);
4610
4611 /* Account for the bytes covered in this page pass */
4612 written += data_end - data_start;
4613 }
4614
4615 nvram_write_end:
4616 kfree(flash_buffer);
4617 kfree(align_buf);
4618 return rc;
4619 }
4620
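/* Driver/firmware capability handshake: the firmware advertises optional
 * features through the BNX2_FW_CAP_MB shmem mailbox, guarded by a
 * signature, and the driver acknowledges the subset it will use by
 * writing sig to BNX2_DRV_ACK_CAP_MB.  Two capabilities are negotiated
 * below: keeping VLAN tags while ASF management is enabled, and
 * remote-PHY mode, which also refreshes phy_port from the current link
 * status word.
 */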
4621 static void
4622 bnx2_init_fw_cap(struct bnx2 *bp)
4623 {
4624 u32 val, sig = 0;
4625
4626 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4627 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4628
4629 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4630 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4631
4632 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4633 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4634 return;
4635
4636 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4637 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4638 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4639 }
4640
4641 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4642 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4643 u32 link;
4644
4645 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4646
4647 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4648 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4649 bp->phy_port = PORT_FIBRE;
4650 else
4651 bp->phy_port = PORT_TP;
4652
4653 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4654 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4655 }
4656
4657 if (netif_running(bp->dev) && sig)
4658 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4659 }
4660
4661 static void
4662 bnx2_setup_msix_tbl(struct bnx2 *bp)
4663 {
4664 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4665
4666 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4667 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4668 }
4669
4670 static int
4671 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4672 {
4673 u32 val;
4674 int i, rc = 0;
4675 u8 old_port;
4676
4677 /* Wait for the current PCI transaction to complete before
4678 * issuing a reset. */
4679 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4680 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4681 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4682 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4683 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4684 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4685 udelay(5);
4686
4687 /* Wait for the firmware to tell us it is ok to issue a reset. */
4688 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4689
4690 /* Deposit a driver reset signature so the firmware knows that
4691 * this is a soft reset. */
4692 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4693 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4694
4695 /* Do a dummy read to force the chip to complete all current transactions
4696 * before we issue a reset. */
4697 val = REG_RD(bp, BNX2_MISC_ID);
4698
4699 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4700 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4701 REG_RD(bp, BNX2_MISC_COMMAND);
4702 udelay(5);
4703
4704 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4705 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4706
4707 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4708
4709 } else {
4710 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4711 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4712 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4713
4714 /* Chip reset. */
4715 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4716
4717 /* Reading back any register after chip reset will hang the
4718 * bus on 5706 A0 and A1. The msleep below provides plenty
4719 * of margin for write posting.
4720 */
4721 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4722 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4723 msleep(20);
4724
4725 /* Reset takes approximately 30 usec */
4726 for (i = 0; i < 10; i++) {
4727 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4728 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4729 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4730 break;
4731 udelay(10);
4732 }
4733
4734 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4735 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4736 pr_err("Chip reset did not complete\n");
4737 return -EBUSY;
4738 }
4739 }
4740
4741 /* Make sure byte swapping is properly configured. */
4742 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4743 if (val != 0x01020304) {
4744 pr_err("Chip not in correct endian mode\n");
4745 return -ENODEV;
4746 }
4747
4748 /* Wait for the firmware to finish its initialization. */
4749 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4750 if (rc)
4751 return rc;
4752
4753 spin_lock_bh(&bp->phy_lock);
4754 old_port = bp->phy_port;
4755 bnx2_init_fw_cap(bp);
4756 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4757 old_port != bp->phy_port)
4758 bnx2_set_default_remote_link(bp);
4759 spin_unlock_bh(&bp->phy_lock);
4760
4761 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4762 /* Adjust the voltage regulator two steps lower. The default
4763 * of this register is 0x0000000e. */
4764 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4765
4766 /* Remove bad rbuf memory from the free pool. */
4767 rc = bnx2_alloc_bad_rbuf(bp);
4768 }
4769
4770 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4771 bnx2_setup_msix_tbl(bp);
4772 /* Prevent MSI-X table reads and writes from timing out */
4773 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4774 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4775 }
4776
4777 return rc;
4778 }
4779
4780 static int
4781 bnx2_init_chip(struct bnx2 *bp)
4782 {
4783 u32 val, mtu;
4784 int rc, i;
4785
4786 /* Make sure the interrupt is not active. */
4787 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4788
4789 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4790 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4791 #ifdef __BIG_ENDIAN
4792 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4793 #endif
4794 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4795 DMA_READ_CHANS << 12 |
4796 DMA_WRITE_CHANS << 16;
4797
4798 val |= (0x2 << 20) | (1 << 11);
4799
4800 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4801 val |= (1 << 23);
4802
4803 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4804 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4805 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4806
4807 REG_WR(bp, BNX2_DMA_CONFIG, val);
4808
4809 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4810 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4811 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4812 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4813 }
4814
4815 if (bp->flags & BNX2_FLAG_PCIX) {
4816 u16 val16;
4817
4818 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4819 &val16);
4820 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4821 val16 & ~PCI_X_CMD_ERO);
4822 }
4823
4824 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4825 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4826 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4827 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4828
4829 /* Initialize context mapping and zero out the quick contexts. The
4830 * context block must have already been enabled. */
4831 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4832 rc = bnx2_init_5709_context(bp);
4833 if (rc)
4834 return rc;
4835 } else
4836 bnx2_init_context(bp);
4837
4838 if ((rc = bnx2_init_cpus(bp)) != 0)
4839 return rc;
4840
4841 bnx2_init_nvram(bp);
4842
4843 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4844
4845 val = REG_RD(bp, BNX2_MQ_CONFIG);
4846 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4847 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4848 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4849 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4850 if (CHIP_REV(bp) == CHIP_REV_Ax)
4851 val |= BNX2_MQ_CONFIG_HALT_DIS;
4852 }
4853
4854 REG_WR(bp, BNX2_MQ_CONFIG, val);
4855
4856 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4857 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4858 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4859
4860 val = (BCM_PAGE_BITS - 8) << 24;
4861 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4862
4863 /* Configure page size. */
4864 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4865 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4866 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4867 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4868
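/* The EMAC backoff seed folds the station address into a small value so
 * that NICs with different MACs choose different half-duplex backoff
 * slots.  Sketch of the fold with an illustrative MAC 00:10:18:01:02:03:
 *   0x00 + (0x10 << 8) + (0x18 << 16) = 0x181000
 *   0x01 + (0x02 << 8) + (0x03 << 16) = 0x030201
 *   seed = 0x181000 + 0x030201        = 0x1b1201
 */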
4869 val = bp->mac_addr[0] +
4870 (bp->mac_addr[1] << 8) +
4871 (bp->mac_addr[2] << 16) +
4872 bp->mac_addr[3] +
4873 (bp->mac_addr[4] << 8) +
4874 (bp->mac_addr[5] << 16);
4875 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4876
4877 /* Program the MTU. Include the Ethernet header and 4 bytes for the FCS (CRC32). */
4878 mtu = bp->dev->mtu;
4879 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4880 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4881 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4882 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4883
4884 if (mtu < 1500)
4885 mtu = 1500;
4886
4887 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4888 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4889 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4890
4891 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4892 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4893 bp->bnx2_napi[i].last_status_idx = 0;
4894
4895 bp->idle_chk_status_idx = 0xffff;
4896
4897 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4898
4899 /* Set up how to generate a link change interrupt. */
4900 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4901
4902 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4903 (u64) bp->status_blk_mapping & 0xffffffff);
4904 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4905
4906 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4907 (u64) bp->stats_blk_mapping & 0xffffffff);
4908 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4909 (u64) bp->stats_blk_mapping >> 32);
4910
4911 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4912 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4913
4914 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4915 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4916
4917 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4918 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4919
4920 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4921
4922 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4923
4924 REG_WR(bp, BNX2_HC_COM_TICKS,
4925 (bp->com_ticks_int << 16) | bp->com_ticks);
4926
4927 REG_WR(bp, BNX2_HC_CMD_TICKS,
4928 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4929
4930 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4931 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4932 else
4933 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4934 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4935
4936 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4937 val = BNX2_HC_CONFIG_COLLECT_STATS;
4938 else {
4939 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4940 BNX2_HC_CONFIG_COLLECT_STATS;
4941 }
4942
4943 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4944 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4945 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4946
4947 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4948 }
4949
4950 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4951 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4952
4953 REG_WR(bp, BNX2_HC_CONFIG, val);
4954
4955 if (bp->rx_ticks < 25)
4956 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4957 else
4958 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4959
4960 for (i = 1; i < bp->irq_nvecs; i++) {
4961 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4962 BNX2_HC_SB_CONFIG_1;
4963
4964 REG_WR(bp, base,
4965 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4966 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4967 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4968
4969 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4970 (bp->tx_quick_cons_trip_int << 16) |
4971 bp->tx_quick_cons_trip);
4972
4973 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4974 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4975
4976 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4977 (bp->rx_quick_cons_trip_int << 16) |
4978 bp->rx_quick_cons_trip);
4979
4980 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4981 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4982 }
4983
4984 /* Clear internal stats counters. */
4985 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4986
4987 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4988
4989 /* Initialize the receive filter. */
4990 bnx2_set_rx_mode(bp->dev);
4991
4992 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4993 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4994 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4995 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4996 }
4997 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4998 1, 0);
4999
5000 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5001 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5002
5003 udelay(20);
5004
5005 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5006
5007 return rc;
5008 }
5009
5010 static void
5011 bnx2_clear_ring_states(struct bnx2 *bp)
5012 {
5013 struct bnx2_napi *bnapi;
5014 struct bnx2_tx_ring_info *txr;
5015 struct bnx2_rx_ring_info *rxr;
5016 int i;
5017
5018 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5019 bnapi = &bp->bnx2_napi[i];
5020 txr = &bnapi->tx_ring;
5021 rxr = &bnapi->rx_ring;
5022
5023 txr->tx_cons = 0;
5024 txr->hw_tx_cons = 0;
5025 rxr->rx_prod_bseq = 0;
5026 rxr->rx_prod = 0;
5027 rxr->rx_cons = 0;
5028 rxr->rx_pg_prod = 0;
5029 rxr->rx_pg_cons = 0;
5030 }
5031 }
5032
5033 static void
5034 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5035 {
5036 u32 val, offset0, offset1, offset2, offset3;
5037 u32 cid_addr = GET_CID_ADDR(cid);
5038
5039 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5040 offset0 = BNX2_L2CTX_TYPE_XI;
5041 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5042 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5043 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5044 } else {
5045 offset0 = BNX2_L2CTX_TYPE;
5046 offset1 = BNX2_L2CTX_CMD_TYPE;
5047 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5048 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5049 }
5050 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5051 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5052
5053 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5054 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5055
5056 val = (u64) txr->tx_desc_mapping >> 32;
5057 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5058
5059 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5060 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5061 }
5062
5063 static void
5064 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5065 {
5066 struct tx_bd *txbd;
5067 u32 cid = TX_CID;
5068 struct bnx2_napi *bnapi;
5069 struct bnx2_tx_ring_info *txr;
5070
5071 bnapi = &bp->bnx2_napi[ring_num];
5072 txr = &bnapi->tx_ring;
5073
5074 if (ring_num == 0)
5075 cid = TX_CID;
5076 else
5077 cid = TX_TSS_CID + ring_num - 1;
5078
5079 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5080
5081 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5082
5083 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5084 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5085
5086 txr->tx_prod = 0;
5087 txr->tx_prod_bseq = 0;
5088
5089 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5090 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5091
5092 bnx2_init_tx_context(bp, cid, txr);
5093 }
5094
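/* RX descriptor pages are chained into one logical ring: every BD in a
 * page is stamped with the buffer size and START|END flags, and the
 * final BD of each page is reused as a link holding the DMA address of
 * the next page, with the last page linking back to page 0 to close the
 * ring.
 */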
5095 static void
5096 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5097 int num_rings)
5098 {
5099 int i;
5100 struct rx_bd *rxbd;
5101
5102 for (i = 0; i < num_rings; i++) {
5103 int j;
5104
5105 rxbd = &rx_ring[i][0];
5106 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5107 rxbd->rx_bd_len = buf_size;
5108 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5109 }
5110 if (i == (num_rings - 1))
5111 j = 0;
5112 else
5113 j = i + 1;
5114 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5115 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5116 }
5117 }
5118
5119 static void
5120 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5121 {
5122 int i;
5123 u16 prod, ring_prod;
5124 u32 cid, rx_cid_addr, val;
5125 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5126 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5127
5128 if (ring_num == 0)
5129 cid = RX_CID;
5130 else
5131 cid = RX_RSS_CID + ring_num - 1;
5132
5133 rx_cid_addr = GET_CID_ADDR(cid);
5134
5135 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5136 bp->rx_buf_use_size, bp->rx_max_ring);
5137
5138 bnx2_init_rx_context(bp, cid);
5139
5140 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5141 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5142 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5143 }
5144
5145 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5146 if (bp->rx_pg_ring_size) {
5147 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5148 rxr->rx_pg_desc_mapping,
5149 PAGE_SIZE, bp->rx_max_pg_ring);
5150 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5151 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5152 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5153 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5154
5155 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5156 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5157
5158 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5159 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5160
5161 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5162 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5163 }
5164
5165 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5166 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5167
5168 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5169 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5170
5171 ring_prod = prod = rxr->rx_pg_prod;
5172 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5173 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5174 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5175 ring_num, i, bp->rx_pg_ring_size);
5176 break;
5177 }
5178 prod = NEXT_RX_BD(prod);
5179 ring_prod = RX_PG_RING_IDX(prod);
5180 }
5181 rxr->rx_pg_prod = prod;
5182
5183 ring_prod = prod = rxr->rx_prod;
5184 for (i = 0; i < bp->rx_ring_size; i++) {
5185 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5186 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5187 ring_num, i, bp->rx_ring_size);
5188 break;
5189 }
5190 prod = NEXT_RX_BD(prod);
5191 ring_prod = RX_RING_IDX(prod);
5192 }
5193 rxr->rx_prod = prod;
5194
5195 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5196 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5197 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5198
5199 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5200 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5201
5202 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5203 }
5204
5205 static void
5206 bnx2_init_all_rings(struct bnx2 *bp)
5207 {
5208 int i;
5209 u32 val;
5210
5211 bnx2_clear_ring_states(bp);
5212
5213 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5214 for (i = 0; i < bp->num_tx_rings; i++)
5215 bnx2_init_tx_ring(bp, i);
5216
5217 if (bp->num_tx_rings > 1)
5218 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5219 (TX_TSS_CID << 7));
5220
5221 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5222 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5223
5224 for (i = 0; i < bp->num_rx_rings; i++)
5225 bnx2_init_rx_ring(bp, i);
5226
5227 if (bp->num_rx_rings > 1) {
5228 u32 tbl_32 = 0;
5229
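/* The RSS indirection table is packed 8 entries per dword: entry i
 * occupies the 4-bit nibble at bit (i % 8) * 4, and each completed
 * dword is flushed through RLUP_RSS_COMMAND with the table row (i >> 3)
 * in the low bits.  Entries cycle over the non-default RX rings, hence
 * the modulo by (num_rx_rings - 1).
 */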
5230 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5231 int shift = (i % 8) << 2;
5232
5233 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5234 if ((i % 8) == 7) {
5235 REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5236 REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5237 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5238 BNX2_RLUP_RSS_COMMAND_WRITE |
5239 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5240 tbl_32 = 0;
5241 }
5242 }
5243
5244 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5245 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5246
5247 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5248
5249 }
5250 }
5251
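/* Round the descriptor-page count up to a power of two, capped at
 * max_size.  Worked trace, assuming MAX_RX_DESC_CNT == 255 and
 * max_size == 8: ring_size = 600 leaves num_rings = 3 after the
 * subtraction loop; max shifts down from 8 until it overlaps a set bit
 * of 3 (max = 2, the top bit of num_rings) and, since 3 != 2, doubles
 * to 4 -- the next power of two >= num_rings.
 */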
5252 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5253 {
5254 u32 max, num_rings = 1;
5255
5256 while (ring_size > MAX_RX_DESC_CNT) {
5257 ring_size -= MAX_RX_DESC_CNT;
5258 num_rings++;
5259 }
5260 /* round num_rings up to the next power of 2, capped at max_size */
5261 max = max_size;
5262 while ((max & num_rings) == 0)
5263 max >>= 1;
5264
5265 if (num_rings != max)
5266 max <<= 1;
5267
5268 return max;
5269 }
5270
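/* Sizing sketch: rx_space is the true per-packet footprint (data plus
 * NET_SKB_PAD and the skb_shared_info tail).  Once that exceeds one
 * page, received packets are split, with only the first rx_jumbo_thresh
 * bytes kept in the skb head and the rest placed in the page ring.
 * Assuming 4 KiB pages, an MTU of 9000 gives
 * pages = PAGE_ALIGN(9000 - 40) >> PAGE_SHIFT = 3 pages per packet
 * (the 40 is presumably a TCP/IP header allowance), so the page ring is
 * provisioned at 3 * size entries, capped at MAX_TOTAL_RX_PG_DESC_CNT.
 */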
5271 static void
5272 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5273 {
5274 u32 rx_size, rx_space, jumbo_size;
5275
5276 /* 8 for CRC and VLAN */
5277 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5278
5279 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5280 sizeof(struct skb_shared_info);
5281
5282 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5283 bp->rx_pg_ring_size = 0;
5284 bp->rx_max_pg_ring = 0;
5285 bp->rx_max_pg_ring_idx = 0;
5286 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5287 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5288
5289 jumbo_size = size * pages;
5290 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5291 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5292
5293 bp->rx_pg_ring_size = jumbo_size;
5294 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5295 MAX_RX_PG_RINGS);
5296 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5297 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5298 bp->rx_copy_thresh = 0;
5299 }
5300
5301 bp->rx_buf_use_size = rx_size;
5302 /* hw alignment */
5303 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5304 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5305 bp->rx_ring_size = size;
5306 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5307 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5308 }
5309
5310 static void
5311 bnx2_free_tx_skbs(struct bnx2 *bp)
5312 {
5313 int i;
5314
5315 for (i = 0; i < bp->num_tx_rings; i++) {
5316 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5317 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5318 int j;
5319
5320 if (txr->tx_buf_ring == NULL)
5321 continue;
5322
5323 for (j = 0; j < TX_DESC_CNT; ) {
5324 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5325 struct sk_buff *skb = tx_buf->skb;
5326 int k, last;
5327
5328 if (skb == NULL) {
5329 j++;
5330 continue;
5331 }
5332
5333 dma_unmap_single(&bp->pdev->dev,
5334 dma_unmap_addr(tx_buf, mapping),
5335 skb_headlen(skb),
5336 PCI_DMA_TODEVICE);
5337
5338 tx_buf->skb = NULL;
5339
5340 last = tx_buf->nr_frags;
5341 j++;
5342 for (k = 0; k < last; k++, j++) {
5343 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5344 dma_unmap_page(&bp->pdev->dev,
5345 dma_unmap_addr(tx_buf, mapping),
5346 skb_shinfo(skb)->frags[k].size,
5347 PCI_DMA_TODEVICE);
5348 }
5349 dev_kfree_skb(skb);
5350 }
5351 }
5352 }
5353
5354 static void
5355 bnx2_free_rx_skbs(struct bnx2 *bp)
5356 {
5357 int i;
5358
5359 for (i = 0; i < bp->num_rx_rings; i++) {
5360 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5361 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5362 int j;
5363
5364 if (rxr->rx_buf_ring == NULL)
5365 return;
5366
5367 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5368 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5369 struct sk_buff *skb = rx_buf->skb;
5370
5371 if (skb == NULL)
5372 continue;
5373
5374 dma_unmap_single(&bp->pdev->dev,
5375 dma_unmap_addr(rx_buf, mapping),
5376 bp->rx_buf_use_size,
5377 PCI_DMA_FROMDEVICE);
5378
5379 rx_buf->skb = NULL;
5380
5381 dev_kfree_skb(skb);
5382 }
5383 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5384 bnx2_free_rx_page(bp, rxr, j);
5385 }
5386 }
5387
5388 static void
5389 bnx2_free_skbs(struct bnx2 *bp)
5390 {
5391 bnx2_free_tx_skbs(bp);
5392 bnx2_free_rx_skbs(bp);
5393 }
5394
5395 static int
5396 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5397 {
5398 int rc;
5399
5400 rc = bnx2_reset_chip(bp, reset_code);
5401 bnx2_free_skbs(bp);
5402 if (rc)
5403 return rc;
5404
5405 if ((rc = bnx2_init_chip(bp)) != 0)
5406 return rc;
5407
5408 bnx2_init_all_rings(bp);
5409 return 0;
5410 }
5411
5412 static int
5413 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5414 {
5415 int rc;
5416
5417 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5418 return rc;
5419
5420 spin_lock_bh(&bp->phy_lock);
5421 bnx2_init_phy(bp, reset_phy);
5422 bnx2_set_link(bp);
5423 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5424 bnx2_remote_phy_event(bp);
5425 spin_unlock_bh(&bp->phy_lock);
5426 return 0;
5427 }
5428
5429 static int
5430 bnx2_shutdown_chip(struct bnx2 *bp)
5431 {
5432 u32 reset_code;
5433
5434 if (bp->flags & BNX2_FLAG_NO_WOL)
5435 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5436 else if (bp->wol)
5437 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5438 else
5439 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5440
5441 return bnx2_reset_chip(bp, reset_code);
5442 }
5443
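/* Register self-test semantics: each table entry is probed by writing
 * all-zeros and then all-ones; bits in rw_mask must latch what was
 * written, while bits in ro_mask must keep their original value.  For
 * example, { 0x14ac, 0, 0x0fffffff, 0x10000000 } requires bits 0-27 to
 * be writable and bit 28 to be read-only, ignoring everything else.
 * The offset 0xffff entry terminates the table.
 */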
5444 static int
5445 bnx2_test_registers(struct bnx2 *bp)
5446 {
5447 int ret;
5448 int i, is_5709;
5449 static const struct {
5450 u16 offset;
5451 u16 flags;
5452 #define BNX2_FL_NOT_5709 1
5453 u32 rw_mask;
5454 u32 ro_mask;
5455 } reg_tbl[] = {
5456 { 0x006c, 0, 0x00000000, 0x0000003f },
5457 { 0x0090, 0, 0xffffffff, 0x00000000 },
5458 { 0x0094, 0, 0x00000000, 0x00000000 },
5459
5460 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5461 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5462 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5463 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5464 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5465 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5466 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5467 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5468 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5469
5470 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5471 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5472 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5473 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5474 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5475 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5476
5477 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5478 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5479 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5480
5481 { 0x1000, 0, 0x00000000, 0x00000001 },
5482 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5483
5484 { 0x1408, 0, 0x01c00800, 0x00000000 },
5485 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5486 { 0x14a8, 0, 0x00000000, 0x000001ff },
5487 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5488 { 0x14b0, 0, 0x00000002, 0x00000001 },
5489 { 0x14b8, 0, 0x00000000, 0x00000000 },
5490 { 0x14c0, 0, 0x00000000, 0x00000009 },
5491 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5492 { 0x14cc, 0, 0x00000000, 0x00000001 },
5493 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5494
5495 { 0x1800, 0, 0x00000000, 0x00000001 },
5496 { 0x1804, 0, 0x00000000, 0x00000003 },
5497
5498 { 0x2800, 0, 0x00000000, 0x00000001 },
5499 { 0x2804, 0, 0x00000000, 0x00003f01 },
5500 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5501 { 0x2810, 0, 0xffff0000, 0x00000000 },
5502 { 0x2814, 0, 0xffff0000, 0x00000000 },
5503 { 0x2818, 0, 0xffff0000, 0x00000000 },
5504 { 0x281c, 0, 0xffff0000, 0x00000000 },
5505 { 0x2834, 0, 0xffffffff, 0x00000000 },
5506 { 0x2840, 0, 0x00000000, 0xffffffff },
5507 { 0x2844, 0, 0x00000000, 0xffffffff },
5508 { 0x2848, 0, 0xffffffff, 0x00000000 },
5509 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5510
5511 { 0x2c00, 0, 0x00000000, 0x00000011 },
5512 { 0x2c04, 0, 0x00000000, 0x00030007 },
5513
5514 { 0x3c00, 0, 0x00000000, 0x00000001 },
5515 { 0x3c04, 0, 0x00000000, 0x00070000 },
5516 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5517 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5518 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5519 { 0x3c14, 0, 0x00000000, 0xffffffff },
5520 { 0x3c18, 0, 0x00000000, 0xffffffff },
5521 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5522 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5523
5524 { 0x5004, 0, 0x00000000, 0x0000007f },
5525 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5526
5527 { 0x5c00, 0, 0x00000000, 0x00000001 },
5528 { 0x5c04, 0, 0x00000000, 0x0003000f },
5529 { 0x5c08, 0, 0x00000003, 0x00000000 },
5530 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5531 { 0x5c10, 0, 0x00000000, 0xffffffff },
5532 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5533 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5534 { 0x5c88, 0, 0x00000000, 0x00077373 },
5535 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5536
5537 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5538 { 0x680c, 0, 0xffffffff, 0x00000000 },
5539 { 0x6810, 0, 0xffffffff, 0x00000000 },
5540 { 0x6814, 0, 0xffffffff, 0x00000000 },
5541 { 0x6818, 0, 0xffffffff, 0x00000000 },
5542 { 0x681c, 0, 0xffffffff, 0x00000000 },
5543 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5544 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5545 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5546 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5547 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5548 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5549 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5550 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5551 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5552 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5553 { 0x684c, 0, 0xffffffff, 0x00000000 },
5554 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5555 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5556 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5557 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5558 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5559 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5560
5561 { 0xffff, 0, 0x00000000, 0x00000000 },
5562 };
5563
5564 ret = 0;
5565 is_5709 = 0;
5566 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5567 is_5709 = 1;
5568
5569 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5570 u32 offset, rw_mask, ro_mask, save_val, val;
5571 u16 flags = reg_tbl[i].flags;
5572
5573 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5574 continue;
5575
5576 offset = (u32) reg_tbl[i].offset;
5577 rw_mask = reg_tbl[i].rw_mask;
5578 ro_mask = reg_tbl[i].ro_mask;
5579
5580 save_val = readl(bp->regview + offset);
5581
5582 writel(0, bp->regview + offset);
5583
5584 val = readl(bp->regview + offset);
5585 if ((val & rw_mask) != 0) {
5586 goto reg_test_err;
5587 }
5588
5589 if ((val & ro_mask) != (save_val & ro_mask)) {
5590 goto reg_test_err;
5591 }
5592
5593 writel(0xffffffff, bp->regview + offset);
5594
5595 val = readl(bp->regview + offset);
5596 if ((val & rw_mask) != rw_mask) {
5597 goto reg_test_err;
5598 }
5599
5600 if ((val & ro_mask) != (save_val & ro_mask)) {
5601 goto reg_test_err;
5602 }
5603
5604 writel(save_val, bp->regview + offset);
5605 continue;
5606
5607 reg_test_err:
5608 writel(save_val, bp->regview + offset);
5609 ret = -ENODEV;
5610 break;
5611 }
5612 return ret;
5613 }
5614
5615 static int
5616 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5617 {
5618 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5619 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5620 int i;
5621
5622 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5623 u32 offset;
5624
5625 for (offset = 0; offset < size; offset += 4) {
5626
5627 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5628
5629 if (bnx2_reg_rd_ind(bp, start + offset) !=
5630 test_pattern[i]) {
5631 return -ENODEV;
5632 }
5633 }
5634 }
5635 return 0;
5636 }
5637
5638 static int
5639 bnx2_test_memory(struct bnx2 *bp)
5640 {
5641 int ret = 0;
5642 int i;
5643 static struct mem_entry {
5644 u32 offset;
5645 u32 len;
5646 } mem_tbl_5706[] = {
5647 { 0x60000, 0x4000 },
5648 { 0xa0000, 0x3000 },
5649 { 0xe0000, 0x4000 },
5650 { 0x120000, 0x4000 },
5651 { 0x1a0000, 0x4000 },
5652 { 0x160000, 0x4000 },
5653 { 0xffffffff, 0 },
5654 },
5655 mem_tbl_5709[] = {
5656 { 0x60000, 0x4000 },
5657 { 0xa0000, 0x3000 },
5658 { 0xe0000, 0x4000 },
5659 { 0x120000, 0x4000 },
5660 { 0x1a0000, 0x4000 },
5661 { 0xffffffff, 0 },
5662 };
5663 struct mem_entry *mem_tbl;
5664
5665 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5666 mem_tbl = mem_tbl_5709;
5667 else
5668 mem_tbl = mem_tbl_5706;
5669
5670 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5671 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5672 mem_tbl[i].len)) != 0) {
5673 return ret;
5674 }
5675 }
5676
5677 return ret;
5678 }
5679
5680 #define BNX2_MAC_LOOPBACK 0
5681 #define BNX2_PHY_LOOPBACK 1
5682
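/* Loopback self-test flow, summarizing the code below: place the MAC or
 * PHY in loopback, build one frame addressed to ourselves filled with a
 * recognizable byte pattern, post a single TX BD, force coalescing
 * without an interrupt via BNX2_HC_COMMAND_COAL_NOW_WO_INT, then verify
 * that the TX consumer caught up, the RX consumer advanced by exactly
 * one packet, and the received l2_fhdr and payload are intact.
 */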
5683 static int
5684 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5685 {
5686 unsigned int pkt_size, num_pkts, i;
5687 struct sk_buff *skb, *rx_skb;
5688 unsigned char *packet;
5689 u16 rx_start_idx, rx_idx;
5690 dma_addr_t map;
5691 struct tx_bd *txbd;
5692 struct sw_bd *rx_buf;
5693 struct l2_fhdr *rx_hdr;
5694 int ret = -ENODEV;
5695 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5696 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5697 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5698
5699 tx_napi = bnapi;
5700
5701 txr = &tx_napi->tx_ring;
5702 rxr = &bnapi->rx_ring;
5703 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5704 bp->loopback = MAC_LOOPBACK;
5705 bnx2_set_mac_loopback(bp);
5706 }
5707 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5708 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5709 return 0;
5710
5711 bp->loopback = PHY_LOOPBACK;
5712 bnx2_set_phy_loopback(bp);
5713 }
5714 else
5715 return -EINVAL;
5716
5717 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5718 skb = netdev_alloc_skb(bp->dev, pkt_size);
5719 if (!skb)
5720 return -ENOMEM;
5721 packet = skb_put(skb, pkt_size);
5722 memcpy(packet, bp->dev->dev_addr, 6);
5723 memset(packet + 6, 0x0, 8);
5724 for (i = 14; i < pkt_size; i++)
5725 packet[i] = (unsigned char) (i & 0xff);
5726
5727 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5728 PCI_DMA_TODEVICE);
5729 if (dma_mapping_error(&bp->pdev->dev, map)) {
5730 dev_kfree_skb(skb);
5731 return -EIO;
5732 }
5733
5734 REG_WR(bp, BNX2_HC_COMMAND,
5735 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5736
5737 REG_RD(bp, BNX2_HC_COMMAND);
5738
5739 udelay(5);
5740 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5741
5742 num_pkts = 0;
5743
5744 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5745
5746 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5747 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5748 txbd->tx_bd_mss_nbytes = pkt_size;
5749 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5750
5751 num_pkts++;
5752 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5753 txr->tx_prod_bseq += pkt_size;
5754
5755 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5756 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5757
5758 udelay(100);
5759
5760 REG_WR(bp, BNX2_HC_COMMAND,
5761 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5762
5763 REG_RD(bp, BNX2_HC_COMMAND);
5764
5765 udelay(5);
5766
5767 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5768 dev_kfree_skb(skb);
5769
5770 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5771 goto loopback_test_done;
5772
5773 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5774 if (rx_idx != rx_start_idx + num_pkts) {
5775 goto loopback_test_done;
5776 }
5777
5778 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5779 rx_skb = rx_buf->skb;
5780
5781 rx_hdr = rx_buf->desc;
5782 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5783
5784 dma_sync_single_for_cpu(&bp->pdev->dev,
5785 dma_unmap_addr(rx_buf, mapping),
5786 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5787
5788 if (rx_hdr->l2_fhdr_status &
5789 (L2_FHDR_ERRORS_BAD_CRC |
5790 L2_FHDR_ERRORS_PHY_DECODE |
5791 L2_FHDR_ERRORS_ALIGNMENT |
5792 L2_FHDR_ERRORS_TOO_SHORT |
5793 L2_FHDR_ERRORS_GIANT_FRAME)) {
5794
5795 goto loopback_test_done;
5796 }
5797
5798 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5799 goto loopback_test_done;
5800 }
5801
5802 for (i = 14; i < pkt_size; i++) {
5803 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5804 goto loopback_test_done;
5805 }
5806 }
5807
5808 ret = 0;
5809
5810 loopback_test_done:
5811 bp->loopback = 0;
5812 return ret;
5813 }
5814
5815 #define BNX2_MAC_LOOPBACK_FAILED 1
5816 #define BNX2_PHY_LOOPBACK_FAILED 2
5817 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5818 BNX2_PHY_LOOPBACK_FAILED)
5819
5820 static int
5821 bnx2_test_loopback(struct bnx2 *bp)
5822 {
5823 int rc = 0;
5824
5825 if (!netif_running(bp->dev))
5826 return BNX2_LOOPBACK_FAILED;
5827
5828 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5829 spin_lock_bh(&bp->phy_lock);
5830 bnx2_init_phy(bp, 1);
5831 spin_unlock_bh(&bp->phy_lock);
5832 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5833 rc |= BNX2_MAC_LOOPBACK_FAILED;
5834 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5835 rc |= BNX2_PHY_LOOPBACK_FAILED;
5836 return rc;
5837 }
5838
5839 #define NVRAM_SIZE 0x200
5840 #define CRC32_RESIDUAL 0xdebb20e3
5841
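/* CRC residual check: each 0x100-byte block of this NVRAM region ends
 * with its own little-endian CRC32, and running ether_crc_le() over the
 * data together with a correct stored CRC always leaves the constant
 * residual 0xdebb20e3 (the bit-reversed CRC-32 residue), so the test
 * compares against CRC32_RESIDUAL instead of recomputing and matching a
 * stored checksum.
 */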
5842 static int
5843 bnx2_test_nvram(struct bnx2 *bp)
5844 {
5845 __be32 buf[NVRAM_SIZE / 4];
5846 u8 *data = (u8 *) buf;
5847 int rc = 0;
5848 u32 magic, csum;
5849
5850 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5851 goto test_nvram_done;
5852
5853 magic = be32_to_cpu(buf[0]);
5854 if (magic != 0x669955aa) {
5855 rc = -ENODEV;
5856 goto test_nvram_done;
5857 }
5858
5859 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5860 goto test_nvram_done;
5861
5862 csum = ether_crc_le(0x100, data);
5863 if (csum != CRC32_RESIDUAL) {
5864 rc = -ENODEV;
5865 goto test_nvram_done;
5866 }
5867
5868 csum = ether_crc_le(0x100, data + 0x100);
5869 if (csum != CRC32_RESIDUAL) {
5870 rc = -ENODEV;
5871 }
5872
5873 test_nvram_done:
5874 return rc;
5875 }
5876
5877 static int
5878 bnx2_test_link(struct bnx2 *bp)
5879 {
5880 u32 bmsr;
5881
5882 if (!netif_running(bp->dev))
5883 return -ENODEV;
5884
5885 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5886 if (bp->link_up)
5887 return 0;
5888 return -ENODEV;
5889 }
5890 spin_lock_bh(&bp->phy_lock);
5891 bnx2_enable_bmsr1(bp);
5892 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5893 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5894 bnx2_disable_bmsr1(bp);
5895 spin_unlock_bh(&bp->phy_lock);
5896
5897 if (bmsr & BMSR_LSTATUS) {
5898 return 0;
5899 }
5900 return -ENODEV;
5901 }
5902
5903 static int
5904 bnx2_test_intr(struct bnx2 *bp)
5905 {
5906 int i;
5907 u16 status_idx;
5908
5909 if (!netif_running(bp->dev))
5910 return -ENODEV;
5911
5912 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5913
5914 /* This register is not touched during run-time. */
5915 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5916 REG_RD(bp, BNX2_HC_COMMAND);
5917
5918 for (i = 0; i < 10; i++) {
5919 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5920 status_idx) {
5921
5922 break;
5923 }
5924
5925 msleep_interruptible(10);
5926 }
5927 if (i < 10)
5928 return 0;
5929
5930 return -ENODEV;
5931 }
5932
5933 /* Determine link state for parallel detection. */
5934 static int
5935 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5936 {
5937 u32 mode_ctl, an_dbg, exp;
5938
5939 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5940 return 0;
5941
5942 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5943 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5944
5945 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5946 return 0;
5947
5948 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5949 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5950 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5951
5952 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5953 return 0;
5954
5955 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5956 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5957 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5958
5959 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5960 return 0;
5961
5962 return 1;
5963 }
5964
5965 static void
5966 bnx2_5706_serdes_timer(struct bnx2 *bp)
5967 {
5968 int check_link = 1;
5969
5970 spin_lock(&bp->phy_lock);
5971 if (bp->serdes_an_pending) {
5972 bp->serdes_an_pending--;
5973 check_link = 0;
5974 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5975 u32 bmcr;
5976
5977 bp->current_interval = BNX2_TIMER_INTERVAL;
5978
5979 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5980
5981 if (bmcr & BMCR_ANENABLE) {
5982 if (bnx2_5706_serdes_has_link(bp)) {
5983 bmcr &= ~BMCR_ANENABLE;
5984 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5985 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5986 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5987 }
5988 }
5989 }
5990 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5991 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5992 u32 phy2;
5993
5994 bnx2_write_phy(bp, 0x17, 0x0f01);
5995 bnx2_read_phy(bp, 0x15, &phy2);
5996 if (phy2 & 0x20) {
5997 u32 bmcr;
5998
5999 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6000 bmcr |= BMCR_ANENABLE;
6001 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6002
6003 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6004 }
6005 } else
6006 bp->current_interval = BNX2_TIMER_INTERVAL;
6007
6008 if (check_link) {
6009 u32 val;
6010
6011 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6012 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6013 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6014
6015 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6016 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6017 bnx2_5706s_force_link_dn(bp, 1);
6018 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6019 } else
6020 bnx2_set_link(bp);
6021 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6022 bnx2_set_link(bp);
6023 }
6024 spin_unlock(&bp->phy_lock);
6025 }
6026
6027 static void
6028 bnx2_5708_serdes_timer(struct bnx2 *bp)
6029 {
6030 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6031 return;
6032
6033 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6034 bp->serdes_an_pending = 0;
6035 return;
6036 }
6037
6038 spin_lock(&bp->phy_lock);
6039 if (bp->serdes_an_pending)
6040 bp->serdes_an_pending--;
6041 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6042 u32 bmcr;
6043
6044 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6045 if (bmcr & BMCR_ANENABLE) {
6046 bnx2_enable_forced_2g5(bp);
6047 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6048 } else {
6049 bnx2_disable_forced_2g5(bp);
6050 bp->serdes_an_pending = 2;
6051 bp->current_interval = BNX2_TIMER_INTERVAL;
6052 }
6053
6054 } else
6055 bp->current_interval = BNX2_TIMER_INTERVAL;
6056
6057 spin_unlock(&bp->phy_lock);
6058 }
6059
6060 static void
6061 bnx2_timer(unsigned long data)
6062 {
6063 struct bnx2 *bp = (struct bnx2 *) data;
6064
6065 if (!netif_running(bp->dev))
6066 return;
6067
6068 if (atomic_read(&bp->intr_sem) != 0)
6069 goto bnx2_restart_timer;
6070
6071 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6072 BNX2_FLAG_USING_MSI)
6073 bnx2_chk_missed_msi(bp);
6074
6075 bnx2_send_heart_beat(bp);
6076
6077 bp->stats_blk->stat_FwRxDrop =
6078 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6079
6080 /* work around occasionally corrupted counters */
6081 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6082 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6083 BNX2_HC_COMMAND_STATS_NOW);
6084
6085 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6086 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6087 bnx2_5706_serdes_timer(bp);
6088 else
6089 bnx2_5708_serdes_timer(bp);
6090 }
6091
6092 bnx2_restart_timer:
6093 mod_timer(&bp->timer, jiffies + bp->current_interval);
6094 }
6095
6096 static int
6097 bnx2_request_irq(struct bnx2 *bp)
6098 {
6099 unsigned long flags;
6100 struct bnx2_irq *irq;
6101 int rc = 0, i;
6102
6103 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6104 flags = 0;
6105 else
6106 flags = IRQF_SHARED;
6107
6108 for (i = 0; i < bp->irq_nvecs; i++) {
6109 irq = &bp->irq_tbl[i];
6110 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6111 &bp->bnx2_napi[i]);
6112 if (rc)
6113 break;
6114 irq->requested = 1;
6115 }
6116 return rc;
6117 }
6118
6119 static void
6120 bnx2_free_irq(struct bnx2 *bp)
6121 {
6122 struct bnx2_irq *irq;
6123 int i;
6124
6125 for (i = 0; i < bp->irq_nvecs; i++) {
6126 irq = &bp->irq_tbl[i];
6127 if (irq->requested)
6128 free_irq(irq->vector, &bp->bnx2_napi[i]);
6129 irq->requested = 0;
6130 }
6131 if (bp->flags & BNX2_FLAG_USING_MSI)
6132 pci_disable_msi(bp->pdev);
6133 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6134 pci_disable_msix(bp->pdev);
6135
6136 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6137 }
6138
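/* Program the MSI-X table/PBA GRC windows, then ask the PCI core for
 * vectors, retrying with the smaller count that pci_enable_msix() reports
 * until it either succeeds or drops below BNX2_MIN_MSIX_VEC.  One extra
 * vector is reserved for cnic when BCM_CNIC is built in.
 */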
6139 static void
6140 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6141 {
6142 int i, total_vecs, rc;
6143 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6144 struct net_device *dev = bp->dev;
6145 const int len = sizeof(bp->irq_tbl[0].name);
6146
6147 bnx2_setup_msix_tbl(bp);
6148 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6149 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6150 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6151
6152 	/* Need to flush the previous three writes to ensure MSI-X
6153 	 * is set up properly */
6154 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6155
6156 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6157 msix_ent[i].entry = i;
6158 msix_ent[i].vector = 0;
6159 }
6160
6161 total_vecs = msix_vecs;
6162 #ifdef BCM_CNIC
6163 total_vecs++;
6164 #endif
6165 rc = -ENOSPC;
6166 while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6167 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6168 if (rc <= 0)
6169 break;
6170 		/* rc > 0 here: retry with the vector count that is available */
6171 		total_vecs = rc;
6172 }
6173
6174 if (rc != 0)
6175 return;
6176
6177 msix_vecs = total_vecs;
6178 #ifdef BCM_CNIC
6179 msix_vecs--;
6180 #endif
6181 bp->irq_nvecs = msix_vecs;
6182 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6183 for (i = 0; i < total_vecs; i++) {
6184 bp->irq_tbl[i].vector = msix_ent[i].vector;
6185 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6186 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6187 }
6188 }
6189
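/* Choose the interrupt mode in order of preference: MSI-X (up to
 * num_online_cpus() + 1 vectors, capped at RX_MAX_RINGS), then MSI, then
 * legacy INTx.  The tx queue count is irq_nvecs rounded down to a power
 * of two; the rx ring count equals irq_nvecs.
 */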
6190 static int
6191 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6192 {
6193 int cpus = num_online_cpus();
6194 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6195
6196 bp->irq_tbl[0].handler = bnx2_interrupt;
6197 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6198 bp->irq_nvecs = 1;
6199 bp->irq_tbl[0].vector = bp->pdev->irq;
6200
6201 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6202 bnx2_enable_msix(bp, msix_vecs);
6203
6204 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6205 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6206 if (pci_enable_msi(bp->pdev) == 0) {
6207 bp->flags |= BNX2_FLAG_USING_MSI;
6208 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6209 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6210 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6211 } else
6212 bp->irq_tbl[0].handler = bnx2_msi;
6213
6214 bp->irq_tbl[0].vector = bp->pdev->irq;
6215 }
6216 }
6217
6218 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6219 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6220
6221 bp->num_rx_rings = bp->irq_nvecs;
6222 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6223 }
6224
6225 /* Called with rtnl_lock */
6226 static int
6227 bnx2_open(struct net_device *dev)
6228 {
6229 struct bnx2 *bp = netdev_priv(dev);
6230 int rc;
6231
6232 netif_carrier_off(dev);
6233
6234 bnx2_set_power_state(bp, PCI_D0);
6235 bnx2_disable_int(bp);
6236
6237 rc = bnx2_setup_int_mode(bp, disable_msi);
6238 if (rc)
6239 goto open_err;
6240 bnx2_init_napi(bp);
6241 bnx2_napi_enable(bp);
6242 rc = bnx2_alloc_mem(bp);
6243 if (rc)
6244 goto open_err;
6245
6246 rc = bnx2_request_irq(bp);
6247 if (rc)
6248 goto open_err;
6249
6250 rc = bnx2_init_nic(bp, 1);
6251 if (rc)
6252 goto open_err;
6253
6254 mod_timer(&bp->timer, jiffies + bp->current_interval);
6255
6256 atomic_set(&bp->intr_sem, 0);
6257
6258 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6259
6260 bnx2_enable_int(bp);
6261
6262 if (bp->flags & BNX2_FLAG_USING_MSI) {
6263 		/* Test MSI to make sure it is working.
6264 		 * If the MSI test fails, go back to INTx mode.
6265 		 */
6266 if (bnx2_test_intr(bp) != 0) {
6267 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6268
6269 bnx2_disable_int(bp);
6270 bnx2_free_irq(bp);
6271
6272 bnx2_setup_int_mode(bp, 1);
6273
6274 rc = bnx2_init_nic(bp, 0);
6275
6276 if (!rc)
6277 rc = bnx2_request_irq(bp);
6278
6279 if (rc) {
6280 del_timer_sync(&bp->timer);
6281 goto open_err;
6282 }
6283 bnx2_enable_int(bp);
6284 }
6285 }
6286 if (bp->flags & BNX2_FLAG_USING_MSI)
6287 netdev_info(dev, "using MSI\n");
6288 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6289 netdev_info(dev, "using MSIX\n");
6290
6291 netif_tx_start_all_queues(dev);
6292
6293 return 0;
6294
6295 open_err:
6296 bnx2_napi_disable(bp);
6297 bnx2_free_skbs(bp);
6298 bnx2_free_irq(bp);
6299 bnx2_free_mem(bp);
6300 bnx2_del_napi(bp);
6301 return rc;
6302 }
6303
6304 static void
6305 bnx2_reset_task(struct work_struct *work)
6306 {
6307 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6308
6309 rtnl_lock();
6310 if (!netif_running(bp->dev)) {
6311 rtnl_unlock();
6312 return;
6313 }
6314
6315 bnx2_netif_stop(bp, true);
6316
6317 bnx2_init_nic(bp, 1);
6318
6319 atomic_set(&bp->intr_sem, 1);
6320 bnx2_netif_start(bp, true);
6321 rtnl_unlock();
6322 }
6323
6324 static void
6325 bnx2_dump_state(struct bnx2 *bp)
6326 {
6327 struct net_device *dev = bp->dev;
6328 u32 mcp_p0, mcp_p1, val1, val2;
6329
6330 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6331 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6332 atomic_read(&bp->intr_sem), val1);
6333 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6334 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6335 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6336 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6337 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6338 REG_RD(bp, BNX2_EMAC_RX_STATUS));
6339 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6340 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6341 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6342 mcp_p0 = BNX2_MCP_STATE_P0;
6343 mcp_p1 = BNX2_MCP_STATE_P1;
6344 } else {
6345 mcp_p0 = BNX2_MCP_STATE_P0_5708;
6346 mcp_p1 = BNX2_MCP_STATE_P1_5708;
6347 }
6348 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6349 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
6350 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6351 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6352 if (bp->flags & BNX2_FLAG_USING_MSIX)
6353 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6354 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6355 }
6356
6357 static void
6358 bnx2_tx_timeout(struct net_device *dev)
6359 {
6360 struct bnx2 *bp = netdev_priv(dev);
6361
6362 bnx2_dump_state(bp);
6363
6364 	/* This allows the netif to be shut down gracefully before resetting */
6365 schedule_work(&bp->reset_task);
6366 }
6367
6368 #ifdef BCM_VLAN
6369 /* Called with rtnl_lock */
6370 static void
6371 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6372 {
6373 struct bnx2 *bp = netdev_priv(dev);
6374
6375 if (netif_running(dev))
6376 bnx2_netif_stop(bp, false);
6377
6378 bp->vlgrp = vlgrp;
6379
6380 if (!netif_running(dev))
6381 return;
6382
6383 bnx2_set_rx_mode(dev);
6384 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6385 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6386
6387 bnx2_netif_start(bp, false);
6388 }
6389 #endif
6390
6391 /* Called with netif_tx_lock.
6392 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6393 * netif_wake_queue().
6394 */
6395 static netdev_tx_t
6396 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6397 {
6398 struct bnx2 *bp = netdev_priv(dev);
6399 dma_addr_t mapping;
6400 struct tx_bd *txbd;
6401 struct sw_tx_bd *tx_buf;
6402 u32 len, vlan_tag_flags, last_frag, mss;
6403 u16 prod, ring_prod;
6404 int i;
6405 struct bnx2_napi *bnapi;
6406 struct bnx2_tx_ring_info *txr;
6407 struct netdev_queue *txq;
6408
6409 /* Determine which tx ring we will be placed on */
6410 i = skb_get_queue_mapping(skb);
6411 bnapi = &bp->bnx2_napi[i];
6412 txr = &bnapi->tx_ring;
6413 txq = netdev_get_tx_queue(dev, i);
6414
6415 if (unlikely(bnx2_tx_avail(bp, txr) <
6416 (skb_shinfo(skb)->nr_frags + 1))) {
6417 netif_tx_stop_queue(txq);
6418 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6419
6420 return NETDEV_TX_BUSY;
6421 }
6422 len = skb_headlen(skb);
6423 prod = txr->tx_prod;
6424 ring_prod = TX_RING_IDX(prod);
6425
6426 vlan_tag_flags = 0;
6427 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6428 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6429 }
6430
6431 #ifdef BCM_VLAN
6432 if (vlan_tx_tag_present(skb)) {
6433 vlan_tag_flags |=
6434 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6435 }
6436 #endif
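	/* For LSO, encode the header sizes into the BD flags.  On IPv6 the
	 * extension-header length (in 8-byte units, beyond the standard
	 * Ethernet + IPv6 headers) is scattered across several TX BD
	 * bitfields below.
	 */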
6437 if ((mss = skb_shinfo(skb)->gso_size)) {
6438 u32 tcp_opt_len;
6439 struct iphdr *iph;
6440
6441 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6442
6443 tcp_opt_len = tcp_optlen(skb);
6444
6445 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6446 u32 tcp_off = skb_transport_offset(skb) -
6447 sizeof(struct ipv6hdr) - ETH_HLEN;
6448
6449 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6450 TX_BD_FLAGS_SW_FLAGS;
6451 if (likely(tcp_off == 0))
6452 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6453 else {
6454 tcp_off >>= 3;
6455 vlan_tag_flags |= ((tcp_off & 0x3) <<
6456 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6457 ((tcp_off & 0x10) <<
6458 TX_BD_FLAGS_TCP6_OFF4_SHL);
6459 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6460 }
6461 } else {
6462 iph = ip_hdr(skb);
6463 if (tcp_opt_len || (iph->ihl > 5)) {
6464 vlan_tag_flags |= ((iph->ihl - 5) +
6465 (tcp_opt_len >> 2)) << 8;
6466 }
6467 }
6468 } else
6469 mss = 0;
6470
6471 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6472 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6473 dev_kfree_skb(skb);
6474 return NETDEV_TX_OK;
6475 }
6476
6477 tx_buf = &txr->tx_buf_ring[ring_prod];
6478 tx_buf->skb = skb;
6479 dma_unmap_addr_set(tx_buf, mapping, mapping);
6480
6481 txbd = &txr->tx_desc_ring[ring_prod];
6482
6483 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6484 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6485 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6486 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6487
6488 last_frag = skb_shinfo(skb)->nr_frags;
6489 tx_buf->nr_frags = last_frag;
6490 tx_buf->is_gso = skb_is_gso(skb);
6491
6492 for (i = 0; i < last_frag; i++) {
6493 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6494
6495 prod = NEXT_TX_BD(prod);
6496 ring_prod = TX_RING_IDX(prod);
6497 txbd = &txr->tx_desc_ring[ring_prod];
6498
6499 len = frag->size;
6500 mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
6501 len, PCI_DMA_TODEVICE);
6502 if (dma_mapping_error(&bp->pdev->dev, mapping))
6503 goto dma_error;
6504 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6505 mapping);
6506
6507 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6508 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6509 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6510 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6511
6512 }
6513 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6514
6515 prod = NEXT_TX_BD(prod);
6516 txr->tx_prod_bseq += skb->len;
6517
6518 REG_WR16(bp, txr->tx_bidx_addr, prod);
6519 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6520
6521 mmiowb();
6522
6523 txr->tx_prod = prod;
6524
6525 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6526 netif_tx_stop_queue(txq);
6527
6528 /* netif_tx_stop_queue() must be done before checking
6529 * tx index in bnx2_tx_avail() below, because in
6530 * bnx2_tx_int(), we update tx index before checking for
6531 * netif_tx_queue_stopped().
6532 */
6533 smp_mb();
6534 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6535 netif_tx_wake_queue(txq);
6536 }
6537
6538 return NETDEV_TX_OK;
6539 dma_error:
6540 /* save value of frag that failed */
6541 last_frag = i;
6542
6543 /* start back at beginning and unmap skb */
6544 prod = txr->tx_prod;
6545 ring_prod = TX_RING_IDX(prod);
6546 tx_buf = &txr->tx_buf_ring[ring_prod];
6547 tx_buf->skb = NULL;
6548 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6549 skb_headlen(skb), PCI_DMA_TODEVICE);
6550
6551 /* unmap remaining mapped pages */
6552 for (i = 0; i < last_frag; i++) {
6553 prod = NEXT_TX_BD(prod);
6554 ring_prod = TX_RING_IDX(prod);
6555 tx_buf = &txr->tx_buf_ring[ring_prod];
6556 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6557 skb_shinfo(skb)->frags[i].size,
6558 PCI_DMA_TODEVICE);
6559 }
6560
6561 dev_kfree_skb(skb);
6562 return NETDEV_TX_OK;
6563 }
6564
6565 /* Called with rtnl_lock */
6566 static int
6567 bnx2_close(struct net_device *dev)
6568 {
6569 struct bnx2 *bp = netdev_priv(dev);
6570
6571 cancel_work_sync(&bp->reset_task);
6572
6573 bnx2_disable_int_sync(bp);
6574 bnx2_napi_disable(bp);
6575 del_timer_sync(&bp->timer);
6576 bnx2_shutdown_chip(bp);
6577 bnx2_free_irq(bp);
6578 bnx2_free_skbs(bp);
6579 bnx2_free_mem(bp);
6580 bnx2_del_napi(bp);
6581 bp->link_up = 0;
6582 netif_carrier_off(bp->dev);
6583 bnx2_set_power_state(bp, PCI_D3hot);
6584 return 0;
6585 }
6586
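/* Accumulate the hardware counters into temp_stats_blk before a chip
 * reset wipes them.  The 64-bit counters are kept as {hi, lo} u32 pairs,
 * so the halves are added separately with a manual carry.
 */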
6587 static void
6588 bnx2_save_stats(struct bnx2 *bp)
6589 {
6590 u32 *hw_stats = (u32 *) bp->stats_blk;
6591 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6592 int i;
6593
6594 	/* The first 10 counters are 64-bit, stored as hi/lo u32 pairs */
6595 for (i = 0; i < 20; i += 2) {
6596 u32 hi;
6597 u64 lo;
6598
6599 hi = temp_stats[i] + hw_stats[i];
6600 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6601 if (lo > 0xffffffff)
6602 hi++;
6603 temp_stats[i] = hi;
6604 temp_stats[i + 1] = lo & 0xffffffff;
6605 }
6606
6607 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6608 temp_stats[i] += hw_stats[i];
6609 }
6610
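/* The GET_*_NET_STATS helpers return a counter as the sum of the live
 * hardware statistics block and the totals saved in temp_stats_blk.
 */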
6611 #define GET_64BIT_NET_STATS64(ctr) \
6612 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6613
6614 #define GET_64BIT_NET_STATS(ctr) \
6615 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6616 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6617
6618 #define GET_32BIT_NET_STATS(ctr) \
6619 (unsigned long) (bp->stats_blk->ctr + \
6620 bp->temp_stats_blk->ctr)
6621
6622 static struct rtnl_link_stats64 *
6623 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6624 {
6625 struct bnx2 *bp = netdev_priv(dev);
6626
6627 if (bp->stats_blk == NULL)
6628 return net_stats;
6629
6630 net_stats->rx_packets =
6631 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6632 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6633 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6634
6635 net_stats->tx_packets =
6636 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6637 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6638 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6639
6640 net_stats->rx_bytes =
6641 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6642
6643 net_stats->tx_bytes =
6644 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6645
6646 net_stats->multicast =
6647 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6648
6649 net_stats->collisions =
6650 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6651
6652 net_stats->rx_length_errors =
6653 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6654 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6655
6656 net_stats->rx_over_errors =
6657 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6658 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6659
6660 net_stats->rx_frame_errors =
6661 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6662
6663 net_stats->rx_crc_errors =
6664 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6665
6666 net_stats->rx_errors = net_stats->rx_length_errors +
6667 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6668 net_stats->rx_crc_errors;
6669
6670 net_stats->tx_aborted_errors =
6671 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6672 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6673
6674 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6675 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6676 net_stats->tx_carrier_errors = 0;
6677 else {
6678 net_stats->tx_carrier_errors =
6679 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6680 }
6681
6682 net_stats->tx_errors =
6683 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6684 net_stats->tx_aborted_errors +
6685 net_stats->tx_carrier_errors;
6686
6687 net_stats->rx_missed_errors =
6688 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6689 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6690 GET_32BIT_NET_STATS(stat_FwRxDrop);
6691
6692 return net_stats;
6693 }
6694
6695 /* All ethtool functions called with rtnl_lock */
6696
6697 static int
6698 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6699 {
6700 struct bnx2 *bp = netdev_priv(dev);
6701 int support_serdes = 0, support_copper = 0;
6702
6703 cmd->supported = SUPPORTED_Autoneg;
6704 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6705 support_serdes = 1;
6706 support_copper = 1;
6707 } else if (bp->phy_port == PORT_FIBRE)
6708 support_serdes = 1;
6709 else
6710 support_copper = 1;
6711
6712 if (support_serdes) {
6713 cmd->supported |= SUPPORTED_1000baseT_Full |
6714 SUPPORTED_FIBRE;
6715 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6716 cmd->supported |= SUPPORTED_2500baseX_Full;
6717
6718 }
6719 if (support_copper) {
6720 cmd->supported |= SUPPORTED_10baseT_Half |
6721 SUPPORTED_10baseT_Full |
6722 SUPPORTED_100baseT_Half |
6723 SUPPORTED_100baseT_Full |
6724 SUPPORTED_1000baseT_Full |
6725 SUPPORTED_TP;
6726
6727 }
6728
6729 spin_lock_bh(&bp->phy_lock);
6730 cmd->port = bp->phy_port;
6731 cmd->advertising = bp->advertising;
6732
6733 if (bp->autoneg & AUTONEG_SPEED) {
6734 cmd->autoneg = AUTONEG_ENABLE;
6735 }
6736 else {
6737 cmd->autoneg = AUTONEG_DISABLE;
6738 }
6739
6740 if (netif_carrier_ok(dev)) {
6741 cmd->speed = bp->line_speed;
6742 cmd->duplex = bp->duplex;
6743 }
6744 else {
6745 cmd->speed = -1;
6746 cmd->duplex = -1;
6747 }
6748 spin_unlock_bh(&bp->phy_lock);
6749
6750 cmd->transceiver = XCVR_INTERNAL;
6751 cmd->phy_address = bp->phy_addr;
6752
6753 return 0;
6754 }
6755
6756 static int
6757 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6758 {
6759 struct bnx2 *bp = netdev_priv(dev);
6760 u8 autoneg = bp->autoneg;
6761 u8 req_duplex = bp->req_duplex;
6762 u16 req_line_speed = bp->req_line_speed;
6763 u32 advertising = bp->advertising;
6764 int err = -EINVAL;
6765
6766 spin_lock_bh(&bp->phy_lock);
6767
6768 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6769 goto err_out_unlock;
6770
6771 if (cmd->port != bp->phy_port &&
6772 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6773 goto err_out_unlock;
6774
6775 /* If device is down, we can store the settings only if the user
6776 * is setting the currently active port.
6777 */
6778 if (!netif_running(dev) && cmd->port != bp->phy_port)
6779 goto err_out_unlock;
6780
6781 if (cmd->autoneg == AUTONEG_ENABLE) {
6782 autoneg |= AUTONEG_SPEED;
6783
6784 advertising = cmd->advertising;
6785 if (cmd->port == PORT_TP) {
6786 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6787 if (!advertising)
6788 advertising = ETHTOOL_ALL_COPPER_SPEED;
6789 } else {
6790 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6791 if (!advertising)
6792 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6793 }
6794 advertising |= ADVERTISED_Autoneg;
6795 }
6796 else {
6797 if (cmd->port == PORT_FIBRE) {
6798 if ((cmd->speed != SPEED_1000 &&
6799 cmd->speed != SPEED_2500) ||
6800 (cmd->duplex != DUPLEX_FULL))
6801 goto err_out_unlock;
6802
6803 if (cmd->speed == SPEED_2500 &&
6804 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6805 goto err_out_unlock;
6806 }
6807 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6808 goto err_out_unlock;
6809
6810 autoneg &= ~AUTONEG_SPEED;
6811 req_line_speed = cmd->speed;
6812 req_duplex = cmd->duplex;
6813 advertising = 0;
6814 }
6815
6816 bp->autoneg = autoneg;
6817 bp->advertising = advertising;
6818 bp->req_line_speed = req_line_speed;
6819 bp->req_duplex = req_duplex;
6820
6821 err = 0;
6822 /* If device is down, the new settings will be picked up when it is
6823 * brought up.
6824 */
6825 if (netif_running(dev))
6826 err = bnx2_setup_phy(bp, cmd->port);
6827
6828 err_out_unlock:
6829 spin_unlock_bh(&bp->phy_lock);
6830
6831 return err;
6832 }
6833
6834 static void
6835 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6836 {
6837 struct bnx2 *bp = netdev_priv(dev);
6838
6839 strcpy(info->driver, DRV_MODULE_NAME);
6840 strcpy(info->version, DRV_MODULE_VERSION);
6841 strcpy(info->bus_info, pci_name(bp->pdev));
6842 strcpy(info->fw_version, bp->fw_version);
6843 }
6844
6845 #define BNX2_REGDUMP_LEN (32 * 1024)
6846
6847 static int
6848 bnx2_get_regs_len(struct net_device *dev)
6849 {
6850 return BNX2_REGDUMP_LEN;
6851 }
6852
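/* Dump registers in the ranges listed in reg_boundaries (each pair is an
 * inclusive start and exclusive end); the gaps between ranges stay
 * zero-filled from the initial memset.
 */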
6853 static void
6854 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6855 {
6856 u32 *p = _p, i, offset;
6857 u8 *orig_p = _p;
6858 struct bnx2 *bp = netdev_priv(dev);
6859 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6860 0x0800, 0x0880, 0x0c00, 0x0c10,
6861 0x0c30, 0x0d08, 0x1000, 0x101c,
6862 0x1040, 0x1048, 0x1080, 0x10a4,
6863 0x1400, 0x1490, 0x1498, 0x14f0,
6864 0x1500, 0x155c, 0x1580, 0x15dc,
6865 0x1600, 0x1658, 0x1680, 0x16d8,
6866 0x1800, 0x1820, 0x1840, 0x1854,
6867 0x1880, 0x1894, 0x1900, 0x1984,
6868 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6869 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6870 0x2000, 0x2030, 0x23c0, 0x2400,
6871 0x2800, 0x2820, 0x2830, 0x2850,
6872 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6873 0x3c00, 0x3c94, 0x4000, 0x4010,
6874 0x4080, 0x4090, 0x43c0, 0x4458,
6875 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6876 0x4fc0, 0x5010, 0x53c0, 0x5444,
6877 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6878 0x5fc0, 0x6000, 0x6400, 0x6428,
6879 0x6800, 0x6848, 0x684c, 0x6860,
6880 0x6888, 0x6910, 0x8000 };
6881
6882 regs->version = 0;
6883
6884 memset(p, 0, BNX2_REGDUMP_LEN);
6885
6886 if (!netif_running(bp->dev))
6887 return;
6888
6889 i = 0;
6890 offset = reg_boundaries[0];
6891 p += offset;
6892 while (offset < BNX2_REGDUMP_LEN) {
6893 *p++ = REG_RD(bp, offset);
6894 offset += 4;
6895 if (offset == reg_boundaries[i + 1]) {
6896 offset = reg_boundaries[i + 2];
6897 p = (u32 *) (orig_p + offset);
6898 i += 2;
6899 }
6900 }
6901 }
6902
6903 static void
6904 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6905 {
6906 struct bnx2 *bp = netdev_priv(dev);
6907
6908 if (bp->flags & BNX2_FLAG_NO_WOL) {
6909 wol->supported = 0;
6910 wol->wolopts = 0;
6911 }
6912 else {
6913 wol->supported = WAKE_MAGIC;
6914 if (bp->wol)
6915 wol->wolopts = WAKE_MAGIC;
6916 else
6917 wol->wolopts = 0;
6918 }
6919 memset(&wol->sopass, 0, sizeof(wol->sopass));
6920 }
6921
6922 static int
6923 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6924 {
6925 struct bnx2 *bp = netdev_priv(dev);
6926
6927 if (wol->wolopts & ~WAKE_MAGIC)
6928 return -EINVAL;
6929
6930 if (wol->wolopts & WAKE_MAGIC) {
6931 if (bp->flags & BNX2_FLAG_NO_WOL)
6932 return -EINVAL;
6933
6934 bp->wol = 1;
6935 }
6936 else {
6937 bp->wol = 0;
6938 }
6939 return 0;
6940 }
6941
6942 static int
6943 bnx2_nway_reset(struct net_device *dev)
6944 {
6945 struct bnx2 *bp = netdev_priv(dev);
6946 u32 bmcr;
6947
6948 if (!netif_running(dev))
6949 return -EAGAIN;
6950
6951 if (!(bp->autoneg & AUTONEG_SPEED)) {
6952 return -EINVAL;
6953 }
6954
6955 spin_lock_bh(&bp->phy_lock);
6956
6957 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6958 int rc;
6959
6960 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6961 spin_unlock_bh(&bp->phy_lock);
6962 return rc;
6963 }
6964
6965 /* Force a link down visible on the other side */
6966 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6967 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6968 spin_unlock_bh(&bp->phy_lock);
6969
6970 msleep(20);
6971
6972 spin_lock_bh(&bp->phy_lock);
6973
6974 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6975 bp->serdes_an_pending = 1;
6976 mod_timer(&bp->timer, jiffies + bp->current_interval);
6977 }
6978
6979 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6980 bmcr &= ~BMCR_LOOPBACK;
6981 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6982
6983 spin_unlock_bh(&bp->phy_lock);
6984
6985 return 0;
6986 }
6987
6988 static u32
6989 bnx2_get_link(struct net_device *dev)
6990 {
6991 struct bnx2 *bp = netdev_priv(dev);
6992
6993 return bp->link_up;
6994 }
6995
6996 static int
6997 bnx2_get_eeprom_len(struct net_device *dev)
6998 {
6999 struct bnx2 *bp = netdev_priv(dev);
7000
7001 if (bp->flash_info == NULL)
7002 return 0;
7003
7004 return (int) bp->flash_size;
7005 }
7006
7007 static int
7008 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7009 u8 *eebuf)
7010 {
7011 struct bnx2 *bp = netdev_priv(dev);
7012 int rc;
7013
7014 if (!netif_running(dev))
7015 return -EAGAIN;
7016
7017 /* parameters already validated in ethtool_get_eeprom */
7018
7019 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7020
7021 return rc;
7022 }
7023
7024 static int
7025 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7026 u8 *eebuf)
7027 {
7028 struct bnx2 *bp = netdev_priv(dev);
7029 int rc;
7030
7031 if (!netif_running(dev))
7032 return -EAGAIN;
7033
7034 /* parameters already validated in ethtool_set_eeprom */
7035
7036 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7037
7038 return rc;
7039 }
7040
7041 static int
7042 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7043 {
7044 struct bnx2 *bp = netdev_priv(dev);
7045
7046 memset(coal, 0, sizeof(struct ethtool_coalesce));
7047
7048 coal->rx_coalesce_usecs = bp->rx_ticks;
7049 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7050 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7051 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7052
7053 coal->tx_coalesce_usecs = bp->tx_ticks;
7054 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7055 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7056 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7057
7058 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7059
7060 return 0;
7061 }
7062
7063 static int
7064 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7065 {
7066 struct bnx2 *bp = netdev_priv(dev);
7067
7068 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7069 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7070
7071 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7072 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7073
7074 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7075 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7076
7077 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7078 if (bp->rx_quick_cons_trip_int > 0xff)
7079 bp->rx_quick_cons_trip_int = 0xff;
7080
7081 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7082 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7083
7084 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7085 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7086
7087 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7088 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7089
7090 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7091 	if (bp->tx_quick_cons_trip_int > 0xff)
7092 		bp->tx_quick_cons_trip_int = 0xff;
7093
7094 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7095 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7096 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7097 bp->stats_ticks = USEC_PER_SEC;
7098 }
7099 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7100 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7101 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7102
7103 if (netif_running(bp->dev)) {
7104 bnx2_netif_stop(bp, true);
7105 bnx2_init_nic(bp, 0);
7106 bnx2_netif_start(bp, true);
7107 }
7108
7109 return 0;
7110 }
7111
7112 static void
7113 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7114 {
7115 struct bnx2 *bp = netdev_priv(dev);
7116
7117 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7118 ering->rx_mini_max_pending = 0;
7119 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7120
7121 ering->rx_pending = bp->rx_ring_size;
7122 ering->rx_mini_pending = 0;
7123 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7124
7125 ering->tx_max_pending = MAX_TX_DESC_CNT;
7126 ering->tx_pending = bp->tx_ring_size;
7127 }
7128
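/* Resize the rings.  If the device is up this requires a full chip reset,
 * so save the statistics first (the reset clears them), free and
 * reallocate all memory, and re-init the NIC with the new sizes.
 */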
7129 static int
7130 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7131 {
7132 if (netif_running(bp->dev)) {
7133 /* Reset will erase chipset stats; save them */
7134 bnx2_save_stats(bp);
7135
7136 bnx2_netif_stop(bp, true);
7137 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7138 bnx2_free_skbs(bp);
7139 bnx2_free_mem(bp);
7140 }
7141
7142 bnx2_set_rx_ring_size(bp, rx);
7143 bp->tx_ring_size = tx;
7144
7145 if (netif_running(bp->dev)) {
7146 int rc;
7147
7148 rc = bnx2_alloc_mem(bp);
7149 if (!rc)
7150 rc = bnx2_init_nic(bp, 0);
7151
7152 if (rc) {
7153 bnx2_napi_enable(bp);
7154 dev_close(bp->dev);
7155 return rc;
7156 }
7157 #ifdef BCM_CNIC
7158 mutex_lock(&bp->cnic_lock);
7159 /* Let cnic know about the new status block. */
7160 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7161 bnx2_setup_cnic_irq_info(bp);
7162 mutex_unlock(&bp->cnic_lock);
7163 #endif
7164 bnx2_netif_start(bp, true);
7165 }
7166 return 0;
7167 }
7168
7169 static int
7170 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7171 {
7172 struct bnx2 *bp = netdev_priv(dev);
7173 int rc;
7174
7175 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7176 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7177 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7178
7179 return -EINVAL;
7180 }
7181 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7182 return rc;
7183 }
7184
7185 static void
7186 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7187 {
7188 struct bnx2 *bp = netdev_priv(dev);
7189
7190 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7191 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7192 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7193 }
7194
7195 static int
7196 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7197 {
7198 struct bnx2 *bp = netdev_priv(dev);
7199
7200 bp->req_flow_ctrl = 0;
7201 if (epause->rx_pause)
7202 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7203 if (epause->tx_pause)
7204 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7205
7206 if (epause->autoneg) {
7207 bp->autoneg |= AUTONEG_FLOW_CTRL;
7208 }
7209 else {
7210 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7211 }
7212
7213 if (netif_running(dev)) {
7214 spin_lock_bh(&bp->phy_lock);
7215 bnx2_setup_phy(bp, bp->phy_port);
7216 spin_unlock_bh(&bp->phy_lock);
7217 }
7218
7219 return 0;
7220 }
7221
7222 static u32
7223 bnx2_get_rx_csum(struct net_device *dev)
7224 {
7225 struct bnx2 *bp = netdev_priv(dev);
7226
7227 return bp->rx_csum;
7228 }
7229
7230 static int
7231 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7232 {
7233 struct bnx2 *bp = netdev_priv(dev);
7234
7235 bp->rx_csum = data;
7236 return 0;
7237 }
7238
7239 static int
7240 bnx2_set_tso(struct net_device *dev, u32 data)
7241 {
7242 struct bnx2 *bp = netdev_priv(dev);
7243
7244 if (data) {
7245 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7246 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7247 dev->features |= NETIF_F_TSO6;
7248 } else
7249 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7250 NETIF_F_TSO_ECN);
7251 return 0;
7252 }
7253
7254 static struct {
7255 char string[ETH_GSTRING_LEN];
7256 } bnx2_stats_str_arr[] = {
7257 { "rx_bytes" },
7258 { "rx_error_bytes" },
7259 { "tx_bytes" },
7260 { "tx_error_bytes" },
7261 { "rx_ucast_packets" },
7262 { "rx_mcast_packets" },
7263 { "rx_bcast_packets" },
7264 { "tx_ucast_packets" },
7265 { "tx_mcast_packets" },
7266 { "tx_bcast_packets" },
7267 { "tx_mac_errors" },
7268 { "tx_carrier_errors" },
7269 { "rx_crc_errors" },
7270 { "rx_align_errors" },
7271 { "tx_single_collisions" },
7272 { "tx_multi_collisions" },
7273 { "tx_deferred" },
7274 { "tx_excess_collisions" },
7275 { "tx_late_collisions" },
7276 { "tx_total_collisions" },
7277 { "rx_fragments" },
7278 { "rx_jabbers" },
7279 { "rx_undersize_packets" },
7280 { "rx_oversize_packets" },
7281 { "rx_64_byte_packets" },
7282 { "rx_65_to_127_byte_packets" },
7283 { "rx_128_to_255_byte_packets" },
7284 { "rx_256_to_511_byte_packets" },
7285 { "rx_512_to_1023_byte_packets" },
7286 { "rx_1024_to_1522_byte_packets" },
7287 { "rx_1523_to_9022_byte_packets" },
7288 { "tx_64_byte_packets" },
7289 { "tx_65_to_127_byte_packets" },
7290 { "tx_128_to_255_byte_packets" },
7291 { "tx_256_to_511_byte_packets" },
7292 { "tx_512_to_1023_byte_packets" },
7293 { "tx_1024_to_1522_byte_packets" },
7294 { "tx_1523_to_9022_byte_packets" },
7295 { "rx_xon_frames" },
7296 { "rx_xoff_frames" },
7297 { "tx_xon_frames" },
7298 { "tx_xoff_frames" },
7299 { "rx_mac_ctrl_frames" },
7300 { "rx_filtered_packets" },
7301 { "rx_ftq_discards" },
7302 { "rx_discards" },
7303 { "rx_fw_discards" },
7304 };
7305
7306 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7307 sizeof(bnx2_stats_str_arr[0]))
7308
7309 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7310
7311 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7312 STATS_OFFSET32(stat_IfHCInOctets_hi),
7313 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7314 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7315 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7316 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7317 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7318 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7319 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7320 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7321 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7322 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7323 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7324 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7325 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7326 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7327 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7328 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7329 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7330 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7331 STATS_OFFSET32(stat_EtherStatsCollisions),
7332 STATS_OFFSET32(stat_EtherStatsFragments),
7333 STATS_OFFSET32(stat_EtherStatsJabbers),
7334 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7335 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7336 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7337 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7338 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7339 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7340 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7341 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7342 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7343 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7344 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7345 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7346 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7347 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7348 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7349 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7350 STATS_OFFSET32(stat_XonPauseFramesReceived),
7351 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7352 STATS_OFFSET32(stat_OutXonSent),
7353 STATS_OFFSET32(stat_OutXoffSent),
7354 STATS_OFFSET32(stat_MacControlFramesReceived),
7355 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7356 STATS_OFFSET32(stat_IfInFTQDiscards),
7357 STATS_OFFSET32(stat_IfInMBUFDiscards),
7358 STATS_OFFSET32(stat_FwRxDrop),
7359 };
7360
7361 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7362 * skipped because of errata.
7363 */
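/* Per-counter width in bytes: 8 = 64-bit, 4 = 32-bit, 0 = skip. */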
7364 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7365 8,0,8,8,8,8,8,8,8,8,
7366 4,0,4,4,4,4,4,4,4,4,
7367 4,4,4,4,4,4,4,4,4,4,
7368 4,4,4,4,4,4,4,4,4,4,
7369 4,4,4,4,4,4,4,
7370 };
7371
7372 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7373 8,0,8,8,8,8,8,8,8,8,
7374 4,4,4,4,4,4,4,4,4,4,
7375 4,4,4,4,4,4,4,4,4,4,
7376 4,4,4,4,4,4,4,4,4,4,
7377 4,4,4,4,4,4,4,
7378 };
7379
7380 #define BNX2_NUM_TESTS 6
7381
7382 static struct {
7383 char string[ETH_GSTRING_LEN];
7384 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7385 { "register_test (offline)" },
7386 { "memory_test (offline)" },
7387 { "loopback_test (offline)" },
7388 { "nvram_test (online)" },
7389 { "interrupt_test (online)" },
7390 { "link_test (online)" },
7391 };
7392
7393 static int
7394 bnx2_get_sset_count(struct net_device *dev, int sset)
7395 {
7396 switch (sset) {
7397 case ETH_SS_TEST:
7398 return BNX2_NUM_TESTS;
7399 case ETH_SS_STATS:
7400 return BNX2_NUM_STATS;
7401 default:
7402 return -EOPNOTSUPP;
7403 }
7404 }
7405
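/* ethtool self-test.  Offline runs the register, memory, and loopback
 * tests (buf[0]..buf[2]) under a diagnostic chip reset; the nvram,
 * interrupt, and link tests (buf[3]..buf[5]) also run online.
 */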
7406 static void
7407 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7408 {
7409 struct bnx2 *bp = netdev_priv(dev);
7410
7411 bnx2_set_power_state(bp, PCI_D0);
7412
7413 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7414 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7415 int i;
7416
7417 bnx2_netif_stop(bp, true);
7418 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7419 bnx2_free_skbs(bp);
7420
7421 if (bnx2_test_registers(bp) != 0) {
7422 buf[0] = 1;
7423 etest->flags |= ETH_TEST_FL_FAILED;
7424 }
7425 if (bnx2_test_memory(bp) != 0) {
7426 buf[1] = 1;
7427 etest->flags |= ETH_TEST_FL_FAILED;
7428 }
7429 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7430 etest->flags |= ETH_TEST_FL_FAILED;
7431
7432 if (!netif_running(bp->dev))
7433 bnx2_shutdown_chip(bp);
7434 else {
7435 bnx2_init_nic(bp, 1);
7436 bnx2_netif_start(bp, true);
7437 }
7438
7439 /* wait for link up */
7440 for (i = 0; i < 7; i++) {
7441 if (bp->link_up)
7442 break;
7443 msleep_interruptible(1000);
7444 }
7445 }
7446
7447 if (bnx2_test_nvram(bp) != 0) {
7448 buf[3] = 1;
7449 etest->flags |= ETH_TEST_FL_FAILED;
7450 }
7451 if (bnx2_test_intr(bp) != 0) {
7452 buf[4] = 1;
7453 etest->flags |= ETH_TEST_FL_FAILED;
7454 }
7455
7456 if (bnx2_test_link(bp) != 0) {
7457 buf[5] = 1;
7458 etest->flags |= ETH_TEST_FL_FAILED;
7459
7460 }
7461 if (!netif_running(bp->dev))
7462 bnx2_set_power_state(bp, PCI_D3hot);
7463 }
7464
7465 static void
7466 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7467 {
7468 switch (stringset) {
7469 case ETH_SS_STATS:
7470 memcpy(buf, bnx2_stats_str_arr,
7471 sizeof(bnx2_stats_str_arr));
7472 break;
7473 case ETH_SS_TEST:
7474 memcpy(buf, bnx2_tests_str_arr,
7475 sizeof(bnx2_tests_str_arr));
7476 break;
7477 }
7478 }
7479
7480 static void
7481 bnx2_get_ethtool_stats(struct net_device *dev,
7482 struct ethtool_stats *stats, u64 *buf)
7483 {
7484 struct bnx2 *bp = netdev_priv(dev);
7485 int i;
7486 u32 *hw_stats = (u32 *) bp->stats_blk;
7487 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7488 u8 *stats_len_arr = NULL;
7489
7490 if (hw_stats == NULL) {
7491 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7492 return;
7493 }
7494
7495 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7496 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7497 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7498 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7499 stats_len_arr = bnx2_5706_stats_len_arr;
7500 else
7501 stats_len_arr = bnx2_5708_stats_len_arr;
7502
7503 for (i = 0; i < BNX2_NUM_STATS; i++) {
7504 unsigned long offset;
7505
7506 if (stats_len_arr[i] == 0) {
7507 /* skip this counter */
7508 buf[i] = 0;
7509 continue;
7510 }
7511
7512 offset = bnx2_stats_offset_arr[i];
7513 if (stats_len_arr[i] == 4) {
7514 /* 4-byte counter */
7515 buf[i] = (u64) *(hw_stats + offset) +
7516 *(temp_stats + offset);
7517 continue;
7518 }
7519 /* 8-byte counter */
7520 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7521 *(hw_stats + offset + 1) +
7522 (((u64) *(temp_stats + offset)) << 32) +
7523 *(temp_stats + offset + 1);
7524 }
7525 }
7526
7527 static int
7528 bnx2_phys_id(struct net_device *dev, u32 data)
7529 {
7530 struct bnx2 *bp = netdev_priv(dev);
7531 int i;
7532 u32 save;
7533
7534 bnx2_set_power_state(bp, PCI_D0);
7535
7536 if (data == 0)
7537 data = 2;
7538
7539 save = REG_RD(bp, BNX2_MISC_CFG);
7540 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7541
7542 for (i = 0; i < (data * 2); i++) {
7543 if ((i % 2) == 0) {
7544 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7545 }
7546 else {
7547 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7548 BNX2_EMAC_LED_1000MB_OVERRIDE |
7549 BNX2_EMAC_LED_100MB_OVERRIDE |
7550 BNX2_EMAC_LED_10MB_OVERRIDE |
7551 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7552 BNX2_EMAC_LED_TRAFFIC);
7553 }
7554 msleep_interruptible(500);
7555 if (signal_pending(current))
7556 break;
7557 }
7558 REG_WR(bp, BNX2_EMAC_LED, 0);
7559 REG_WR(bp, BNX2_MISC_CFG, save);
7560
7561 if (!netif_running(dev))
7562 bnx2_set_power_state(bp, PCI_D3hot);
7563
7564 return 0;
7565 }
7566
7567 static int
7568 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7569 {
7570 struct bnx2 *bp = netdev_priv(dev);
7571
7572 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7573 return ethtool_op_set_tx_ipv6_csum(dev, data);
7574 else
7575 return ethtool_op_set_tx_csum(dev, data);
7576 }
7577
7578 static int
7579 bnx2_set_flags(struct net_device *dev, u32 data)
7580 {
7581 return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
7582 }
7583
7584 static const struct ethtool_ops bnx2_ethtool_ops = {
7585 .get_settings = bnx2_get_settings,
7586 .set_settings = bnx2_set_settings,
7587 .get_drvinfo = bnx2_get_drvinfo,
7588 .get_regs_len = bnx2_get_regs_len,
7589 .get_regs = bnx2_get_regs,
7590 .get_wol = bnx2_get_wol,
7591 .set_wol = bnx2_set_wol,
7592 .nway_reset = bnx2_nway_reset,
7593 .get_link = bnx2_get_link,
7594 .get_eeprom_len = bnx2_get_eeprom_len,
7595 .get_eeprom = bnx2_get_eeprom,
7596 .set_eeprom = bnx2_set_eeprom,
7597 .get_coalesce = bnx2_get_coalesce,
7598 .set_coalesce = bnx2_set_coalesce,
7599 .get_ringparam = bnx2_get_ringparam,
7600 .set_ringparam = bnx2_set_ringparam,
7601 .get_pauseparam = bnx2_get_pauseparam,
7602 .set_pauseparam = bnx2_set_pauseparam,
7603 .get_rx_csum = bnx2_get_rx_csum,
7604 .set_rx_csum = bnx2_set_rx_csum,
7605 .set_tx_csum = bnx2_set_tx_csum,
7606 .set_sg = ethtool_op_set_sg,
7607 .set_tso = bnx2_set_tso,
7608 .self_test = bnx2_self_test,
7609 .get_strings = bnx2_get_strings,
7610 .phys_id = bnx2_phys_id,
7611 .get_ethtool_stats = bnx2_get_ethtool_stats,
7612 .get_sset_count = bnx2_get_sset_count,
7613 .set_flags = bnx2_set_flags,
7614 .get_flags = ethtool_op_get_flags,
7615 };
7616
7617 /* Called with rtnl_lock */
7618 static int
7619 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7620 {
7621 struct mii_ioctl_data *data = if_mii(ifr);
7622 struct bnx2 *bp = netdev_priv(dev);
7623 int err;
7624
7625 switch(cmd) {
7626 case SIOCGMIIPHY:
7627 data->phy_id = bp->phy_addr;
7628
7629 /* fallthru */
7630 case SIOCGMIIREG: {
7631 u32 mii_regval;
7632
7633 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7634 return -EOPNOTSUPP;
7635
7636 if (!netif_running(dev))
7637 return -EAGAIN;
7638
7639 spin_lock_bh(&bp->phy_lock);
7640 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7641 spin_unlock_bh(&bp->phy_lock);
7642
7643 data->val_out = mii_regval;
7644
7645 return err;
7646 }
7647
7648 case SIOCSMIIREG:
7649 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7650 return -EOPNOTSUPP;
7651
7652 if (!netif_running(dev))
7653 return -EAGAIN;
7654
7655 spin_lock_bh(&bp->phy_lock);
7656 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7657 spin_unlock_bh(&bp->phy_lock);
7658
7659 return err;
7660
7661 default:
7662 /* do nothing */
7663 break;
7664 }
7665 return -EOPNOTSUPP;
7666 }
7667
7668 /* Called with rtnl_lock */
7669 static int
7670 bnx2_change_mac_addr(struct net_device *dev, void *p)
7671 {
7672 struct sockaddr *addr = p;
7673 struct bnx2 *bp = netdev_priv(dev);
7674
7675 if (!is_valid_ether_addr(addr->sa_data))
7676 return -EINVAL;
7677
7678 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7679 if (netif_running(dev))
7680 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7681
7682 return 0;
7683 }
7684
7685 /* Called with rtnl_lock */
7686 static int
7687 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7688 {
7689 struct bnx2 *bp = netdev_priv(dev);
7690
7691 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7692 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7693 return -EINVAL;
7694
7695 dev->mtu = new_mtu;
7696 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7697 }
7698
7699 #ifdef CONFIG_NET_POLL_CONTROLLER
7700 static void
7701 poll_bnx2(struct net_device *dev)
7702 {
7703 struct bnx2 *bp = netdev_priv(dev);
7704 int i;
7705
7706 for (i = 0; i < bp->irq_nvecs; i++) {
7707 struct bnx2_irq *irq = &bp->irq_tbl[i];
7708
7709 disable_irq(irq->vector);
7710 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7711 enable_irq(irq->vector);
7712 }
7713 }
7714 #endif
7715
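/* Determine whether a 5709 port is copper or SerDes from the bond ID or,
 * for dual-media parts, from the per-function strap (optionally taken
 * from the software override bits).
 */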
7716 static void __devinit
7717 bnx2_get_5709_media(struct bnx2 *bp)
7718 {
7719 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7720 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7721 u32 strap;
7722
7723 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7724 return;
7725 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7726 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7727 return;
7728 }
7729
7730 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7731 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7732 else
7733 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7734
7735 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7736 switch (strap) {
7737 case 0x4:
7738 case 0x5:
7739 case 0x6:
7740 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7741 return;
7742 }
7743 } else {
7744 switch (strap) {
7745 case 0x1:
7746 case 0x2:
7747 case 0x4:
7748 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7749 return;
7750 }
7751 }
7752 }
7753
7754 static void __devinit
7755 bnx2_get_pci_speed(struct bnx2 *bp)
7756 {
7757 u32 reg;
7758
7759 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7760 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7761 u32 clkreg;
7762
7763 bp->flags |= BNX2_FLAG_PCIX;
7764
7765 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7766
7767 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7768 switch (clkreg) {
7769 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7770 bp->bus_speed_mhz = 133;
7771 break;
7772
7773 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7774 bp->bus_speed_mhz = 100;
7775 break;
7776
7777 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7778 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7779 bp->bus_speed_mhz = 66;
7780 break;
7781
7782 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7783 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7784 bp->bus_speed_mhz = 50;
7785 break;
7786
7787 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7788 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7789 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7790 bp->bus_speed_mhz = 33;
7791 break;
7792 }
7793 }
7794 else {
7795 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7796 bp->bus_speed_mhz = 66;
7797 else
7798 bp->bus_speed_mhz = 33;
7799 }
7800
7801 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7802 bp->flags |= BNX2_FLAG_PCI_32BIT;
7803
7804 }
7805
7806 static void __devinit
7807 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7808 {
7809 int rc, i, j;
7810 u8 *data;
7811 unsigned int block_end, rosize, len;
7812
7813 #define BNX2_VPD_NVRAM_OFFSET 0x300
7814 #define BNX2_VPD_LEN 128
7815 #define BNX2_MAX_VER_SLEN 30
7816
7817 data = kmalloc(256, GFP_KERNEL);
7818 if (!data)
7819 return;
7820
7821 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7822 BNX2_VPD_LEN);
7823 if (rc)
7824 goto vpd_done;
7825
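	/* The VPD image was read into the upper half of the buffer and is
	 * stored byte-swapped within each 32-bit word; swap each word back
	 * into the lower half.
	 */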
7826 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7827 data[i] = data[i + BNX2_VPD_LEN + 3];
7828 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7829 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7830 data[i + 3] = data[i + BNX2_VPD_LEN];
7831 }
7832
7833 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7834 if (i < 0)
7835 goto vpd_done;
7836
7837 rosize = pci_vpd_lrdt_size(&data[i]);
7838 i += PCI_VPD_LRDT_TAG_SIZE;
7839 block_end = i + rosize;
7840
7841 if (block_end > BNX2_VPD_LEN)
7842 goto vpd_done;
7843
7844 j = pci_vpd_find_info_keyword(data, i, rosize,
7845 PCI_VPD_RO_KEYWORD_MFR_ID);
7846 if (j < 0)
7847 goto vpd_done;
7848
7849 len = pci_vpd_info_field_size(&data[j]);
7850
7851 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7852 if (j + len > block_end || len != 4 ||
7853 memcmp(&data[j], "1028", 4))
7854 goto vpd_done;
7855
7856 j = pci_vpd_find_info_keyword(data, i, rosize,
7857 PCI_VPD_RO_KEYWORD_VENDOR0);
7858 if (j < 0)
7859 goto vpd_done;
7860
7861 len = pci_vpd_info_field_size(&data[j]);
7862
7863 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7864 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7865 goto vpd_done;
7866
7867 memcpy(bp->fw_version, &data[j], len);
7868 bp->fw_version[len] = ' ';
7869
7870 vpd_done:
7871 kfree(data);
7872 }
7873
7874 static int __devinit
7875 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7876 {
7877 struct bnx2 *bp;
7878 unsigned long mem_len;
7879 int rc, i, j;
7880 u32 reg;
7881 u64 dma_mask, persist_dma_mask;
7882 int err;
7883
7884 SET_NETDEV_DEV(dev, &pdev->dev);
7885 bp = netdev_priv(dev);
7886
7887 bp->flags = 0;
7888 bp->phy_flags = 0;
7889
7890 bp->temp_stats_blk =
7891 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7892
7893 if (bp->temp_stats_blk == NULL) {
7894 rc = -ENOMEM;
7895 goto err_out;
7896 }
7897
7898 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7899 rc = pci_enable_device(pdev);
7900 if (rc) {
7901 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7902 goto err_out;
7903 }
7904
7905 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7906 dev_err(&pdev->dev,
7907 "Cannot find PCI device base address, aborting\n");
7908 rc = -ENODEV;
7909 goto err_out_disable;
7910 }
7911
7912 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7913 if (rc) {
7914 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7915 goto err_out_disable;
7916 }
7917
7918 pci_set_master(pdev);
7919
7920 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7921 if (bp->pm_cap == 0) {
7922 dev_err(&pdev->dev,
7923 "Cannot find power management capability, aborting\n");
7924 rc = -EIO;
7925 goto err_out_release;
7926 }
7927
7928 bp->dev = dev;
7929 bp->pdev = pdev;
7930
7931 spin_lock_init(&bp->phy_lock);
7932 spin_lock_init(&bp->indirect_lock);
7933 #ifdef BCM_CNIC
7934 mutex_init(&bp->cnic_lock);
7935 #endif
7936 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7937
7938 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7939 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7940 dev->mem_end = dev->mem_start + mem_len;
7941 dev->irq = pdev->irq;
7942
7943 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7944
7945 if (!bp->regview) {
7946 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7947 rc = -ENOMEM;
7948 goto err_out_release;
7949 }
7950
7951 	/* Configure byte swap and enable write to the reg_window registers.
7952 	 * Rely on the CPU to do target byte swapping on big endian systems;
7953 	 * the chip's target access swapping will not swap all accesses.
7954 	 */
7955 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7956 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7957 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7958
7959 bnx2_set_power_state(bp, PCI_D0);
7960
7961 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7962
7963 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7964 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7965 dev_err(&pdev->dev,
7966 "Cannot find PCIE capability, aborting\n");
7967 rc = -EIO;
7968 goto err_out_unmap;
7969 }
7970 bp->flags |= BNX2_FLAG_PCIE;
7971 if (CHIP_REV(bp) == CHIP_REV_Ax)
7972 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7973
7974 /* AER (Advanced Error Reporting) hooks */
7975 err = pci_enable_pcie_error_reporting(pdev);
7976 if (err) {
7977 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
7978 "failed 0x%x\n", err);
7979 /* non-fatal, continue */
7980 }
7981
7982 } else {
7983 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7984 if (bp->pcix_cap == 0) {
7985 dev_err(&pdev->dev,
7986 "Cannot find PCIX capability, aborting\n");
7987 rc = -EIO;
7988 goto err_out_unmap;
7989 }
7990 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7991 }
7992
7993 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7994 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7995 bp->flags |= BNX2_FLAG_MSIX_CAP;
7996 }
7997
7998 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7999 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8000 bp->flags |= BNX2_FLAG_MSI_CAP;
8001 }
8002
8003 /* 5708 cannot support DMA addresses > 40-bit. */
8004 if (CHIP_NUM(bp) == CHIP_NUM_5708)
8005 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8006 else
8007 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8008
8009 /* Configure DMA attributes. */
8010 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8011 dev->features |= NETIF_F_HIGHDMA;
8012 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8013 if (rc) {
8014 dev_err(&pdev->dev,
8015 "pci_set_consistent_dma_mask failed, aborting\n");
8016 goto err_out_unmap;
8017 }
8018 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8019 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8020 goto err_out_unmap;
8021 }
8022
8023 if (!(bp->flags & BNX2_FLAG_PCIE))
8024 bnx2_get_pci_speed(bp);
8025
8026 /* 5706A0 may falsely detect SERR and PERR. */
8027 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8028 reg = REG_RD(bp, PCI_COMMAND);
8029 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8030 REG_WR(bp, PCI_COMMAND, reg);
8031 }
8032 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8033 !(bp->flags & BNX2_FLAG_PCIX)) {
8034
8035 dev_err(&pdev->dev,
8036 "5706 A1 can only be used in a PCIX bus, aborting\n");
8037 goto err_out_unmap;
8038 }
8039
8040 bnx2_init_nvram(bp);
8041
8042 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8043
8044 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8045 BNX2_SHM_HDR_SIGNATURE_SIG) {
8046 u32 off = PCI_FUNC(pdev->devfn) << 2;
8047
8048 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8049 } else
8050 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8051
8052 /* Get the permanent MAC address. First we need to make sure the
8053 * firmware is actually running.
8054 */
8055 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8056
8057 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8058 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8059 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8060 rc = -ENODEV;
8061 goto err_out_unmap;
8062 }
8063
8064 bnx2_read_vpd_fw_ver(bp);
8065
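	/* Append the bootcode version: three fields packed one byte each in
	 * BNX2_DEV_INFO_BC_REV, printed as dot-separated decimals with
	 * leading zeros skipped.
	 */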
8066 j = strlen(bp->fw_version);
8067 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8068 for (i = 0; i < 3 && j < 24; i++) {
8069 u8 num, k, skip0;
8070
8071 if (i == 0) {
8072 bp->fw_version[j++] = 'b';
8073 bp->fw_version[j++] = 'c';
8074 bp->fw_version[j++] = ' ';
8075 }
8076 num = (u8) (reg >> (24 - (i * 8)));
8077 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8078 if (num >= k || !skip0 || k == 1) {
8079 bp->fw_version[j++] = (num / k) + '0';
8080 skip0 = 0;
8081 }
8082 }
8083 if (i != 2)
8084 bp->fw_version[j++] = '.';
8085 }
8086 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8087 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8088 bp->wol = 1;
8089
8090 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8091 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8092
8093 for (i = 0; i < 30; i++) {
8094 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8095 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8096 break;
8097 msleep(10);
8098 }
8099 }
8100 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8101 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8102 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8103 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8104 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8105
8106 if (j < 32)
8107 bp->fw_version[j++] = ' ';
8108 for (i = 0; i < 3 && j < 28; i++) {
8109 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8110 reg = swab32(reg);
8111 memcpy(&bp->fw_version[j], &reg, 4);
8112 j += 4;
8113 }
8114 }
8115
8116 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8117 bp->mac_addr[0] = (u8) (reg >> 8);
8118 bp->mac_addr[1] = (u8) reg;
8119
8120 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8121 bp->mac_addr[2] = (u8) (reg >> 24);
8122 bp->mac_addr[3] = (u8) (reg >> 16);
8123 bp->mac_addr[4] = (u8) (reg >> 8);
8124 bp->mac_addr[5] = (u8) reg;
8125
8126 bp->tx_ring_size = MAX_TX_DESC_CNT;
8127 bnx2_set_rx_ring_size(bp, 255);
8128
8129 bp->rx_csum = 1;
8130
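	/* Default interrupt coalescing: frame-count trip points and timer
	 * ticks for TX and RX, with the "_int" variants applied while an
	 * interrupt is being serviced.
	 */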
8131 bp->tx_quick_cons_trip_int = 2;
8132 bp->tx_quick_cons_trip = 20;
8133 bp->tx_ticks_int = 18;
8134 bp->tx_ticks = 80;
8135
8136 bp->rx_quick_cons_trip_int = 2;
8137 bp->rx_quick_cons_trip = 12;
8138 bp->rx_ticks_int = 18;
8139 bp->rx_ticks = 18;
8140
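	/* Request a statistics block update roughly once per second
	 * (assuming the tick unit is microseconds); the mask keeps the
	 * value within the HC_STAT_TICKS field.
	 */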
8141 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8142
8143 bp->current_interval = BNX2_TIMER_INTERVAL;
8144
8145 bp->phy_addr = 1;
8146
8147 /* Disable WOL support if we are running on a SERDES chip. */
8148 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8149 bnx2_get_5709_media(bp);
8150 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8151 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8152
8153 bp->phy_port = PORT_TP;
8154 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8155 bp->phy_port = PORT_FIBRE;
8156 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8157 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8158 bp->flags |= BNX2_FLAG_NO_WOL;
8159 bp->wol = 0;
8160 }
8161 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8162 /* Don't do parallel detect on this board because of
8163 * some board problems. The link will not go down
8164 * if we do parallel detect.
8165 */
8166 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8167 pdev->subsystem_device == 0x310c)
8168 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8169 } else {
8170 bp->phy_addr = 2;
8171 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8172 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8173 }
8174 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8175 CHIP_NUM(bp) == CHIP_NUM_5708)
8176 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8177 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8178 (CHIP_REV(bp) == CHIP_REV_Ax ||
8179 CHIP_REV(bp) == CHIP_REV_Bx))
8180 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8181
8182 bnx2_init_fw_cap(bp);
8183
8184 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8185 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8186 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8187 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8188 bp->flags |= BNX2_FLAG_NO_WOL;
8189 bp->wol = 0;
8190 }
8191
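	/* 5706 A0: use the same coalescing values during interrupt
	 * servicing as at all other times instead of the tighter "_int"
	 * defaults set above.
	 */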
8192 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8193 bp->tx_quick_cons_trip_int =
8194 bp->tx_quick_cons_trip;
8195 bp->tx_ticks_int = bp->tx_ticks;
8196 bp->rx_quick_cons_trip_int =
8197 bp->rx_quick_cons_trip;
8198 bp->rx_ticks_int = bp->rx_ticks;
8199 bp->comp_prod_trip_int = bp->comp_prod_trip;
8200 bp->com_ticks_int = bp->com_ticks;
8201 bp->cmd_ticks_int = bp->cmd_ticks;
8202 }
8203
8204 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8205 *
8206 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8207 * with byte enables disabled on the unused 32-bit word. This is legal
8208 * but causes problems on the AMD 8132 which will eventually stop
8209 * responding after a while.
8210 *
8211 * AMD believes this incompatibility is unique to the 5706, and
8212 * prefers to locally disable MSI rather than globally disabling it.
8213 */
8214 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8215 struct pci_dev *amd_8132 = NULL;
8216
8217 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8218 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8219 amd_8132))) {
8220
8221 if (amd_8132->revision >= 0x10 &&
8222 amd_8132->revision <= 0x13) {
8223 disable_msi = 1;
8224 pci_dev_put(amd_8132);
8225 break;
8226 }
8227 }
8228 }
8229
8230 bnx2_set_default_link(bp);
8231 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8232
8233 init_timer(&bp->timer);
8234 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8235 bp->timer.data = (unsigned long) bp;
8236 bp->timer.function = bnx2_timer;
8237
8238 pci_save_state(pdev);
8239
8240 return 0;
8241
8242 err_out_unmap:
8243 if (bp->flags & BNX2_FLAG_PCIE)
8244 pci_disable_pcie_error_reporting(pdev);
8245
8246 if (bp->regview) {
8247 iounmap(bp->regview);
8248 bp->regview = NULL;
8249 }
8250
8251 err_out_release:
8252 pci_release_regions(pdev);
8253
8254 err_out_disable:
8255 pci_disable_device(pdev);
8256 pci_set_drvdata(pdev, NULL);
8257
8258 err_out:
8259 return rc;
8260 }
8261
8262 static char * __devinit
8263 bnx2_bus_string(struct bnx2 *bp, char *str)
8264 {
8265 char *s = str;
8266
8267 if (bp->flags & BNX2_FLAG_PCIE) {
8268 s += sprintf(s, "PCI Express");
8269 } else {
8270 s += sprintf(s, "PCI");
8271 if (bp->flags & BNX2_FLAG_PCIX)
8272 s += sprintf(s, "-X");
8273 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8274 s += sprintf(s, " 32-bit");
8275 else
8276 s += sprintf(s, " 64-bit");
8277 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8278 }
8279 return str;
8280 }
8281
8282 static void
8283 bnx2_del_napi(struct bnx2 *bp)
8284 {
8285 int i;
8286
8287 for (i = 0; i < bp->irq_nvecs; i++)
8288 netif_napi_del(&bp->bnx2_napi[i].napi);
8289 }
8290
8291 static void
8292 bnx2_init_napi(struct bnx2 *bp)
8293 {
8294 int i;
8295
8296 for (i = 0; i < bp->irq_nvecs; i++) {
8297 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8298 int (*poll)(struct napi_struct *, int);
8299
8300 if (i == 0)
8301 poll = bnx2_poll;
8302 else
8303 poll = bnx2_poll_msix;
8304
8305 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8306 bnapi->bp = bp;
8307 }
8308 }
8309
8310 static const struct net_device_ops bnx2_netdev_ops = {
8311 .ndo_open = bnx2_open,
8312 .ndo_start_xmit = bnx2_start_xmit,
8313 .ndo_stop = bnx2_close,
8314 .ndo_get_stats64 = bnx2_get_stats64,
8315 .ndo_set_rx_mode = bnx2_set_rx_mode,
8316 .ndo_do_ioctl = bnx2_ioctl,
8317 .ndo_validate_addr = eth_validate_addr,
8318 .ndo_set_mac_address = bnx2_change_mac_addr,
8319 .ndo_change_mtu = bnx2_change_mtu,
8320 .ndo_tx_timeout = bnx2_tx_timeout,
8321 #ifdef BCM_VLAN
8322 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8323 #endif
8324 #ifdef CONFIG_NET_POLL_CONTROLLER
8325 .ndo_poll_controller = poll_bnx2,
8326 #endif
8327 };
8328
8329 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
8330 {
8331 #ifdef BCM_VLAN
8332 dev->vlan_features |= flags;
8333 #endif
8334 }
8335
8336 static int __devinit
8337 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8338 {
8339 static int version_printed = 0;
8340 struct net_device *dev = NULL;
8341 struct bnx2 *bp;
8342 int rc;
8343 char str[40];
8344
8345 if (version_printed++ == 0)
8346 pr_info("%s", version);
8347
8348 	/* dev zeroed in alloc_etherdev_mq() */
8349 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8350
8351 if (!dev)
8352 return -ENOMEM;
8353
8354 rc = bnx2_init_board(pdev, dev);
8355 if (rc < 0) {
8356 free_netdev(dev);
8357 return rc;
8358 }
8359
8360 dev->netdev_ops = &bnx2_netdev_ops;
8361 dev->watchdog_timeo = TX_TIMEOUT;
8362 dev->ethtool_ops = &bnx2_ethtool_ops;
8363
8364 bp = netdev_priv(dev);
8365
8366 pci_set_drvdata(pdev, dev);
8367
8368 rc = bnx2_request_firmware(bp);
8369 if (rc)
8370 goto error;
8371
8372 memcpy(dev->dev_addr, bp->mac_addr, 6);
8373 memcpy(dev->perm_addr, bp->mac_addr, 6);
8374
8375 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
8376 NETIF_F_RXHASH;
8377 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8378 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8379 dev->features |= NETIF_F_IPV6_CSUM;
8380 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8381 }
8382 #ifdef BCM_VLAN
8383 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8384 #endif
8385 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8386 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8387 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8388 dev->features |= NETIF_F_TSO6;
8389 vlan_features_add(dev, NETIF_F_TSO6);
8390 }
8391 if ((rc = register_netdev(dev))) {
8392 dev_err(&pdev->dev, "Cannot register net device\n");
8393 goto error;
8394 }
8395
8396 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8397 board_info[ent->driver_data].name,
8398 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8399 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8400 bnx2_bus_string(bp, str),
8401 dev->base_addr,
8402 bp->pdev->irq, dev->dev_addr);
8403
8404 return 0;
8405
8406 error:
8407 if (bp->mips_firmware)
8408 release_firmware(bp->mips_firmware);
8409 if (bp->rv2p_firmware)
8410 release_firmware(bp->rv2p_firmware);
8411
8412 if (bp->regview)
8413 iounmap(bp->regview);
8414 pci_release_regions(pdev);
8415 pci_disable_device(pdev);
8416 pci_set_drvdata(pdev, NULL);
8417 free_netdev(dev);
8418 return rc;
8419 }
8420
8421 static void __devexit
8422 bnx2_remove_one(struct pci_dev *pdev)
8423 {
8424 struct net_device *dev = pci_get_drvdata(pdev);
8425 struct bnx2 *bp = netdev_priv(dev);
8426
8427 flush_scheduled_work();
8428
8429 unregister_netdev(dev);
8430
8431 if (bp->mips_firmware)
8432 release_firmware(bp->mips_firmware);
8433 if (bp->rv2p_firmware)
8434 release_firmware(bp->rv2p_firmware);
8435
8436 if (bp->regview)
8437 iounmap(bp->regview);
8438
8439 kfree(bp->temp_stats_blk);
8440
8441 if (bp->flags & BNX2_FLAG_PCIE)
8442 pci_disable_pcie_error_reporting(pdev);
8443
8444 free_netdev(dev);
8445
8446 pci_release_regions(pdev);
8447 pci_disable_device(pdev);
8448 pci_set_drvdata(pdev, NULL);
8449 }
8450
8451 static int
8452 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8453 {
8454 struct net_device *dev = pci_get_drvdata(pdev);
8455 struct bnx2 *bp = netdev_priv(dev);
8456
8457 	/* PCI register 4 needs to be saved whether or not netif_running().
8458 	 * The MSI address and data also need to be saved if MSI is in use
8459 	 * and netif_running().
8460 	 */
8461 pci_save_state(pdev);
8462 if (!netif_running(dev))
8463 return 0;
8464
8465 flush_scheduled_work();
8466 bnx2_netif_stop(bp, true);
8467 netif_device_detach(dev);
8468 del_timer_sync(&bp->timer);
8469 bnx2_shutdown_chip(bp);
8470 bnx2_free_skbs(bp);
8471 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8472 return 0;
8473 }
8474
8475 static int
8476 bnx2_resume(struct pci_dev *pdev)
8477 {
8478 struct net_device *dev = pci_get_drvdata(pdev);
8479 struct bnx2 *bp = netdev_priv(dev);
8480
8481 pci_restore_state(pdev);
8482 if (!netif_running(dev))
8483 return 0;
8484
8485 bnx2_set_power_state(bp, PCI_D0);
8486 netif_device_attach(dev);
8487 bnx2_init_nic(bp, 1);
8488 bnx2_netif_start(bp, true);
8489 return 0;
8490 }
8491
8492 /**
8493 * bnx2_io_error_detected - called when PCI error is detected
8494 * @pdev: Pointer to PCI device
8495 * @state: The current pci connection state
8496 *
8497 * This function is called after a PCI bus error affecting
8498 * this device has been detected.
8499 */
8500 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8501 pci_channel_state_t state)
8502 {
8503 struct net_device *dev = pci_get_drvdata(pdev);
8504 struct bnx2 *bp = netdev_priv(dev);
8505
8506 rtnl_lock();
8507 netif_device_detach(dev);
8508
8509 if (state == pci_channel_io_perm_failure) {
8510 rtnl_unlock();
8511 return PCI_ERS_RESULT_DISCONNECT;
8512 }
8513
8514 if (netif_running(dev)) {
8515 bnx2_netif_stop(bp, true);
8516 del_timer_sync(&bp->timer);
8517 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8518 }
8519
8520 pci_disable_device(pdev);
8521 rtnl_unlock();
8522
8523 	/* Request a slot reset. */
8524 return PCI_ERS_RESULT_NEED_RESET;
8525 }
8526
8527 /**
8528 * bnx2_io_slot_reset - called after the pci bus has been reset.
8529 * @pdev: Pointer to PCI device
8530 *
8531 * Restart the card from scratch, as if from a cold-boot.
8532 */
8533 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8534 {
8535 struct net_device *dev = pci_get_drvdata(pdev);
8536 struct bnx2 *bp = netdev_priv(dev);
8537 pci_ers_result_t result;
8538 int err;
8539
8540 rtnl_lock();
8541 if (pci_enable_device(pdev)) {
8542 dev_err(&pdev->dev,
8543 "Cannot re-enable PCI device after reset\n");
8544 result = PCI_ERS_RESULT_DISCONNECT;
8545 } else {
8546 pci_set_master(pdev);
8547 pci_restore_state(pdev);
8548 pci_save_state(pdev);
8549
8550 if (netif_running(dev)) {
8551 bnx2_set_power_state(bp, PCI_D0);
8552 bnx2_init_nic(bp, 1);
8553 }
8554 result = PCI_ERS_RESULT_RECOVERED;
8555 }
8556 rtnl_unlock();
8557
8558 if (!(bp->flags & BNX2_FLAG_PCIE))
8559 return result;
8560
8561 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8562 if (err) {
8563 dev_err(&pdev->dev,
8564 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8565 err); /* non-fatal, continue */
8566 }
8567
8568 return result;
8569 }
8570
8571 /**
8572 * bnx2_io_resume - called when traffic can start flowing again.
8573 * @pdev: Pointer to PCI device
8574 *
8575 * This callback is called when the error recovery driver tells us that
8576  * it's OK to resume normal operation.
8577 */
8578 static void bnx2_io_resume(struct pci_dev *pdev)
8579 {
8580 struct net_device *dev = pci_get_drvdata(pdev);
8581 struct bnx2 *bp = netdev_priv(dev);
8582
8583 rtnl_lock();
8584 if (netif_running(dev))
8585 bnx2_netif_start(bp, true);
8586
8587 netif_device_attach(dev);
8588 rtnl_unlock();
8589 }
8590
8591 static struct pci_error_handlers bnx2_err_handler = {
8592 .error_detected = bnx2_io_error_detected,
8593 .slot_reset = bnx2_io_slot_reset,
8594 .resume = bnx2_io_resume,
8595 };
8596
8597 static struct pci_driver bnx2_pci_driver = {
8598 .name = DRV_MODULE_NAME,
8599 .id_table = bnx2_pci_tbl,
8600 .probe = bnx2_init_one,
8601 .remove = __devexit_p(bnx2_remove_one),
8602 .suspend = bnx2_suspend,
8603 .resume = bnx2_resume,
8604 .err_handler = &bnx2_err_handler,
8605 };
8606
8607 static int __init bnx2_init(void)
8608 {
8609 return pci_register_driver(&bnx2_pci_driver);
8610 }
8611
8612 static void __exit bnx2_cleanup(void)
8613 {
8614 pci_unregister_driver(&bnx2_pci_driver);
8615 }
8616
8617 module_init(bnx2_init);
8618 module_exit(bnx2_cleanup);
8619
8620
8621