1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME "bnx2"
61 #define DRV_MODULE_VERSION "2.0.8"
62 #define DRV_MODULE_RELDATE "Feb 15, 2010"
63 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
66 #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67 #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT (5*HZ)
73
74 static char version[] __devinitdata =
75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
87 static int disable_msi = 0;
88
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
92 typedef enum {
93 BCM5706 = 0,
94 NC370T,
95 NC370I,
96 BCM5706S,
97 NC370F,
98 BCM5708,
99 BCM5708S,
100 BCM5709,
101 BCM5709S,
102 BCM5716,
103 BCM5716S,
104 } board_t;
105
106 /* indexed by board_t, above */
107 static struct {
108 char *name;
109 } board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111 { "HP NC370T Multifunction Gigabit Server Adapter" },
112 { "HP NC370i Multifunction Gigabit Server Adapter" },
113 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114 { "HP NC370F Multifunction Gigabit Server Adapter" },
115 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
118 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
120 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121 };
122
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142 { PCI_VENDOR_ID_BROADCOM, 0x163b,
143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144 { PCI_VENDOR_ID_BROADCOM, 0x163c,
145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146 { 0, }
147 };
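/* Note on ordering (annotation, not original driver text): the PCI core
 * matches this table top to bottom, so the HP entries that key on a
 * specific subsystem ID (NC370T/I/F) must appear before the PCI_ANY_ID
 * catch-all rows for the same device ID, or those adapters would be
 * reported under the generic Broadcom names.
 */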
148
149 static const struct flash_spec flash_table[] =
150 {
151 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
153 /* Slow EEPROM */
154 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157 "EEPROM - slow"},
158 /* Expansion entry 0001 */
159 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162 "Entry 0001"},
163 /* Saifun SA25F010 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
165 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168 "Non-buffered flash (128kB)"},
169 /* Saifun SA25F020 (non-buffered flash) */
170 /* strap, cfg1, & write1 need updates */
171 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174 "Non-buffered flash (256kB)"},
175 /* Expansion entry 0100 */
176 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 "Entry 0100"},
180 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190 /* Saifun SA25F005 (non-buffered flash) */
191 /* strap, cfg1, & write1 need updates */
192 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195 "Non-buffered flash (64kB)"},
196 /* Fast EEPROM */
197 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200 "EEPROM - fast"},
201 /* Expansion entry 1001 */
202 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1001"},
206 /* Expansion entry 1010 */
207 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1010"},
211 /* ATMEL AT45DB011B (buffered flash) */
212 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215 "Buffered flash (128kB)"},
216 /* Expansion entry 1100 */
217 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 "Entry 1100"},
221 /* Expansion entry 1101 */
222 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 "Entry 1101"},
226 /* Atmel Expansion entry 1110 */
227 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230 "Entry 1110 (Atmel)"},
231 /* ATMEL AT45DB021B (buffered flash) */
232 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235 "Buffered flash (256kB)"},
236 };
237
238 static const struct flash_spec flash_5709 = {
239 .flags = BNX2_NV_BUFFERED,
240 .page_bits = BCM5709_FLASH_PAGE_BITS,
241 .page_size = BCM5709_FLASH_PAGE_SIZE,
242 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
243 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
244 .name = "5709 Buffered flash (256kB)",
245 };
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static void bnx2_init_napi(struct bnx2 *bp);
250
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 {
253 u32 diff;
254
255 smp_mb();
256
257 /* The ring uses 256 indices for 255 entries; one of them
258 * needs to be skipped.
259 */
260 diff = txr->tx_prod - txr->tx_cons;
261 if (unlikely(diff >= TX_DESC_CNT)) {
262 diff &= 0xffff;
263 if (diff == TX_DESC_CNT)
264 diff = MAX_TX_DESC_CNT;
265 }
266 return (bp->tx_ring_size - diff);
267 }
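/* Illustrative wrap example (a sketch, not driver code): tx_prod and
 * tx_cons are free-running 16-bit indices, and the subtraction above is
 * done in a 32-bit variable, so a wrapped consumer produces a huge diff;
 * masking with 0xffff recovers the true 16-bit distance:
 *
 *	tx_prod = 3, tx_cons = 0xfffe  ->  diff & 0xffff == 5
 *
 * A full lap gives diff == TX_DESC_CNT (256), which is clamped to
 * MAX_TX_DESC_CNT (255) because the last descriptor in the ring page is
 * reserved as a chain pointer, not a usable entry.
 */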
268
269 static u32
270 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271 {
272 u32 val;
273
274 spin_lock_bh(&bp->indirect_lock);
275 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
276 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
277 spin_unlock_bh(&bp->indirect_lock);
278 return val;
279 }
280
281 static void
282 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
283 {
284 spin_lock_bh(&bp->indirect_lock);
285 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
286 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
287 spin_unlock_bh(&bp->indirect_lock);
288 }
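/* Usage sketch: the shared-memory helpers below are thin wrappers over
 * these indirect accessors, e.g.
 *
 *	val = bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
 *
 * bp->indirect_lock serializes the window-address write with the data
 * access, so concurrent indirect reads and writes cannot interleave.
 */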
289
290 static void
291 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
292 {
293 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
294 }
295
296 static u32
297 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
298 {
299 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
300 }
301
302 static void
303 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
304 {
305 offset += cid_addr;
306 spin_lock_bh(&bp->indirect_lock);
307 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
308 int i;
309
310 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
311 REG_WR(bp, BNX2_CTX_CTX_CTRL,
312 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
313 for (i = 0; i < 5; i++) {
314 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
315 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
316 break;
317 udelay(5);
318 }
319 } else {
320 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
321 REG_WR(bp, BNX2_CTX_DATA, val);
322 }
323 spin_unlock_bh(&bp->indirect_lock);
324 }
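/* On the 5709, context memory is written through a request register
 * rather than a flat window, so the code above polls for the hardware
 * to clear BNX2_CTX_CTX_CTRL_WRITE_REQ; the wait is bounded at roughly
 * 5 iterations * 5 us.  Older chips complete the plain BNX2_CTX_DATA
 * write synchronously and need no polling.
 */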
325
326 #ifdef BCM_CNIC
327 static int
328 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329 {
330 struct bnx2 *bp = netdev_priv(dev);
331 struct drv_ctl_io *io = &info->data.io;
332
333 switch (info->cmd) {
334 case DRV_CTL_IO_WR_CMD:
335 bnx2_reg_wr_ind(bp, io->offset, io->data);
336 break;
337 case DRV_CTL_IO_RD_CMD:
338 io->data = bnx2_reg_rd_ind(bp, io->offset);
339 break;
340 case DRV_CTL_CTX_WR_CMD:
341 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342 break;
343 default:
344 return -EINVAL;
345 }
346 return 0;
347 }
348
349 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
350 {
351 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
352 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
353 int sb_id;
354
355 if (bp->flags & BNX2_FLAG_USING_MSIX) {
356 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
357 bnapi->cnic_present = 0;
358 sb_id = bp->irq_nvecs;
359 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
360 } else {
361 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
362 bnapi->cnic_tag = bnapi->last_status_idx;
363 bnapi->cnic_present = 1;
364 sb_id = 0;
365 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
366 }
367
368 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
369 cp->irq_arr[0].status_blk = (void *)
370 ((unsigned long) bnapi->status_blk.msi +
371 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
372 cp->irq_arr[0].status_blk_num = sb_id;
373 cp->num_irq = 1;
374 }
375
376 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
377 void *data)
378 {
379 struct bnx2 *bp = netdev_priv(dev);
380 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
381
382 if (ops == NULL)
383 return -EINVAL;
384
385 if (cp->drv_state & CNIC_DRV_STATE_REGD)
386 return -EBUSY;
387
388 bp->cnic_data = data;
389 rcu_assign_pointer(bp->cnic_ops, ops);
390
391 cp->num_irq = 0;
392 cp->drv_state = CNIC_DRV_STATE_REGD;
393
394 bnx2_setup_cnic_irq_info(bp);
395
396 return 0;
397 }
398
399 static int bnx2_unregister_cnic(struct net_device *dev)
400 {
401 struct bnx2 *bp = netdev_priv(dev);
402 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
403 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
404
405 mutex_lock(&bp->cnic_lock);
406 cp->drv_state = 0;
407 bnapi->cnic_present = 0;
408 rcu_assign_pointer(bp->cnic_ops, NULL);
409 mutex_unlock(&bp->cnic_lock);
410 synchronize_rcu();
411 return 0;
412 }
413
414 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
415 {
416 struct bnx2 *bp = netdev_priv(dev);
417 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
418
419 cp->drv_owner = THIS_MODULE;
420 cp->chip_id = bp->chip_id;
421 cp->pdev = bp->pdev;
422 cp->io_base = bp->regview;
423 cp->drv_ctl = bnx2_drv_ctl;
424 cp->drv_register_cnic = bnx2_register_cnic;
425 cp->drv_unregister_cnic = bnx2_unregister_cnic;
426
427 return cp;
428 }
429 EXPORT_SYMBOL(bnx2_cnic_probe);
430
431 static void
432 bnx2_cnic_stop(struct bnx2 *bp)
433 {
434 struct cnic_ops *c_ops;
435 struct cnic_ctl_info info;
436
437 mutex_lock(&bp->cnic_lock);
438 c_ops = bp->cnic_ops;
439 if (c_ops) {
440 info.cmd = CNIC_CTL_STOP_CMD;
441 c_ops->cnic_ctl(bp->cnic_data, &info);
442 }
443 mutex_unlock(&bp->cnic_lock);
444 }
445
446 static void
447 bnx2_cnic_start(struct bnx2 *bp)
448 {
449 struct cnic_ops *c_ops;
450 struct cnic_ctl_info info;
451
452 mutex_lock(&bp->cnic_lock);
453 c_ops = bp->cnic_ops;
454 if (c_ops) {
455 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
456 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
457
458 bnapi->cnic_tag = bnapi->last_status_idx;
459 }
460 info.cmd = CNIC_CTL_START_CMD;
461 c_ops->cnic_ctl(bp->cnic_data, &info);
462 }
463 mutex_unlock(&bp->cnic_lock);
464 }
465
466 #else
467
468 static void
469 bnx2_cnic_stop(struct bnx2 *bp)
470 {
471 }
472
473 static void
474 bnx2_cnic_start(struct bnx2 *bp)
475 {
476 }
477
478 #endif
479
480 static int
481 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
482 {
483 u32 val1;
484 int i, ret;
485
486 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
487 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
488 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
489
490 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
491 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
492
493 udelay(40);
494 }
495
496 val1 = (bp->phy_addr << 21) | (reg << 16) |
497 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
498 BNX2_EMAC_MDIO_COMM_START_BUSY;
499 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
500
501 for (i = 0; i < 50; i++) {
502 udelay(10);
503
504 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
505 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
506 udelay(5);
507
508 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
509 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
510
511 break;
512 }
513 }
514
515 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
516 *val = 0x0;
517 ret = -EBUSY;
518 }
519 else {
520 *val = val1;
521 ret = 0;
522 }
523
524 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
525 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
526 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
527
528 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
529 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
530
531 udelay(40);
532 }
533
534 return ret;
535 }
536
537 static int
538 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
539 {
540 u32 val1;
541 int i, ret;
542
543 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
544 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
545 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
546
547 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
548 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
549
550 udelay(40);
551 }
552
553 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
554 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
555 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
556 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
557
558 for (i = 0; i < 50; i++) {
559 udelay(10);
560
561 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
562 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
563 udelay(5);
564 break;
565 }
566 }
567
568 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
569 ret = -EBUSY;
570 else
571 ret = 0;
572
573 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
574 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
575 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
576
577 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
578 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
579
580 udelay(40);
581 }
582
583 return ret;
584 }
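/* MDIO access sketch: both helpers above build a clause-22 management
 * frame in BNX2_EMAC_MDIO_COMM -- PHY address in bits 25:21, register
 * in bits 20:16, data in bits 15:0 -- set START_BUSY, and poll up to
 * 50 * 10 us for the hardware to clear it.  Auto-polling is paused
 * around the access so the driver, not the hardware, owns the MDIO
 * bus.  A typical caller looks like:
 *
 *	u32 bmsr;
 *	if (bnx2_read_phy(bp, bp->mii_bmsr, &bmsr) == 0 &&
 *	    (bmsr & BMSR_LSTATUS))
 *		... link is up ...
 */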
585
586 static void
587 bnx2_disable_int(struct bnx2 *bp)
588 {
589 int i;
590 struct bnx2_napi *bnapi;
591
592 for (i = 0; i < bp->irq_nvecs; i++) {
593 bnapi = &bp->bnx2_napi[i];
594 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
595 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
596 }
597 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
598 }
599
600 static void
601 bnx2_enable_int(struct bnx2 *bp)
602 {
603 int i;
604 struct bnx2_napi *bnapi;
605
606 for (i = 0; i < bp->irq_nvecs; i++) {
607 bnapi = &bp->bnx2_napi[i];
608
609 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
610 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
611 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
612 bnapi->last_status_idx);
613
614 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
615 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
616 bnapi->last_status_idx);
617 }
618 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
619 }
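/* The double write above is deliberate (an interpretation sketch): the
 * first BNX2_PCICFG_INT_ACK_CMD write updates the status-block index
 * with the interrupt still masked (MASK_INT set); the second repeats
 * the index with the mask bit clear, re-enabling the line without
 * losing events that arrived in between.  COAL_NOW then asks the host
 * coalescing block to fire immediately if anything is pending.
 */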
620
621 static void
622 bnx2_disable_int_sync(struct bnx2 *bp)
623 {
624 int i;
625
626 atomic_inc(&bp->intr_sem);
627 if (!netif_running(bp->dev))
628 return;
629
630 bnx2_disable_int(bp);
631 for (i = 0; i < bp->irq_nvecs; i++)
632 synchronize_irq(bp->irq_tbl[i].vector);
633 }
634
635 static void
636 bnx2_napi_disable(struct bnx2 *bp)
637 {
638 int i;
639
640 for (i = 0; i < bp->irq_nvecs; i++)
641 napi_disable(&bp->bnx2_napi[i].napi);
642 }
643
644 static void
645 bnx2_napi_enable(struct bnx2 *bp)
646 {
647 int i;
648
649 for (i = 0; i < bp->irq_nvecs; i++)
650 napi_enable(&bp->bnx2_napi[i].napi);
651 }
652
653 static void
654 bnx2_netif_stop(struct bnx2 *bp)
655 {
656 bnx2_cnic_stop(bp);
657 if (netif_running(bp->dev)) {
658 int i;
659
660 bnx2_napi_disable(bp);
661 netif_tx_disable(bp->dev);
662 /* prevent tx timeout */
663 for (i = 0; i < bp->dev->num_tx_queues; i++) {
664 struct netdev_queue *txq;
665
666 txq = netdev_get_tx_queue(bp->dev, i);
667 txq->trans_start = jiffies;
668 }
669 }
670 bnx2_disable_int_sync(bp);
671 }
672
673 static void
674 bnx2_netif_start(struct bnx2 *bp)
675 {
676 if (atomic_dec_and_test(&bp->intr_sem)) {
677 if (netif_running(bp->dev)) {
678 netif_tx_wake_all_queues(bp->dev);
679 bnx2_napi_enable(bp);
680 bnx2_enable_int(bp);
681 bnx2_cnic_start(bp);
682 }
683 }
684 }
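/* intr_sem acts as a disable count: bnx2_disable_int_sync() increments
 * it before masking interrupts, and bnx2_netif_start() re-enables NAPI,
 * interrupts and CNIC only when its atomic_dec_and_test() brings the
 * count back to zero, so stop/start pairs nest safely:
 *
 *	bnx2_netif_stop(bp);     (intr_sem 0 -> 1, IRQs masked)
 *	... reconfigure hardware ...
 *	bnx2_netif_start(bp);    (intr_sem 1 -> 0, IRQs re-enabled)
 */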
685
686 static void
687 bnx2_free_tx_mem(struct bnx2 *bp)
688 {
689 int i;
690
691 for (i = 0; i < bp->num_tx_rings; i++) {
692 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
693 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
694
695 if (txr->tx_desc_ring) {
696 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
697 txr->tx_desc_ring,
698 txr->tx_desc_mapping);
699 txr->tx_desc_ring = NULL;
700 }
701 kfree(txr->tx_buf_ring);
702 txr->tx_buf_ring = NULL;
703 }
704 }
705
706 static void
707 bnx2_free_rx_mem(struct bnx2 *bp)
708 {
709 int i;
710
711 for (i = 0; i < bp->num_rx_rings; i++) {
712 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
713 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
714 int j;
715
716 for (j = 0; j < bp->rx_max_ring; j++) {
717 if (rxr->rx_desc_ring[j])
718 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
719 rxr->rx_desc_ring[j],
720 rxr->rx_desc_mapping[j]);
721 rxr->rx_desc_ring[j] = NULL;
722 }
723 vfree(rxr->rx_buf_ring);
724 rxr->rx_buf_ring = NULL;
725
726 for (j = 0; j < bp->rx_max_pg_ring; j++) {
727 if (rxr->rx_pg_desc_ring[j])
728 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
729 rxr->rx_pg_desc_ring[j],
730 rxr->rx_pg_desc_mapping[j]);
731 rxr->rx_pg_desc_ring[j] = NULL;
732 }
733 vfree(rxr->rx_pg_ring);
734 rxr->rx_pg_ring = NULL;
735 }
736 }
737
738 static int
739 bnx2_alloc_tx_mem(struct bnx2 *bp)
740 {
741 int i;
742
743 for (i = 0; i < bp->num_tx_rings; i++) {
744 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
745 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
746
747 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
748 if (txr->tx_buf_ring == NULL)
749 return -ENOMEM;
750
751 txr->tx_desc_ring =
752 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
753 &txr->tx_desc_mapping);
754 if (txr->tx_desc_ring == NULL)
755 return -ENOMEM;
756 }
757 return 0;
758 }
759
760 static int
761 bnx2_alloc_rx_mem(struct bnx2 *bp)
762 {
763 int i;
764
765 for (i = 0; i < bp->num_rx_rings; i++) {
766 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
767 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
768 int j;
769
770 rxr->rx_buf_ring =
771 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
772 if (rxr->rx_buf_ring == NULL)
773 return -ENOMEM;
774
775 memset(rxr->rx_buf_ring, 0,
776 SW_RXBD_RING_SIZE * bp->rx_max_ring);
777
778 for (j = 0; j < bp->rx_max_ring; j++) {
779 rxr->rx_desc_ring[j] =
780 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
781 &rxr->rx_desc_mapping[j]);
782 if (rxr->rx_desc_ring[j] == NULL)
783 return -ENOMEM;
784
785 }
786
787 if (bp->rx_pg_ring_size) {
788 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
789 bp->rx_max_pg_ring);
790 if (rxr->rx_pg_ring == NULL)
791 return -ENOMEM;
792
793 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
794 bp->rx_max_pg_ring);
795 }
796
797 for (j = 0; j < bp->rx_max_pg_ring; j++) {
798 rxr->rx_pg_desc_ring[j] =
799 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
800 &rxr->rx_pg_desc_mapping[j]);
801 if (rxr->rx_pg_desc_ring[j] == NULL)
802 return -ENOMEM;
803
804 }
805 }
806 return 0;
807 }
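/* Both ring allocators above may return -ENOMEM with the rings only
 * partially populated; they deliberately do not unwind.  Callers are
 * expected to follow the pattern used in bnx2_alloc_mem() below:
 *
 *	err = bnx2_alloc_rx_mem(bp);
 *	if (err)
 *		goto alloc_mem_err;	(bnx2_free_mem() frees the rest)
 *
 * which is safe because the free routines skip NULL ring pointers.
 */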
808
809 static void
810 bnx2_free_mem(struct bnx2 *bp)
811 {
812 int i;
813 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
814
815 bnx2_free_tx_mem(bp);
816 bnx2_free_rx_mem(bp);
817
818 for (i = 0; i < bp->ctx_pages; i++) {
819 if (bp->ctx_blk[i]) {
820 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
821 bp->ctx_blk[i],
822 bp->ctx_blk_mapping[i]);
823 bp->ctx_blk[i] = NULL;
824 }
825 }
826 if (bnapi->status_blk.msi) {
827 pci_free_consistent(bp->pdev, bp->status_stats_size,
828 bnapi->status_blk.msi,
829 bp->status_blk_mapping);
830 bnapi->status_blk.msi = NULL;
831 bp->stats_blk = NULL;
832 }
833 }
834
835 static int
836 bnx2_alloc_mem(struct bnx2 *bp)
837 {
838 int i, status_blk_size, err;
839 struct bnx2_napi *bnapi;
840 void *status_blk;
841
842 /* Combine status and statistics blocks into one allocation. */
843 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
844 if (bp->flags & BNX2_FLAG_MSIX_CAP)
845 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
846 BNX2_SBLK_MSIX_ALIGN_SIZE);
847 bp->status_stats_size = status_blk_size +
848 sizeof(struct statistics_block);
849
850 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
851 &bp->status_blk_mapping);
852 if (status_blk == NULL)
853 goto alloc_mem_err;
854
855 memset(status_blk, 0, bp->status_stats_size);
856
857 bnapi = &bp->bnx2_napi[0];
858 bnapi->status_blk.msi = status_blk;
859 bnapi->hw_tx_cons_ptr =
860 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
861 bnapi->hw_rx_cons_ptr =
862 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
863 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
864 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
865 struct status_block_msix *sblk;
866
867 bnapi = &bp->bnx2_napi[i];
868
869 sblk = (void *) (status_blk +
870 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
871 bnapi->status_blk.msix = sblk;
872 bnapi->hw_tx_cons_ptr =
873 &sblk->status_tx_quick_consumer_index;
874 bnapi->hw_rx_cons_ptr =
875 &sblk->status_rx_quick_consumer_index;
876 bnapi->int_num = i << 24;
877 }
878 }
879
880 bp->stats_blk = status_blk + status_blk_size;
881
882 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
883
884 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
885 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
886 if (bp->ctx_pages == 0)
887 bp->ctx_pages = 1;
888 for (i = 0; i < bp->ctx_pages; i++) {
889 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
890 BCM_PAGE_SIZE,
891 &bp->ctx_blk_mapping[i]);
892 if (bp->ctx_blk[i] == NULL)
893 goto alloc_mem_err;
894 }
895 }
896
897 err = bnx2_alloc_rx_mem(bp);
898 if (err)
899 goto alloc_mem_err;
900
901 err = bnx2_alloc_tx_mem(bp);
902 if (err)
903 goto alloc_mem_err;
904
905 return 0;
906
907 alloc_mem_err:
908 bnx2_free_mem(bp);
909 return -ENOMEM;
910 }
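/* Layout of the combined DMA allocation built above (one coherent
 * buffer of status_stats_size bytes):
 *
 *	offset 0                               status block, vector 0
 *	offset BNX2_SBLK_MSIX_ALIGN_SIZE * i   status block, MSI-X vec i
 *	offset status_blk_size                 statistics block
 *
 * Combining the two blocks saves a second pci_alloc_consistent() call
 * and keeps the statistics block cache-aligned.
 */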
911
912 static void
913 bnx2_report_fw_link(struct bnx2 *bp)
914 {
915 u32 fw_link_status = 0;
916
917 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
918 return;
919
920 if (bp->link_up) {
921 u32 bmsr;
922
923 switch (bp->line_speed) {
924 case SPEED_10:
925 if (bp->duplex == DUPLEX_HALF)
926 fw_link_status = BNX2_LINK_STATUS_10HALF;
927 else
928 fw_link_status = BNX2_LINK_STATUS_10FULL;
929 break;
930 case SPEED_100:
931 if (bp->duplex == DUPLEX_HALF)
932 fw_link_status = BNX2_LINK_STATUS_100HALF;
933 else
934 fw_link_status = BNX2_LINK_STATUS_100FULL;
935 break;
936 case SPEED_1000:
937 if (bp->duplex == DUPLEX_HALF)
938 fw_link_status = BNX2_LINK_STATUS_1000HALF;
939 else
940 fw_link_status = BNX2_LINK_STATUS_1000FULL;
941 break;
942 case SPEED_2500:
943 if (bp->duplex == DUPLEX_HALF)
944 fw_link_status = BNX2_LINK_STATUS_2500HALF;
945 else
946 fw_link_status = BNX2_LINK_STATUS_2500FULL;
947 break;
948 }
949
950 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
951
952 if (bp->autoneg) {
953 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
954
955 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
956 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
957
958 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
959 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
960 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
961 else
962 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
963 }
964 }
965 else
966 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
967
968 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
969 }
970
971 static char *
972 bnx2_xceiver_str(struct bnx2 *bp)
973 {
974 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
975 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
976 "Copper"));
977 }
978
979 static void
980 bnx2_report_link(struct bnx2 *bp)
981 {
982 if (bp->link_up) {
983 netif_carrier_on(bp->dev);
984 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
985 bnx2_xceiver_str(bp),
986 bp->line_speed,
987 bp->duplex == DUPLEX_FULL ? "full" : "half");
988
989 if (bp->flow_ctrl) {
990 if (bp->flow_ctrl & FLOW_CTRL_RX) {
991 pr_cont(", receive ");
992 if (bp->flow_ctrl & FLOW_CTRL_TX)
993 pr_cont("& transmit ");
994 }
995 else {
996 pr_cont(", transmit ");
997 }
998 pr_cont("flow control ON");
999 }
1000 pr_cont("\n");
1001 } else {
1002 netif_carrier_off(bp->dev);
1003 netdev_err(bp->dev, "NIC %s Link is Down\n",
1004 bnx2_xceiver_str(bp));
1005 }
1006
1007 bnx2_report_fw_link(bp);
1008 }
1009
1010 static void
1011 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1012 {
1013 u32 local_adv, remote_adv;
1014
1015 bp->flow_ctrl = 0;
1016 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1017 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1018
1019 if (bp->duplex == DUPLEX_FULL) {
1020 bp->flow_ctrl = bp->req_flow_ctrl;
1021 }
1022 return;
1023 }
1024
1025 if (bp->duplex != DUPLEX_FULL) {
1026 return;
1027 }
1028
1029 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1030 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1031 u32 val;
1032
1033 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1034 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1035 bp->flow_ctrl |= FLOW_CTRL_TX;
1036 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1037 bp->flow_ctrl |= FLOW_CTRL_RX;
1038 return;
1039 }
1040
1041 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1042 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1043
1044 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1045 u32 new_local_adv = 0;
1046 u32 new_remote_adv = 0;
1047
1048 if (local_adv & ADVERTISE_1000XPAUSE)
1049 new_local_adv |= ADVERTISE_PAUSE_CAP;
1050 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1051 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1052 if (remote_adv & ADVERTISE_1000XPAUSE)
1053 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1054 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1055 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1056
1057 local_adv = new_local_adv;
1058 remote_adv = new_remote_adv;
1059 }
1060
1061 /* See Table 28B-3 of 802.3ab-1999 spec. */
1062 if (local_adv & ADVERTISE_PAUSE_CAP) {
1063 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1064 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1065 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1066 }
1067 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1068 bp->flow_ctrl = FLOW_CTRL_RX;
1069 }
1070 }
1071 else {
1072 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1073 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1074 }
1075 }
1076 }
1077 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1078 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1079 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1080
1081 bp->flow_ctrl = FLOW_CTRL_TX;
1082 }
1083 }
1084 }
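/* The 802.3 Table 28B-3 resolution implemented above, in short
 * (Cap = PAUSE_CAP, Asym = PAUSE_ASYM advertisement bits):
 *
 *	local Cap      + remote Cap           -> TX and RX pause
 *	local Cap+Asym + remote Asym, no Cap  -> RX pause only
 *	local Asym, no Cap + remote Cap+Asym  -> TX pause only
 *	anything else                         -> no pause
 */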
1085
1086 static int
1087 bnx2_5709s_linkup(struct bnx2 *bp)
1088 {
1089 u32 val, speed;
1090
1091 bp->link_up = 1;
1092
1093 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1094 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1095 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1096
1097 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1098 bp->line_speed = bp->req_line_speed;
1099 bp->duplex = bp->req_duplex;
1100 return 0;
1101 }
1102 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1103 switch (speed) {
1104 case MII_BNX2_GP_TOP_AN_SPEED_10:
1105 bp->line_speed = SPEED_10;
1106 break;
1107 case MII_BNX2_GP_TOP_AN_SPEED_100:
1108 bp->line_speed = SPEED_100;
1109 break;
1110 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1111 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1112 bp->line_speed = SPEED_1000;
1113 break;
1114 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1115 bp->line_speed = SPEED_2500;
1116 break;
1117 }
1118 if (val & MII_BNX2_GP_TOP_AN_FD)
1119 bp->duplex = DUPLEX_FULL;
1120 else
1121 bp->duplex = DUPLEX_HALF;
1122 return 0;
1123 }
1124
1125 static int
1126 bnx2_5708s_linkup(struct bnx2 *bp)
1127 {
1128 u32 val;
1129
1130 bp->link_up = 1;
1131 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1132 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1133 case BCM5708S_1000X_STAT1_SPEED_10:
1134 bp->line_speed = SPEED_10;
1135 break;
1136 case BCM5708S_1000X_STAT1_SPEED_100:
1137 bp->line_speed = SPEED_100;
1138 break;
1139 case BCM5708S_1000X_STAT1_SPEED_1G:
1140 bp->line_speed = SPEED_1000;
1141 break;
1142 case BCM5708S_1000X_STAT1_SPEED_2G5:
1143 bp->line_speed = SPEED_2500;
1144 break;
1145 }
1146 if (val & BCM5708S_1000X_STAT1_FD)
1147 bp->duplex = DUPLEX_FULL;
1148 else
1149 bp->duplex = DUPLEX_HALF;
1150
1151 return 0;
1152 }
1153
1154 static int
1155 bnx2_5706s_linkup(struct bnx2 *bp)
1156 {
1157 u32 bmcr, local_adv, remote_adv, common;
1158
1159 bp->link_up = 1;
1160 bp->line_speed = SPEED_1000;
1161
1162 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1163 if (bmcr & BMCR_FULLDPLX) {
1164 bp->duplex = DUPLEX_FULL;
1165 }
1166 else {
1167 bp->duplex = DUPLEX_HALF;
1168 }
1169
1170 if (!(bmcr & BMCR_ANENABLE)) {
1171 return 0;
1172 }
1173
1174 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1175 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1176
1177 common = local_adv & remote_adv;
1178 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1179
1180 if (common & ADVERTISE_1000XFULL) {
1181 bp->duplex = DUPLEX_FULL;
1182 }
1183 else {
1184 bp->duplex = DUPLEX_HALF;
1185 }
1186 }
1187
1188 return 0;
1189 }
1190
1191 static int
1192 bnx2_copper_linkup(struct bnx2 *bp)
1193 {
1194 u32 bmcr;
1195
1196 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1197 if (bmcr & BMCR_ANENABLE) {
1198 u32 local_adv, remote_adv, common;
1199
1200 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1201 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1202
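/* The MII_STAT1000 link-partner bits sit two positions above the
 * corresponding MII_CTRL1000 advertisement bits (LPA_1000FULL is bit 11
 * vs. ADVERTISE_1000FULL at bit 9), hence the >> 2 before masking for
 * common capabilities.
 */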
1203 common = local_adv & (remote_adv >> 2);
1204 if (common & ADVERTISE_1000FULL) {
1205 bp->line_speed = SPEED_1000;
1206 bp->duplex = DUPLEX_FULL;
1207 }
1208 else if (common & ADVERTISE_1000HALF) {
1209 bp->line_speed = SPEED_1000;
1210 bp->duplex = DUPLEX_HALF;
1211 }
1212 else {
1213 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1214 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1215
1216 common = local_adv & remote_adv;
1217 if (common & ADVERTISE_100FULL) {
1218 bp->line_speed = SPEED_100;
1219 bp->duplex = DUPLEX_FULL;
1220 }
1221 else if (common & ADVERTISE_100HALF) {
1222 bp->line_speed = SPEED_100;
1223 bp->duplex = DUPLEX_HALF;
1224 }
1225 else if (common & ADVERTISE_10FULL) {
1226 bp->line_speed = SPEED_10;
1227 bp->duplex = DUPLEX_FULL;
1228 }
1229 else if (common & ADVERTISE_10HALF) {
1230 bp->line_speed = SPEED_10;
1231 bp->duplex = DUPLEX_HALF;
1232 }
1233 else {
1234 bp->line_speed = 0;
1235 bp->link_up = 0;
1236 }
1237 }
1238 }
1239 else {
1240 if (bmcr & BMCR_SPEED100) {
1241 bp->line_speed = SPEED_100;
1242 }
1243 else {
1244 bp->line_speed = SPEED_10;
1245 }
1246 if (bmcr & BMCR_FULLDPLX) {
1247 bp->duplex = DUPLEX_FULL;
1248 }
1249 else {
1250 bp->duplex = DUPLEX_HALF;
1251 }
1252 }
1253
1254 return 0;
1255 }
1256
1257 static void
1258 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1259 {
1260 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1261
1262 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1263 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1264 val |= 0x02 << 8;
1265
1266 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1267 u32 lo_water, hi_water;
1268
1269 if (bp->flow_ctrl & FLOW_CTRL_TX)
1270 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1271 else
1272 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1273 if (lo_water >= bp->rx_ring_size)
1274 lo_water = 0;
1275
1276 hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
1277
1278 if (hi_water <= lo_water)
1279 lo_water = 0;
1280
1281 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1282 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1283
1284 if (hi_water > 0xf)
1285 hi_water = 0xf;
1286 else if (hi_water == 0)
1287 lo_water = 0;
1288 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1289 }
1290 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1291 }
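/* Watermark sizing sketch: with TX flow control enabled, lo_water
 * starts from the header's default mark and hi_water is capped at a
 * quarter of the RX ring (but no more than lo_water + 16 buffers);
 * both are then divided by their hardware scale factors and packed
 * into the low bits of the context-type word, so the 5709 can assert
 * pause frames as free RX buffers drain below the high watermark.
 */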
1292
1293 static void
1294 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1295 {
1296 int i;
1297 u32 cid;
1298
1299 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1300 if (i == 1)
1301 cid = RX_RSS_CID;
1302 bnx2_init_rx_context(bp, cid);
1303 }
1304 }
1305
1306 static void
1307 bnx2_set_mac_link(struct bnx2 *bp)
1308 {
1309 u32 val;
1310
1311 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1312 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1313 (bp->duplex == DUPLEX_HALF)) {
1314 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1315 }
1316
1317 /* Configure the EMAC mode register. */
1318 val = REG_RD(bp, BNX2_EMAC_MODE);
1319
1320 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1321 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1322 BNX2_EMAC_MODE_25G_MODE);
1323
1324 if (bp->link_up) {
1325 switch (bp->line_speed) {
1326 case SPEED_10:
1327 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1328 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1329 break;
1330 }
1331 /* fall through */
1332 case SPEED_100:
1333 val |= BNX2_EMAC_MODE_PORT_MII;
1334 break;
1335 case SPEED_2500:
1336 val |= BNX2_EMAC_MODE_25G_MODE;
1337 /* fall through */
1338 case SPEED_1000:
1339 val |= BNX2_EMAC_MODE_PORT_GMII;
1340 break;
1341 }
1342 }
1343 else {
1344 val |= BNX2_EMAC_MODE_PORT_GMII;
1345 }
1346
1347 /* Set the MAC to operate in the appropriate duplex mode. */
1348 if (bp->duplex == DUPLEX_HALF)
1349 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1350 REG_WR(bp, BNX2_EMAC_MODE, val);
1351
1352 /* Enable/disable rx PAUSE. */
1353 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1354
1355 if (bp->flow_ctrl & FLOW_CTRL_RX)
1356 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1357 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1358
1359 /* Enable/disable tx PAUSE. */
1360 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1361 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1362
1363 if (bp->flow_ctrl & FLOW_CTRL_TX)
1364 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1365 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1366
1367 /* Acknowledge the interrupt. */
1368 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1369
1370 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1371 bnx2_init_all_rx_contexts(bp);
1372 }
1373
1374 static void
1375 bnx2_enable_bmsr1(struct bnx2 *bp)
1376 {
1377 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1378 (CHIP_NUM(bp) == CHIP_NUM_5709))
1379 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1380 MII_BNX2_BLK_ADDR_GP_STATUS);
1381 }
1382
1383 static void
1384 bnx2_disable_bmsr1(struct bnx2 *bp)
1385 {
1386 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1387 (CHIP_NUM(bp) == CHIP_NUM_5709))
1388 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1389 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1390 }
1391
1392 static int
1393 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1394 {
1395 u32 up1;
1396 int ret = 1;
1397
1398 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1399 return 0;
1400
1401 if (bp->autoneg & AUTONEG_SPEED)
1402 bp->advertising |= ADVERTISED_2500baseX_Full;
1403
1404 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1405 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1406
1407 bnx2_read_phy(bp, bp->mii_up1, &up1);
1408 if (!(up1 & BCM5708S_UP1_2G5)) {
1409 up1 |= BCM5708S_UP1_2G5;
1410 bnx2_write_phy(bp, bp->mii_up1, up1);
1411 ret = 0;
1412 }
1413
1414 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1415 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1416 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1417
1418 return ret;
1419 }
1420
1421 static int
1422 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1423 {
1424 u32 up1;
1425 int ret = 0;
1426
1427 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1428 return 0;
1429
1430 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1431 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1432
1433 bnx2_read_phy(bp, bp->mii_up1, &up1);
1434 if (up1 & BCM5708S_UP1_2G5) {
1435 up1 &= ~BCM5708S_UP1_2G5;
1436 bnx2_write_phy(bp, bp->mii_up1, up1);
1437 ret = 1;
1438 }
1439
1440 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1441 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1442 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1443
1444 return ret;
1445 }
1446
1447 static void
1448 bnx2_enable_forced_2g5(struct bnx2 *bp)
1449 {
1450 u32 bmcr;
1451
1452 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1453 return;
1454
1455 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1456 u32 val;
1457
1458 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1459 MII_BNX2_BLK_ADDR_SERDES_DIG);
1460 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1461 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1462 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1463 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1464
1465 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1466 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1467 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1468
1469 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1470 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1471 bmcr |= BCM5708S_BMCR_FORCE_2500;
1472 } else {
1473 return;
1474 }
1475
1476 if (bp->autoneg & AUTONEG_SPEED) {
1477 bmcr &= ~BMCR_ANENABLE;
1478 if (bp->req_duplex == DUPLEX_FULL)
1479 bmcr |= BMCR_FULLDPLX;
1480 }
1481 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1482 }
1483
1484 static void
1485 bnx2_disable_forced_2g5(struct bnx2 *bp)
1486 {
1487 u32 bmcr;
1488
1489 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1490 return;
1491
1492 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1493 u32 val;
1494
1495 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1496 MII_BNX2_BLK_ADDR_SERDES_DIG);
1497 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1498 val &= ~MII_BNX2_SD_MISC1_FORCE;
1499 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1500
1501 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1502 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1503 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1504
1505 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1506 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1507 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1508 } else {
1509 return;
1510 }
1511
1512 if (bp->autoneg & AUTONEG_SPEED)
1513 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1514 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1515 }
1516
1517 static void
1518 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1519 {
1520 u32 val;
1521
1522 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1523 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1524 if (start)
1525 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1526 else
1527 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1528 }
1529
1530 static int
1531 bnx2_set_link(struct bnx2 *bp)
1532 {
1533 u32 bmsr;
1534 u8 link_up;
1535
1536 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1537 bp->link_up = 1;
1538 return 0;
1539 }
1540
1541 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1542 return 0;
1543
1544 link_up = bp->link_up;
1545
1546 bnx2_enable_bmsr1(bp);
1547 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1548 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1549 bnx2_disable_bmsr1(bp);
1550
1551 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1552 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1553 u32 val, an_dbg;
1554
1555 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1556 bnx2_5706s_force_link_dn(bp, 0);
1557 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1558 }
1559 val = REG_RD(bp, BNX2_EMAC_STATUS);
1560
1561 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1562 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1563 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1564
1565 if ((val & BNX2_EMAC_STATUS_LINK) &&
1566 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1567 bmsr |= BMSR_LSTATUS;
1568 else
1569 bmsr &= ~BMSR_LSTATUS;
1570 }
1571
1572 if (bmsr & BMSR_LSTATUS) {
1573 bp->link_up = 1;
1574
1575 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1576 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1577 bnx2_5706s_linkup(bp);
1578 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1579 bnx2_5708s_linkup(bp);
1580 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1581 bnx2_5709s_linkup(bp);
1582 }
1583 else {
1584 bnx2_copper_linkup(bp);
1585 }
1586 bnx2_resolve_flow_ctrl(bp);
1587 }
1588 else {
1589 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1590 (bp->autoneg & AUTONEG_SPEED))
1591 bnx2_disable_forced_2g5(bp);
1592
1593 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1594 u32 bmcr;
1595
1596 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1597 bmcr |= BMCR_ANENABLE;
1598 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1599
1600 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1601 }
1602 bp->link_up = 0;
1603 }
1604
1605 if (bp->link_up != link_up) {
1606 bnx2_report_link(bp);
1607 }
1608
1609 bnx2_set_mac_link(bp);
1610
1611 return 0;
1612 }
1613
1614 static int
1615 bnx2_reset_phy(struct bnx2 *bp)
1616 {
1617 int i;
1618 u32 reg;
1619
1620 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1621
1622 #define PHY_RESET_MAX_WAIT 100
1623 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1624 udelay(10);
1625
1626 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1627 if (!(reg & BMCR_RESET)) {
1628 udelay(20);
1629 break;
1630 }
1631 }
1632 if (i == PHY_RESET_MAX_WAIT) {
1633 return -EBUSY;
1634 }
1635 return 0;
1636 }
1637
1638 static u32
1639 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1640 {
1641 u32 adv = 0;
1642
1643 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1644 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1645
1646 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1647 adv = ADVERTISE_1000XPAUSE;
1648 }
1649 else {
1650 adv = ADVERTISE_PAUSE_CAP;
1651 }
1652 }
1653 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1654 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1655 adv = ADVERTISE_1000XPSE_ASYM;
1656 }
1657 else {
1658 adv = ADVERTISE_PAUSE_ASYM;
1659 }
1660 }
1661 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1662 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1663 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1664 }
1665 else {
1666 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1667 }
1668 }
1669 return adv;
1670 }
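/* Mapping used above from the requested flow-control mode to the
 * advertisement bits (1000Base-X register bits for SerDes, clause-28
 * pause bits for copper):
 *
 *	req TX|RX -> PAUSE_CAP            (symmetric pause)
 *	req TX    -> PAUSE_ASYM           (asymmetric, we send pause)
 *	req RX    -> PAUSE_CAP | PAUSE_ASYM
 */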
1671
1672 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1673
1674 static int
1675 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1676 __releases(&bp->phy_lock)
1677 __acquires(&bp->phy_lock)
1678 {
1679 u32 speed_arg = 0, pause_adv;
1680
1681 pause_adv = bnx2_phy_get_pause_adv(bp);
1682
1683 if (bp->autoneg & AUTONEG_SPEED) {
1684 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1685 if (bp->advertising & ADVERTISED_10baseT_Half)
1686 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1687 if (bp->advertising & ADVERTISED_10baseT_Full)
1688 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1689 if (bp->advertising & ADVERTISED_100baseT_Half)
1690 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1691 if (bp->advertising & ADVERTISED_100baseT_Full)
1692 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1693 if (bp->advertising & ADVERTISED_1000baseT_Full)
1694 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1695 if (bp->advertising & ADVERTISED_2500baseX_Full)
1696 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1697 } else {
1698 if (bp->req_line_speed == SPEED_2500)
1699 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1700 else if (bp->req_line_speed == SPEED_1000)
1701 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1702 else if (bp->req_line_speed == SPEED_100) {
1703 if (bp->req_duplex == DUPLEX_FULL)
1704 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1705 else
1706 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1707 } else if (bp->req_line_speed == SPEED_10) {
1708 if (bp->req_duplex == DUPLEX_FULL)
1709 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1710 else
1711 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1712 }
1713 }
1714
1715 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1716 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1717 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1718 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1719
1720 if (port == PORT_TP)
1721 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1722 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1723
1724 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1725
1726 spin_unlock_bh(&bp->phy_lock);
1727 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1728 spin_lock_bh(&bp->phy_lock);
1729
1730 return 0;
1731 }
1732
1733 static int
1734 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1735 __releases(&bp->phy_lock)
1736 __acquires(&bp->phy_lock)
1737 {
1738 u32 adv, bmcr;
1739 u32 new_adv = 0;
1740
1741 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1742 return (bnx2_setup_remote_phy(bp, port));
1743
1744 if (!(bp->autoneg & AUTONEG_SPEED)) {
1745 u32 new_bmcr;
1746 int force_link_down = 0;
1747
1748 if (bp->req_line_speed == SPEED_2500) {
1749 if (!bnx2_test_and_enable_2g5(bp))
1750 force_link_down = 1;
1751 } else if (bp->req_line_speed == SPEED_1000) {
1752 if (bnx2_test_and_disable_2g5(bp))
1753 force_link_down = 1;
1754 }
1755 bnx2_read_phy(bp, bp->mii_adv, &adv);
1756 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1757
1758 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1759 new_bmcr = bmcr & ~BMCR_ANENABLE;
1760 new_bmcr |= BMCR_SPEED1000;
1761
1762 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1763 if (bp->req_line_speed == SPEED_2500)
1764 bnx2_enable_forced_2g5(bp);
1765 else if (bp->req_line_speed == SPEED_1000) {
1766 bnx2_disable_forced_2g5(bp);
1767 new_bmcr &= ~0x2000;
1768 }
1769
1770 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1771 if (bp->req_line_speed == SPEED_2500)
1772 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1773 else
1774 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1775 }
1776
1777 if (bp->req_duplex == DUPLEX_FULL) {
1778 adv |= ADVERTISE_1000XFULL;
1779 new_bmcr |= BMCR_FULLDPLX;
1780 }
1781 else {
1782 adv |= ADVERTISE_1000XHALF;
1783 new_bmcr &= ~BMCR_FULLDPLX;
1784 }
1785 if ((new_bmcr != bmcr) || (force_link_down)) {
1786 /* Force a link down visible on the other side */
1787 if (bp->link_up) {
1788 bnx2_write_phy(bp, bp->mii_adv, adv &
1789 ~(ADVERTISE_1000XFULL |
1790 ADVERTISE_1000XHALF));
1791 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1792 BMCR_ANRESTART | BMCR_ANENABLE);
1793
1794 bp->link_up = 0;
1795 netif_carrier_off(bp->dev);
1796 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1797 bnx2_report_link(bp);
1798 }
1799 bnx2_write_phy(bp, bp->mii_adv, adv);
1800 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1801 } else {
1802 bnx2_resolve_flow_ctrl(bp);
1803 bnx2_set_mac_link(bp);
1804 }
1805 return 0;
1806 }
1807
1808 bnx2_test_and_enable_2g5(bp);
1809
1810 if (bp->advertising & ADVERTISED_1000baseT_Full)
1811 new_adv |= ADVERTISE_1000XFULL;
1812
1813 new_adv |= bnx2_phy_get_pause_adv(bp);
1814
1815 bnx2_read_phy(bp, bp->mii_adv, &adv);
1816 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1817
1818 bp->serdes_an_pending = 0;
1819 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1820 /* Force a link down visible on the other side */
1821 if (bp->link_up) {
1822 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1823 spin_unlock_bh(&bp->phy_lock);
1824 msleep(20);
1825 spin_lock_bh(&bp->phy_lock);
1826 }
1827
1828 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1829 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1830 BMCR_ANENABLE);
1831 /* Speed up link-up time when the link partner
1832 * does not autonegotiate, which is very common
1833 * in blade servers. Some blade servers use
1834 * IPMI for keyboard input and it's important
1835 * to minimize link disruptions. Autonegotiation involves
1836 * exchanging base pages plus 3 next pages and
1837 * normally completes in about 120 msec.
1838 */
1839 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1840 bp->serdes_an_pending = 1;
1841 mod_timer(&bp->timer, jiffies + bp->current_interval);
1842 } else {
1843 bnx2_resolve_flow_ctrl(bp);
1844 bnx2_set_mac_link(bp);
1845 }
1846
1847 return 0;
1848 }
1849
1850 #define ETHTOOL_ALL_FIBRE_SPEED \
1851 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1852 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1853 (ADVERTISED_1000baseT_Full)
1854
1855 #define ETHTOOL_ALL_COPPER_SPEED \
1856 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1857 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1858 ADVERTISED_1000baseT_Full)
1859
1860 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1861 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1862
1863 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1864
1865 static void
1866 bnx2_set_default_remote_link(struct bnx2 *bp)
1867 {
1868 u32 link;
1869
1870 if (bp->phy_port == PORT_TP)
1871 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1872 else
1873 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1874
1875 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1876 bp->req_line_speed = 0;
1877 bp->autoneg |= AUTONEG_SPEED;
1878 bp->advertising = ADVERTISED_Autoneg;
1879 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1880 bp->advertising |= ADVERTISED_10baseT_Half;
1881 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1882 bp->advertising |= ADVERTISED_10baseT_Full;
1883 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1884 bp->advertising |= ADVERTISED_100baseT_Half;
1885 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1886 bp->advertising |= ADVERTISED_100baseT_Full;
1887 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1888 bp->advertising |= ADVERTISED_1000baseT_Full;
1889 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1890 bp->advertising |= ADVERTISED_2500baseX_Full;
1891 } else {
1892 bp->autoneg = 0;
1893 bp->advertising = 0;
1894 bp->req_duplex = DUPLEX_FULL;
1895 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1896 bp->req_line_speed = SPEED_10;
1897 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1898 bp->req_duplex = DUPLEX_HALF;
1899 }
1900 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1901 bp->req_line_speed = SPEED_100;
1902 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1903 bp->req_duplex = DUPLEX_HALF;
1904 }
1905 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1906 bp->req_line_speed = SPEED_1000;
1907 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1908 bp->req_line_speed = SPEED_2500;
1909 }
1910 }
1911
1912 static void
1913 bnx2_set_default_link(struct bnx2 *bp)
1914 {
1915 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1916 bnx2_set_default_remote_link(bp);
1917 return;
1918 }
1919
1920 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1921 bp->req_line_speed = 0;
1922 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1923 u32 reg;
1924
1925 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1926
1927 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1928 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1929 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1930 bp->autoneg = 0;
1931 bp->req_line_speed = bp->line_speed = SPEED_1000;
1932 bp->req_duplex = DUPLEX_FULL;
1933 }
1934 } else
1935 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1936 }
1937
1938 static void
1939 bnx2_send_heart_beat(struct bnx2 *bp)
1940 {
1941 u32 msg;
1942 u32 addr;
1943
1944 spin_lock(&bp->indirect_lock);
1945 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1946 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1947 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1948 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1949 spin_unlock(&bp->indirect_lock);
1950 }
1951
1952 static void
1953 bnx2_remote_phy_event(struct bnx2 *bp)
1954 {
1955 u32 msg;
1956 u8 link_up = bp->link_up;
1957 u8 old_port;
1958
1959 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1960
1961 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1962 bnx2_send_heart_beat(bp);
1963
1964 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1965
1966 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1967 bp->link_up = 0;
1968 else {
1969 u32 speed;
1970
1971 bp->link_up = 1;
1972 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1973 bp->duplex = DUPLEX_FULL;
1974 switch (speed) {
1975 case BNX2_LINK_STATUS_10HALF:
1976 bp->duplex = DUPLEX_HALF; /* fall through */
1977 case BNX2_LINK_STATUS_10FULL:
1978 bp->line_speed = SPEED_10;
1979 break;
1980 case BNX2_LINK_STATUS_100HALF:
1981 bp->duplex = DUPLEX_HALF; /* fall through */
1982 case BNX2_LINK_STATUS_100BASE_T4:
1983 case BNX2_LINK_STATUS_100FULL:
1984 bp->line_speed = SPEED_100;
1985 break;
1986 case BNX2_LINK_STATUS_1000HALF:
1987 bp->duplex = DUPLEX_HALF; /* fall through */
1988 case BNX2_LINK_STATUS_1000FULL:
1989 bp->line_speed = SPEED_1000;
1990 break;
1991 case BNX2_LINK_STATUS_2500HALF:
1992 bp->duplex = DUPLEX_HALF; /* fall through */
1993 case BNX2_LINK_STATUS_2500FULL:
1994 bp->line_speed = SPEED_2500;
1995 break;
1996 default:
1997 bp->line_speed = 0;
1998 break;
1999 }
2000
2001 bp->flow_ctrl = 0;
2002 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2003 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2004 if (bp->duplex == DUPLEX_FULL)
2005 bp->flow_ctrl = bp->req_flow_ctrl;
2006 } else {
2007 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2008 bp->flow_ctrl |= FLOW_CTRL_TX;
2009 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2010 bp->flow_ctrl |= FLOW_CTRL_RX;
2011 }
2012
2013 old_port = bp->phy_port;
2014 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2015 bp->phy_port = PORT_FIBRE;
2016 else
2017 bp->phy_port = PORT_TP;
2018
2019 if (old_port != bp->phy_port)
2020 bnx2_set_default_link(bp);
2021
2022 }
2023 if (bp->link_up != link_up)
2024 bnx2_report_link(bp);
2025
2026 bnx2_set_mac_link(bp);
2027 }
2028
2029 static int
2030 bnx2_set_remote_link(struct bnx2 *bp)
2031 {
2032 u32 evt_code;
2033
2034 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2035 switch (evt_code) {
2036 case BNX2_FW_EVT_CODE_LINK_EVENT:
2037 bnx2_remote_phy_event(bp);
2038 break;
2039 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2040 default:
2041 bnx2_send_heart_beat(bp);
2042 break;
2043 }
2044 return 0;
2045 }
2046
2047 static int
2048 bnx2_setup_copper_phy(struct bnx2 *bp)
2049 __releases(&bp->phy_lock)
2050 __acquires(&bp->phy_lock)
2051 {
2052 u32 bmcr;
2053 u32 new_bmcr;
2054
2055 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2056
2057 if (bp->autoneg & AUTONEG_SPEED) {
2058 u32 adv_reg, adv1000_reg;
2059 u32 new_adv_reg = 0;
2060 u32 new_adv1000_reg = 0;
2061
2062 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2063 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2064 ADVERTISE_PAUSE_ASYM);
2065
2066 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2067 adv1000_reg &= PHY_ALL_1000_SPEED;
2068
2069 if (bp->advertising & ADVERTISED_10baseT_Half)
2070 new_adv_reg |= ADVERTISE_10HALF;
2071 if (bp->advertising & ADVERTISED_10baseT_Full)
2072 new_adv_reg |= ADVERTISE_10FULL;
2073 if (bp->advertising & ADVERTISED_100baseT_Half)
2074 new_adv_reg |= ADVERTISE_100HALF;
2075 if (bp->advertising & ADVERTISED_100baseT_Full)
2076 new_adv_reg |= ADVERTISE_100FULL;
2077 if (bp->advertising & ADVERTISED_1000baseT_Full)
2078 new_adv1000_reg |= ADVERTISE_1000FULL;
2079
2080 new_adv_reg |= ADVERTISE_CSMA;
2081
2082 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2083
2084 if ((adv1000_reg != new_adv1000_reg) ||
2085 (adv_reg != new_adv_reg) ||
2086 ((bmcr & BMCR_ANENABLE) == 0)) {
2087
2088 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2089 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2090 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2091 BMCR_ANENABLE);
2092 }
2093 else if (bp->link_up) {
2094 /* Flow ctrl may have changed from auto to forced,
2095 * or vice-versa. */
2096
2097 bnx2_resolve_flow_ctrl(bp);
2098 bnx2_set_mac_link(bp);
2099 }
2100 return 0;
2101 }
2102
2103 new_bmcr = 0;
2104 if (bp->req_line_speed == SPEED_100) {
2105 new_bmcr |= BMCR_SPEED100;
2106 }
2107 if (bp->req_duplex == DUPLEX_FULL) {
2108 new_bmcr |= BMCR_FULLDPLX;
2109 }
2110 if (new_bmcr != bmcr) {
2111 u32 bmsr;
2112
2113 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2114 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2115
2116 if (bmsr & BMSR_LSTATUS) {
2117 /* Force link down */
2118 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2119 spin_unlock_bh(&bp->phy_lock);
2120 msleep(50);
2121 spin_lock_bh(&bp->phy_lock);
2122
2123 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2124 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2125 }
2126
2127 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2128
2129 /* Normally, the new speed is set up after the link has
2130 * gone down and come back up. In some cases, the link will
2131 * not go down, so we need to set up the new speed here.
2132 */
2133 if (bmsr & BMSR_LSTATUS) {
2134 bp->line_speed = bp->req_line_speed;
2135 bp->duplex = bp->req_duplex;
2136 bnx2_resolve_flow_ctrl(bp);
2137 bnx2_set_mac_link(bp);
2138 }
2139 } else {
2140 bnx2_resolve_flow_ctrl(bp);
2141 bnx2_set_mac_link(bp);
2142 }
2143 return 0;
2144 }
2145
2146 static int
2147 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2148 __releases(&bp->phy_lock)
2149 __acquires(&bp->phy_lock)
2150 {
2151 if (bp->loopback == MAC_LOOPBACK)
2152 return 0;
2153
2154 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2155 return bnx2_setup_serdes_phy(bp, port);
2156 }
2157 else {
2158 return bnx2_setup_copper_phy(bp);
2159 }
2160 }
2161
2162 static int
2163 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2164 {
2165 u32 val;
2166
2167 bp->mii_bmcr = MII_BMCR + 0x10;
2168 bp->mii_bmsr = MII_BMSR + 0x10;
2169 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2170 bp->mii_adv = MII_ADVERTISE + 0x10;
2171 bp->mii_lpa = MII_LPA + 0x10;
2172 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2173
2174 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2175 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2176
2177 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2178 if (reset_phy)
2179 bnx2_reset_phy(bp);
2180
2181 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2182
2183 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2184 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2185 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2186 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2187
2188 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2189 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2190 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2191 val |= BCM5708S_UP1_2G5;
2192 else
2193 val &= ~BCM5708S_UP1_2G5;
2194 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2195
2196 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2197 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2198 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2199 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2200
2201 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2202
2203 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2204 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2205 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2206
2207 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2208
2209 return 0;
2210 }
2211
2212 static int
2213 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2214 {
2215 u32 val;
2216
2217 if (reset_phy)
2218 bnx2_reset_phy(bp);
2219
2220 bp->mii_up1 = BCM5708S_UP1;
2221
2222 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2223 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2224 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2225
2226 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2227 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2228 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2229
2230 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2231 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2232 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2233
2234 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2235 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2236 val |= BCM5708S_UP1_2G5;
2237 bnx2_write_phy(bp, BCM5708S_UP1, val);
2238 }
2239
2240 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2241 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2242 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2243 /* increase tx signal amplitude */
2244 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2245 BCM5708S_BLK_ADDR_TX_MISC);
2246 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2247 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2248 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2249 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2250 }
2251
2252 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2253 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2254
2255 if (val) {
2256 u32 is_backplane;
2257
2258 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2259 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2260 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2261 BCM5708S_BLK_ADDR_TX_MISC);
2262 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2263 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2264 BCM5708S_BLK_ADDR_DIG);
2265 }
2266 }
2267 return 0;
2268 }
2269
2270 static int
2271 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2272 {
2273 if (reset_phy)
2274 bnx2_reset_phy(bp);
2275
2276 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2277
2278 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2279 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2280
2281 if (bp->dev->mtu > 1500) {
2282 u32 val;
2283
2284 /* Set extended packet length bit */
2285 bnx2_write_phy(bp, 0x18, 0x7);
2286 bnx2_read_phy(bp, 0x18, &val);
2287 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2288
2289 bnx2_write_phy(bp, 0x1c, 0x6c00);
2290 bnx2_read_phy(bp, 0x1c, &val);
2291 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2292 }
2293 else {
2294 u32 val;
2295
2296 bnx2_write_phy(bp, 0x18, 0x7);
2297 bnx2_read_phy(bp, 0x18, &val);
2298 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2299
2300 bnx2_write_phy(bp, 0x1c, 0x6c00);
2301 bnx2_read_phy(bp, 0x1c, &val);
2302 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2303 }
2304
2305 return 0;
2306 }
2307
2308 static int
2309 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2310 {
2311 u32 val;
2312
2313 if (reset_phy)
2314 bnx2_reset_phy(bp);
2315
2316 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2317 bnx2_write_phy(bp, 0x18, 0x0c00);
2318 bnx2_write_phy(bp, 0x17, 0x000a);
2319 bnx2_write_phy(bp, 0x15, 0x310b);
2320 bnx2_write_phy(bp, 0x17, 0x201f);
2321 bnx2_write_phy(bp, 0x15, 0x9506);
2322 bnx2_write_phy(bp, 0x17, 0x401f);
2323 bnx2_write_phy(bp, 0x15, 0x14e2);
2324 bnx2_write_phy(bp, 0x18, 0x0400);
2325 }
2326
2327 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2328 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2329 MII_BNX2_DSP_EXPAND_REG | 0x8);
2330 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2331 val &= ~(1 << 8);
2332 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2333 }
2334
2335 if (bp->dev->mtu > 1500) {
2336 /* Set extended packet length bit */
2337 bnx2_write_phy(bp, 0x18, 0x7);
2338 bnx2_read_phy(bp, 0x18, &val);
2339 bnx2_write_phy(bp, 0x18, val | 0x4000);
2340
2341 bnx2_read_phy(bp, 0x10, &val);
2342 bnx2_write_phy(bp, 0x10, val | 0x1);
2343 }
2344 else {
2345 bnx2_write_phy(bp, 0x18, 0x7);
2346 bnx2_read_phy(bp, 0x18, &val);
2347 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2348
2349 bnx2_read_phy(bp, 0x10, &val);
2350 bnx2_write_phy(bp, 0x10, val & ~0x1);
2351 }
2352
2353 /* ethernet@wirespeed */
2354 bnx2_write_phy(bp, 0x18, 0x7007);
2355 bnx2_read_phy(bp, 0x18, &val);
2356 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2357 return 0;
2358 }
2359
2360
2361 static int
2362 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2363 __releases(&bp->phy_lock)
2364 __acquires(&bp->phy_lock)
2365 {
2366 u32 val;
2367 int rc = 0;
2368
2369 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2370 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2371
2372 bp->mii_bmcr = MII_BMCR;
2373 bp->mii_bmsr = MII_BMSR;
2374 bp->mii_bmsr1 = MII_BMSR;
2375 bp->mii_adv = MII_ADVERTISE;
2376 bp->mii_lpa = MII_LPA;
2377
2378 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2379
2380 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2381 goto setup_phy;
2382
2383 bnx2_read_phy(bp, MII_PHYSID1, &val);
2384 bp->phy_id = val << 16;
2385 bnx2_read_phy(bp, MII_PHYSID2, &val);
2386 bp->phy_id |= val & 0xffff;
2387
2388 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2389 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2390 rc = bnx2_init_5706s_phy(bp, reset_phy);
2391 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2392 rc = bnx2_init_5708s_phy(bp, reset_phy);
2393 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2394 rc = bnx2_init_5709s_phy(bp, reset_phy);
2395 }
2396 else {
2397 rc = bnx2_init_copper_phy(bp, reset_phy);
2398 }
2399
2400 setup_phy:
2401 if (!rc)
2402 rc = bnx2_setup_phy(bp, bp->phy_port);
2403
2404 return rc;
2405 }
2406
2407 static int
2408 bnx2_set_mac_loopback(struct bnx2 *bp)
2409 {
2410 u32 mac_mode;
2411
2412 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2413 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2414 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2415 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2416 bp->link_up = 1;
2417 return 0;
2418 }
2419
2420 static int bnx2_test_link(struct bnx2 *);
2421
2422 static int
2423 bnx2_set_phy_loopback(struct bnx2 *bp)
2424 {
2425 u32 mac_mode;
2426 int rc, i;
2427
2428 spin_lock_bh(&bp->phy_lock);
2429 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2430 BMCR_SPEED1000);
2431 spin_unlock_bh(&bp->phy_lock);
2432 if (rc)
2433 return rc;
2434
2435 for (i = 0; i < 10; i++) {
2436 if (bnx2_test_link(bp) == 0)
2437 break;
2438 msleep(100);
2439 }
2440
2441 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2442 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2443 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2444 BNX2_EMAC_MODE_25G_MODE);
2445
2446 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2447 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2448 bp->link_up = 1;
2449 return 0;
2450 }
2451
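/* Post a message to the firmware mailbox and optionally wait for the
 * acknowledgement.  The low bits of the message carry a rolling
 * sequence number; as an illustrative example, a WAIT0 message with
 * sequence 0x0005 is written to BNX2_DRV_MB as
 * (BNX2_DRV_MSG_DATA_WAIT0 | 0x0005), and the firmware acks it by
 * echoing 0x0005 in the BNX2_FW_MSG_ACK field of BNX2_FW_MB.
 */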
2452 static int
2453 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2454 {
2455 int i;
2456 u32 val;
2457
2458 bp->fw_wr_seq++;
2459 msg_data |= bp->fw_wr_seq;
2460
2461 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2462
2463 if (!ack)
2464 return 0;
2465
2466 /* wait for an acknowledgement. */
2467 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2468 msleep(10);
2469
2470 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2471
2472 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2473 break;
2474 }
2475 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2476 return 0;
2477
2478 /* If we timed out, inform the firmware that this is the case. */
2479 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2480 if (!silent)
2481 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2482
2483 msg_data &= ~BNX2_DRV_MSG_CODE;
2484 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2485
2486 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2487
2488 return -EBUSY;
2489 }
2490
2491 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2492 return -EIO;
2493
2494 return 0;
2495 }
2496
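/* Program the 5709 host page table: for each context page, the low 32
 * bits of the DMA address (plus the VALID bit) go into DATA0, the high
 * 32 bits into DATA1, and a write to the CTRL register with the page
 * index and WRITE_REQ commits the entry.  Each request is polled until
 * the WRITE_REQ bit clears.
 */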
2497 static int
2498 bnx2_init_5709_context(struct bnx2 *bp)
2499 {
2500 int i, ret = 0;
2501 u32 val;
2502
2503 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2504 val |= (BCM_PAGE_BITS - 8) << 16;
2505 REG_WR(bp, BNX2_CTX_COMMAND, val);
2506 for (i = 0; i < 10; i++) {
2507 val = REG_RD(bp, BNX2_CTX_COMMAND);
2508 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2509 break;
2510 udelay(2);
2511 }
2512 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2513 return -EBUSY;
2514
2515 for (i = 0; i < bp->ctx_pages; i++) {
2516 int j;
2517
2518 if (bp->ctx_blk[i])
2519 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2520 else
2521 return -ENOMEM;
2522
2523 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2524 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2525 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2526 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2527 (u64) bp->ctx_blk_mapping[i] >> 32);
2528 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2529 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2530 for (j = 0; j < 10; j++) {
2531
2532 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2533 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2534 break;
2535 udelay(5);
2536 }
2537 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2538 ret = -EBUSY;
2539 break;
2540 }
2541 }
2542 return ret;
2543 }
2544
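/* Zero out all 96 contexts through the context window.  On the 5706 A0,
 * context IDs with bit 3 set are apparently remapped to a region
 * starting at 0x60: e.g. vcid 0x0b maps to pcid 0x60 + 0x00 + 0x3 =
 * 0x63, and vcid 0x1b maps to 0x60 + 0x10 + 0x3 = 0x73.
 */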
2545 static void
2546 bnx2_init_context(struct bnx2 *bp)
2547 {
2548 u32 vcid;
2549
2550 vcid = 96;
2551 while (vcid) {
2552 u32 vcid_addr, pcid_addr, offset;
2553 int i;
2554
2555 vcid--;
2556
2557 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2558 u32 new_vcid;
2559
2560 vcid_addr = GET_PCID_ADDR(vcid);
2561 if (vcid & 0x8) {
2562 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2563 }
2564 else {
2565 new_vcid = vcid;
2566 }
2567 pcid_addr = GET_PCID_ADDR(new_vcid);
2568 }
2569 else {
2570 vcid_addr = GET_CID_ADDR(vcid);
2571 pcid_addr = vcid_addr;
2572 }
2573
2574 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2575 vcid_addr += (i << PHY_CTX_SHIFT);
2576 pcid_addr += (i << PHY_CTX_SHIFT);
2577
2578 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2579 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2580
2581 /* Zero out the context. */
2582 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2583 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2584 }
2585 }
2586 }
2587
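/* Work around bad internal rx buffer memory: ask the firmware to
 * allocate every mbuf cluster, remember the good ones (bit 9 clear),
 * and free only those back.  The bad clusters stay allocated and are
 * never handed out again.
 */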
2588 static int
2589 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2590 {
2591 u16 *good_mbuf;
2592 u32 good_mbuf_cnt;
2593 u32 val;
2594
2595 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2596 if (good_mbuf == NULL) {
2597 pr_err("Failed to allocate memory in %s\n", __func__);
2598 return -ENOMEM;
2599 }
2600
2601 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2602 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2603
2604 good_mbuf_cnt = 0;
2605
2606 /* Allocate a bunch of mbufs and save the good ones in an array. */
2607 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2608 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2609 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2610 BNX2_RBUF_COMMAND_ALLOC_REQ);
2611
2612 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2613
2614 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2615
2616 /* The addresses with Bit 9 set are bad memory blocks. */
2617 if (!(val & (1 << 9))) {
2618 good_mbuf[good_mbuf_cnt] = (u16) val;
2619 good_mbuf_cnt++;
2620 }
2621
2622 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2623 }
2624
2625 /* Free the good ones back to the mbuf pool thus discarding
2626 * all the bad ones. */
2627 while (good_mbuf_cnt) {
2628 good_mbuf_cnt--;
2629
2630 val = good_mbuf[good_mbuf_cnt];
/* The free command appears to carry the 9-bit mbuf index in both
 * halves of the word, with bit 0 as the valid flag.
 */
2631 val = (val << 9) | val | 1;
2632
2633 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2634 }
2635 kfree(good_mbuf);
2636 return 0;
2637 }
2638
2639 static void
2640 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2641 {
2642 u32 val;
2643
2644 val = (mac_addr[0] << 8) | mac_addr[1];
2645
2646 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2647
2648 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2649 (mac_addr[4] << 8) | mac_addr[5];
2650
2651 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2652 }
2653
2654 static inline int
2655 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2656 {
2657 dma_addr_t mapping;
2658 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2659 struct rx_bd *rxbd =
2660 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2661 struct page *page = alloc_page(GFP_ATOMIC);
2662
2663 if (!page)
2664 return -ENOMEM;
2665 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2666 PCI_DMA_FROMDEVICE);
2667 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2668 __free_page(page);
2669 return -EIO;
2670 }
2671
2672 rx_pg->page = page;
2673 pci_unmap_addr_set(rx_pg, mapping, mapping);
2674 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2675 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2676 return 0;
2677 }
2678
2679 static void
2680 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2681 {
2682 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2683 struct page *page = rx_pg->page;
2684
2685 if (!page)
2686 return;
2687
2688 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2689 PCI_DMA_FROMDEVICE);
2690
2691 __free_page(page);
2692 rx_pg->page = NULL;
2693 }
2694
2695 static inline int
2696 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2697 {
2698 struct sk_buff *skb;
2699 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2700 dma_addr_t mapping;
2701 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2702 unsigned long align;
2703
2704 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2705 if (skb == NULL) {
2706 return -ENOMEM;
2707 }
2708
2709 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2710 skb_reserve(skb, BNX2_RX_ALIGN - align);
2711
2712 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2713 PCI_DMA_FROMDEVICE);
2714 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2715 dev_kfree_skb(skb);
2716 return -EIO;
2717 }
2718
2719 rx_buf->skb = skb;
2720 pci_unmap_addr_set(rx_buf, mapping, mapping);
2721
2722 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2723 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2724
2725 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2726
2727 return 0;
2728 }
2729
2730 static int
2731 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2732 {
2733 struct status_block *sblk = bnapi->status_blk.msi;
2734 u32 new_link_state, old_link_state;
2735 int is_set = 1;
2736
2737 new_link_state = sblk->status_attn_bits & event;
2738 old_link_state = sblk->status_attn_bits_ack & event;
2739 if (new_link_state != old_link_state) {
2740 if (new_link_state)
2741 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2742 else
2743 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2744 } else
2745 is_set = 0;
2746
2747 return is_set;
2748 }
2749
2750 static void
2751 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2752 {
2753 spin_lock(&bp->phy_lock);
2754
2755 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2756 bnx2_set_link(bp);
2757 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2758 bnx2_set_remote_link(bp);
2759
2760 spin_unlock(&bp->phy_lock);
2761
2762 }
2763
2764 static inline u16
2765 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2766 {
2767 u16 cons;
2768
2769 /* Tell compiler that status block fields can change. */
2770 barrier();
2771 cons = *bnapi->hw_tx_cons_ptr;
2772 barrier();
/* The last BD of each ring page is a next-page pointer, not a
 * real descriptor, so skip over it.
 */
2773 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2774 cons++;
2775 return cons;
2776 }
2777
2778 static int
2779 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2780 {
2781 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2782 u16 hw_cons, sw_cons, sw_ring_cons;
2783 int tx_pkt = 0, index;
2784 struct netdev_queue *txq;
2785
2786 index = (bnapi - bp->bnx2_napi);
2787 txq = netdev_get_tx_queue(bp->dev, index);
2788
2789 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2790 sw_cons = txr->tx_cons;
2791
2792 while (sw_cons != hw_cons) {
2793 struct sw_tx_bd *tx_buf;
2794 struct sk_buff *skb;
2795 int i, last;
2796
2797 sw_ring_cons = TX_RING_IDX(sw_cons);
2798
2799 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2800 skb = tx_buf->skb;
2801
2802 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2803 prefetch(&skb->end);
2804
2805 /* partial BD completions possible with TSO packets */
2806 if (tx_buf->is_gso) {
2807 u16 last_idx, last_ring_idx;
2808
2809 last_idx = sw_cons + tx_buf->nr_frags + 1;
2810 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2811 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2812 last_idx++;
2813 }
2814 if ((s16) (last_idx - hw_cons) > 0) {
2815 break;
2816 }
2817 }
2818
2819 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2820 skb_headlen(skb), PCI_DMA_TODEVICE);
2821
2822 tx_buf->skb = NULL;
2823 last = tx_buf->nr_frags;
2824
2825 for (i = 0; i < last; i++) {
2826 sw_cons = NEXT_TX_BD(sw_cons);
2827
2828 pci_unmap_page(bp->pdev,
2829 pci_unmap_addr(
2830 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2831 mapping),
2832 skb_shinfo(skb)->frags[i].size,
2833 PCI_DMA_TODEVICE);
2834 }
2835
2836 sw_cons = NEXT_TX_BD(sw_cons);
2837
2838 dev_kfree_skb(skb);
2839 tx_pkt++;
2840 if (tx_pkt == budget)
2841 break;
2842
2843 if (hw_cons == sw_cons)
2844 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2845 }
2846
2847 txr->hw_tx_cons = hw_cons;
2848 txr->tx_cons = sw_cons;
2849
2850 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2851 * before checking for netif_tx_queue_stopped(). Without the
2852 * memory barrier, there is a small possibility that bnx2_start_xmit()
2853 * will miss it and cause the queue to be stopped forever.
2854 */
2855 smp_mb();
2856
2857 if (unlikely(netif_tx_queue_stopped(txq)) &&
2858 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2859 __netif_tx_lock(txq, smp_processor_id());
2860 if ((netif_tx_queue_stopped(txq)) &&
2861 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2862 netif_tx_wake_queue(txq);
2863 __netif_tx_unlock(txq);
2864 }
2865
2866 return tx_pkt;
2867 }
2868
2869 static void
2870 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2871 struct sk_buff *skb, int count)
2872 {
2873 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2874 struct rx_bd *cons_bd, *prod_bd;
2875 int i;
2876 u16 hw_prod, prod;
2877 u16 cons = rxr->rx_pg_cons;
2878
2879 cons_rx_pg = &rxr->rx_pg_ring[cons];
2880
2881 /* The caller was unable to allocate a new page to replace the
2882 * last one in the frags array, so we need to recycle that page
2883 * and then free the skb.
2884 */
2885 if (skb) {
2886 struct page *page;
2887 struct skb_shared_info *shinfo;
2888
2889 shinfo = skb_shinfo(skb);
2890 shinfo->nr_frags--;
2891 page = shinfo->frags[shinfo->nr_frags].page;
2892 shinfo->frags[shinfo->nr_frags].page = NULL;
2893
2894 cons_rx_pg->page = page;
2895 dev_kfree_skb(skb);
2896 }
2897
2898 hw_prod = rxr->rx_pg_prod;
2899
2900 for (i = 0; i < count; i++) {
2901 prod = RX_PG_RING_IDX(hw_prod);
2902
2903 prod_rx_pg = &rxr->rx_pg_ring[prod];
2904 cons_rx_pg = &rxr->rx_pg_ring[cons];
2905 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2906 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2907
2908 if (prod != cons) {
2909 prod_rx_pg->page = cons_rx_pg->page;
2910 cons_rx_pg->page = NULL;
2911 pci_unmap_addr_set(prod_rx_pg, mapping,
2912 pci_unmap_addr(cons_rx_pg, mapping));
2913
2914 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2915 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2916
2917 }
2918 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2919 hw_prod = NEXT_RX_BD(hw_prod);
2920 }
2921 rxr->rx_pg_prod = hw_prod;
2922 rxr->rx_pg_cons = cons;
2923 }
2924
2925 static inline void
2926 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2927 struct sk_buff *skb, u16 cons, u16 prod)
2928 {
2929 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2930 struct rx_bd *cons_bd, *prod_bd;
2931
2932 cons_rx_buf = &rxr->rx_buf_ring[cons];
2933 prod_rx_buf = &rxr->rx_buf_ring[prod];
2934
2935 pci_dma_sync_single_for_device(bp->pdev,
2936 pci_unmap_addr(cons_rx_buf, mapping),
2937 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2938
2939 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2940
2941 prod_rx_buf->skb = skb;
2942
2943 if (cons == prod)
2944 return;
2945
2946 pci_unmap_addr_set(prod_rx_buf, mapping,
2947 pci_unmap_addr(cons_rx_buf, mapping));
2948
2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2951 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2952 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2953 }
2954
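/* Complete a received packet: replenish the ring, then either place the
 * whole frame in the skb linear area (hdr_len == 0) or attach the
 * remainder from the page ring as fragments.  As an illustrative
 * example, with hdr_len = 256 and a 1500-byte frame, frag_size =
 * 1500 + 4 - 256 = 1248, so one 4096-byte page carries the rest and the
 * final 4 FCS bytes are trimmed from the last fragment.
 */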
2955 static int
2956 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2957 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2958 u32 ring_idx)
2959 {
2960 int err;
2961 u16 prod = ring_idx & 0xffff;
2962
2963 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2964 if (unlikely(err)) {
2965 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2966 if (hdr_len) {
2967 unsigned int raw_len = len + 4;	/* include the 4-byte FCS */
2968 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2969
2970 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2971 }
2972 return err;
2973 }
2974
2975 skb_reserve(skb, BNX2_RX_OFFSET);
2976 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2977 PCI_DMA_FROMDEVICE);
2978
2979 if (hdr_len == 0) {
2980 skb_put(skb, len);
2981 return 0;
2982 } else {
2983 unsigned int i, frag_len, frag_size, pages;
2984 struct sw_pg *rx_pg;
2985 u16 pg_cons = rxr->rx_pg_cons;
2986 u16 pg_prod = rxr->rx_pg_prod;
2987
2988 frag_size = len + 4 - hdr_len;
2989 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2990 skb_put(skb, hdr_len);
2991
2992 for (i = 0; i < pages; i++) {
2993 dma_addr_t mapping_old;
2994
2995 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
/* Only (part of) the 4-byte FCS remains; recycle the unused
 * pages and trim the FCS bytes that already landed in the skb.
 */
2996 if (unlikely(frag_len <= 4)) {
2997 unsigned int tail = 4 - frag_len;
2998
2999 rxr->rx_pg_cons = pg_cons;
3000 rxr->rx_pg_prod = pg_prod;
3001 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3002 pages - i);
3003 skb->len -= tail;
3004 if (i == 0) {
3005 skb->tail -= tail;
3006 } else {
3007 skb_frag_t *frag =
3008 &skb_shinfo(skb)->frags[i - 1];
3009 frag->size -= tail;
3010 skb->data_len -= tail;
3011 skb->truesize -= tail;
3012 }
3013 return 0;
3014 }
3015 rx_pg = &rxr->rx_pg_ring[pg_cons];
3016
3017 /* Don't unmap yet. If we're unable to allocate a new
3018 * page, we need to recycle the page and the DMA addr.
3019 */
3020 mapping_old = pci_unmap_addr(rx_pg, mapping);
3021 if (i == pages - 1)
3022 frag_len -= 4;
3023
3024 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3025 rx_pg->page = NULL;
3026
3027 err = bnx2_alloc_rx_page(bp, rxr,
3028 RX_PG_RING_IDX(pg_prod));
3029 if (unlikely(err)) {
3030 rxr->rx_pg_cons = pg_cons;
3031 rxr->rx_pg_prod = pg_prod;
3032 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3033 pages - i);
3034 return err;
3035 }
3036
3037 pci_unmap_page(bp->pdev, mapping_old,
3038 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3039
3040 frag_size -= frag_len;
3041 skb->data_len += frag_len;
3042 skb->truesize += frag_len;
3043 skb->len += frag_len;
3044
3045 pg_prod = NEXT_RX_BD(pg_prod);
3046 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3047 }
3048 rxr->rx_pg_prod = pg_prod;
3049 rxr->rx_pg_cons = pg_cons;
3050 }
3051 return 0;
3052 }
3053
3054 static inline u16
3055 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3056 {
3057 u16 cons;
3058
3059 /* Tell compiler that status block fields can change. */
3060 barrier();
3061 cons = *bnapi->hw_rx_cons_ptr;
3062 barrier();
/* The last BD of each ring page is a next-page pointer, not a
 * real descriptor, so skip over it.
 */
3063 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3064 cons++;
3065 return cons;
3066 }
3067
3068 static int
3069 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3070 {
3071 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3072 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3073 struct l2_fhdr *rx_hdr;
3074 int rx_pkt = 0, pg_ring_used = 0;
3075
3076 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3077 sw_cons = rxr->rx_cons;
3078 sw_prod = rxr->rx_prod;
3079
3080 /* Memory barrier necessary as speculative reads of the rx
3081 * buffer can be ahead of the index in the status block.
3082 */
3083 rmb();
3084 while (sw_cons != hw_cons) {
3085 unsigned int len, hdr_len;
3086 u32 status;
3087 struct sw_bd *rx_buf;
3088 struct sk_buff *skb;
3089 dma_addr_t dma_addr;
3090 u16 vtag = 0;
3091 int hw_vlan __maybe_unused = 0;
3092
3093 sw_ring_cons = RX_RING_IDX(sw_cons);
3094 sw_ring_prod = RX_RING_IDX(sw_prod);
3095
3096 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3097 skb = rx_buf->skb;
3098
3099 rx_buf->skb = NULL;
3100
3101 dma_addr = pci_unmap_addr(rx_buf, mapping);
3102
3103 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3104 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3105 PCI_DMA_FROMDEVICE);
3106
3107 rx_hdr = (struct l2_fhdr *) skb->data;
3108 len = rx_hdr->l2_fhdr_pkt_len;
3109 status = rx_hdr->l2_fhdr_status;
3110
3111 hdr_len = 0;
3112 if (status & L2_FHDR_STATUS_SPLIT) {
3113 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3114 pg_ring_used = 1;
3115 } else if (len > bp->rx_jumbo_thresh) {
3116 hdr_len = bp->rx_jumbo_thresh;
3117 pg_ring_used = 1;
3118 }
3119
3120 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3121 L2_FHDR_ERRORS_PHY_DECODE |
3122 L2_FHDR_ERRORS_ALIGNMENT |
3123 L2_FHDR_ERRORS_TOO_SHORT |
3124 L2_FHDR_ERRORS_GIANT_FRAME))) {
3125
3126 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3127 sw_ring_prod);
3128 if (pg_ring_used) {
3129 int pages;
3130
3131 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3132
3133 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3134 }
3135 goto next_rx;
3136 }
3137
3138 len -= 4;	/* strip the 4-byte FCS */
3139
3140 if (len <= bp->rx_copy_thresh) {
3141 struct sk_buff *new_skb;
3142
3143 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3144 if (new_skb == NULL) {
3145 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3146 sw_ring_prod);
3147 goto next_rx;
3148 }
3149
3150 /* aligned copy */
3151 skb_copy_from_linear_data_offset(skb,
3152 BNX2_RX_OFFSET - 6,
3153 new_skb->data, len + 6);
3154 skb_reserve(new_skb, 6);
3155 skb_put(new_skb, len);
3156
3157 bnx2_reuse_rx_skb(bp, rxr, skb,
3158 sw_ring_cons, sw_ring_prod);
3159
3160 skb = new_skb;
3161 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3162 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3163 goto next_rx;
3164
3165 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3166 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3167 vtag = rx_hdr->l2_fhdr_vlan_tag;
3168 #ifdef BCM_VLAN
3169 if (bp->vlgrp)
3170 hw_vlan = 1;
3171 else
3172 #endif
3173 {
3174 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3175 __skb_push(skb, 4);
3176
3177 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3178 ve->h_vlan_proto = htons(ETH_P_8021Q);
3179 ve->h_vlan_TCI = htons(vtag);
3180 len += 4;
3181 }
3182 }
3183
3184 skb->protocol = eth_type_trans(skb, bp->dev);
3185
3186 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3187 (ntohs(skb->protocol) != ETH_P_8021Q)) {
3188
3189 dev_kfree_skb(skb);
3190 goto next_rx;
3191
3192 }
3193
3194 skb->ip_summed = CHECKSUM_NONE;
3195 if (bp->rx_csum &&
3196 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3197 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3198
3199 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3200 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3201 skb->ip_summed = CHECKSUM_UNNECESSARY;
3202 }
3203
3204 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3205
3206 #ifdef BCM_VLAN
3207 if (hw_vlan)
3208 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3209 else
3210 #endif
3211 netif_receive_skb(skb);
3212
3213 rx_pkt++;
3214
3215 next_rx:
3216 sw_cons = NEXT_RX_BD(sw_cons);
3217 sw_prod = NEXT_RX_BD(sw_prod);
3218
3219 if (rx_pkt == budget)
3220 break;
3221
3222 /* Refresh hw_cons to see if there is new work */
3223 if (sw_cons == hw_cons) {
3224 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3225 rmb();
3226 }
3227 }
3228 rxr->rx_cons = sw_cons;
3229 rxr->rx_prod = sw_prod;
3230
3231 if (pg_ring_used)
3232 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3233
3234 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3235
3236 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3237
3238 mmiowb();
3239
3240 return rx_pkt;
3241
3242 }
3243
3244 /* MSI ISR - The only difference between this and the INTx ISR
3245 * is that the MSI interrupt is always serviced.
3246 */
3247 static irqreturn_t
3248 bnx2_msi(int irq, void *dev_instance)
3249 {
3250 struct bnx2_napi *bnapi = dev_instance;
3251 struct bnx2 *bp = bnapi->bp;
3252
3253 prefetch(bnapi->status_blk.msi);
3254 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3255 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3256 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3257
3258 /* Return here if interrupt is disabled. */
3259 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3260 return IRQ_HANDLED;
3261
3262 napi_schedule(&bnapi->napi);
3263
3264 return IRQ_HANDLED;
3265 }
3266
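/* One-shot MSI ISR - in one-shot mode the host coalescing block masks
 * the interrupt by itself when it fires, so unlike bnx2_msi() no
 * explicit mask/ack write is needed before scheduling NAPI.
 */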
3267 static irqreturn_t
3268 bnx2_msi_1shot(int irq, void *dev_instance)
3269 {
3270 struct bnx2_napi *bnapi = dev_instance;
3271 struct bnx2 *bp = bnapi->bp;
3272
3273 prefetch(bnapi->status_blk.msi);
3274
3275 /* Return here if interrupt is disabled. */
3276 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3277 return IRQ_HANDLED;
3278
3279 napi_schedule(&bnapi->napi);
3280
3281 return IRQ_HANDLED;
3282 }
3283
3284 static irqreturn_t
3285 bnx2_interrupt(int irq, void *dev_instance)
3286 {
3287 struct bnx2_napi *bnapi = dev_instance;
3288 struct bnx2 *bp = bnapi->bp;
3289 struct status_block *sblk = bnapi->status_blk.msi;
3290
3291 /* When using INTx, it is possible for the interrupt to arrive
3292 * at the CPU before the status block posted prior to the
3293 * interrupt. Reading a register will flush the status block.
3294 * When using MSI, the MSI message will always complete after
3295 * the status block write.
3296 */
3297 if ((sblk->status_idx == bnapi->last_status_idx) &&
3298 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3299 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3300 return IRQ_NONE;
3301
3302 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3303 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3304 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3305
3306 /* Read back to deassert IRQ immediately to avoid too many
3307 * spurious interrupts.
3308 */
3309 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3310
3311 /* Return here if interrupt is shared and is disabled. */
3312 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3313 return IRQ_HANDLED;
3314
3315 if (napi_schedule_prep(&bnapi->napi)) {
3316 bnapi->last_status_idx = sblk->status_idx;
3317 __napi_schedule(&bnapi->napi);
3318 }
3319
3320 return IRQ_HANDLED;
3321 }
3322
3323 static inline int
3324 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3325 {
3326 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3327 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3328
3329 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3330 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3331 return 1;
3332 return 0;
3333 }
3334
3335 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3336 STATUS_ATTN_BITS_TIMER_ABORT)
3337
3338 static inline int
3339 bnx2_has_work(struct bnx2_napi *bnapi)
3340 {
3341 struct status_block *sblk = bnapi->status_blk.msi;
3342
3343 if (bnx2_has_fast_work(bnapi))
3344 return 1;
3345
3346 #ifdef BCM_CNIC
3347 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3348 return 1;
3349 #endif
3350
3351 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3352 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3353 return 1;
3354
3355 return 0;
3356 }
3357
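/* Work around occasionally lost MSIs: called periodically from the
 * timer, this checks whether work is pending while the status index
 * has not moved since the previous check.  If so, the MSI enable bit
 * is toggled and the ISR is invoked by hand to kick NAPI.
 */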
3358 static void
3359 bnx2_chk_missed_msi(struct bnx2 *bp)
3360 {
3361 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3362 u32 msi_ctrl;
3363
3364 if (bnx2_has_work(bnapi)) {
3365 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3366 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3367 return;
3368
3369 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3370 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3371 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3372 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3373 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3374 }
3375 }
3376
3377 bp->idle_chk_status_idx = bnapi->last_status_idx;
3378 }
3379
3380 #ifdef BCM_CNIC
3381 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3382 {
3383 struct cnic_ops *c_ops;
3384
3385 if (!bnapi->cnic_present)
3386 return;
3387
3388 rcu_read_lock();
3389 c_ops = rcu_dereference(bp->cnic_ops);
3390 if (c_ops)
3391 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3392 bnapi->status_blk.msi);
3393 rcu_read_unlock();
3394 }
3395 #endif
3396
3397 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3398 {
3399 struct status_block *sblk = bnapi->status_blk.msi;
3400 u32 status_attn_bits = sblk->status_attn_bits;
3401 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3402
3403 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3404 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3405
3406 bnx2_phy_int(bp, bnapi);
3407
3408 /* This is needed to take care of transient status
3409 * during link changes.
3410 */
3411 REG_WR(bp, BNX2_HC_COMMAND,
3412 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3413 REG_RD(bp, BNX2_HC_COMMAND);
3414 }
3415 }
3416
3417 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3418 int work_done, int budget)
3419 {
3420 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3421 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3422
3423 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3424 bnx2_tx_int(bp, bnapi, 0);
3425
3426 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3427 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3428
3429 return work_done;
3430 }
3431
3432 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3433 {
3434 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3435 struct bnx2 *bp = bnapi->bp;
3436 int work_done = 0;
3437 struct status_block_msix *sblk = bnapi->status_blk.msix;
3438
3439 while (1) {
3440 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3441 if (unlikely(work_done >= budget))
3442 break;
3443
3444 bnapi->last_status_idx = sblk->status_idx;
3445 /* status idx must be read before checking for more work. */
3446 rmb();
3447 if (likely(!bnx2_has_fast_work(bnapi))) {
3448
3449 napi_complete(napi);
3450 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3451 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3452 bnapi->last_status_idx);
3453 break;
3454 }
3455 }
3456 return work_done;
3457 }
3458
3459 static int bnx2_poll(struct napi_struct *napi, int budget)
3460 {
3461 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3462 struct bnx2 *bp = bnapi->bp;
3463 int work_done = 0;
3464 struct status_block *sblk = bnapi->status_blk.msi;
3465
3466 while (1) {
3467 bnx2_poll_link(bp, bnapi);
3468
3469 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3470
3471 #ifdef BCM_CNIC
3472 bnx2_poll_cnic(bp, bnapi);
3473 #endif
3474
3475 /* bnapi->last_status_idx is used below to tell the hw how
3476 * much work has been processed, so we must read it before
3477 * checking for more work.
3478 */
3479 bnapi->last_status_idx = sblk->status_idx;
3480
3481 if (unlikely(work_done >= budget))
3482 break;
3483
3484 rmb();
3485 if (likely(!bnx2_has_work(bnapi))) {
3486 napi_complete(napi);
3487 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3488 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3489 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3490 bnapi->last_status_idx);
3491 break;
3492 }
/* For INTx, write the new index with the interrupt still
 * masked, then re-enable it with a second write.
 */
3493 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3494 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3495 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3496 bnapi->last_status_idx);
3497 
3498 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3499 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3500 bnapi->last_status_idx);
3501 break;
3502 }
3503 }
3504
3505 return work_done;
3506 }
3507
3508 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3509 * from set_multicast.
3510 */
3511 static void
3512 bnx2_set_rx_mode(struct net_device *dev)
3513 {
3514 struct bnx2 *bp = netdev_priv(dev);
3515 u32 rx_mode, sort_mode;
3516 struct netdev_hw_addr *ha;
3517 int i;
3518
3519 if (!netif_running(dev))
3520 return;
3521
3522 spin_lock_bh(&bp->phy_lock);
3523
3524 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3525 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3526 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3527 #ifdef BCM_VLAN
3528 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3529 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3530 #else
3531 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3532 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3533 #endif
3534 if (dev->flags & IFF_PROMISC) {
3535 /* Promiscuous mode. */
3536 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3537 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3538 BNX2_RPM_SORT_USER0_PROM_VLAN;
3539 }
3540 else if (dev->flags & IFF_ALLMULTI) {
3541 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3542 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3543 0xffffffff);
3544 }
3545 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3546 }
3547 else {
3548 /* Accept one or more multicast(s). */
3549 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3550 u32 regidx;
3551 u32 bit;
3552 u32 crc;
3553
3554 memset(mc_filter, 0, sizeof(mc_filter));
3555
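/* Hash each multicast address into one bit of the 8 x 32-bit filter
 * registers: the low byte of the little-endian CRC selects bit 0-255,
 * its top 3 bits pick the register and the low 5 bits the bit within
 * it.  E.g. a CRC low byte of 0xb3 sets bit 19 of mc_filter[5].
 */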
3556 netdev_for_each_mc_addr(ha, dev) {
3557 crc = ether_crc_le(ETH_ALEN, ha->addr);
3558 bit = crc & 0xff;
3559 regidx = (bit & 0xe0) >> 5;
3560 bit &= 0x1f;
3561 mc_filter[regidx] |= (1 << bit);
3562 }
3563
3564 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3565 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3566 mc_filter[i]);
3567 }
3568
3569 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3570 }
3571
3572 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3573 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3574 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3575 BNX2_RPM_SORT_USER0_PROM_VLAN;
3576 } else if (!(dev->flags & IFF_PROMISC)) {
3577 /* Add all entries to the match filter list */
3578 i = 0;
3579 netdev_for_each_uc_addr(ha, dev) {
3580 bnx2_set_mac_addr(bp, ha->addr,
3581 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3582 sort_mode |= (1 <<
3583 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3584 i++;
3585 }
3586
3587 }
3588
3589 if (rx_mode != bp->rx_mode) {
3590 bp->rx_mode = rx_mode;
3591 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3592 }
3593
3594 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3595 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3596 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3597
3598 spin_unlock_bh(&bp->phy_lock);
3599 }
3600
3601 static int __devinit
3602 check_fw_section(const struct firmware *fw,
3603 const struct bnx2_fw_file_section *section,
3604 u32 alignment, bool non_empty)
3605 {
3606 u32 offset = be32_to_cpu(section->offset);
3607 u32 len = be32_to_cpu(section->len);
3608
3609 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3610 return -EINVAL;
3611 if ((non_empty && len == 0) || len > fw->size - offset ||
3612 len & (alignment - 1))
3613 return -EINVAL;
3614 return 0;
3615 }
3616
3617 static int __devinit
3618 check_mips_fw_entry(const struct firmware *fw,
3619 const struct bnx2_mips_fw_file_entry *entry)
3620 {
3621 if (check_fw_section(fw, &entry->text, 4, true) ||
3622 check_fw_section(fw, &entry->data, 4, false) ||
3623 check_fw_section(fw, &entry->rodata, 4, false))
3624 return -EINVAL;
3625 return 0;
3626 }
3627
3628 static int __devinit
3629 bnx2_request_firmware(struct bnx2 *bp)
3630 {
3631 const char *mips_fw_file, *rv2p_fw_file;
3632 const struct bnx2_mips_fw_file *mips_fw;
3633 const struct bnx2_rv2p_fw_file *rv2p_fw;
3634 int rc;
3635
3636 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3637 mips_fw_file = FW_MIPS_FILE_09;
3638 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3639 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3640 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3641 else
3642 rv2p_fw_file = FW_RV2P_FILE_09;
3643 } else {
3644 mips_fw_file = FW_MIPS_FILE_06;
3645 rv2p_fw_file = FW_RV2P_FILE_06;
3646 }
3647
3648 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3649 if (rc) {
3650 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3651 return rc;
3652 }
3653
3654 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3655 if (rc) {
3656 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3657 return rc;
3658 }
3659 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3660 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3661 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3662 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3663 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3664 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3665 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3666 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3667 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3668 return -EINVAL;
3669 }
3670 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3671 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3672 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3673 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3674 return -EINVAL;
3675 }
3676
3677 return 0;
3678 }
3679
3680 static u32
3681 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3682 {
3683 switch (idx) {
3684 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3685 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3686 rv2p_code |= RV2P_BD_PAGE_SIZE;
3687 break;
3688 }
3689 return rv2p_code;
3690 }
3691
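/* Load one RV2P processor image.  Each 64-bit instruction is written as
 * an INSTR_HIGH/INSTR_LOW pair and committed by writing its word index
 * (i / 8) plus the processor's RDWR command to the address register.
 * The fixup table then patches selected instructions after loading,
 * e.g. the BD page size for PROC1.
 */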
3692 static int
3693 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3694 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3695 {
3696 u32 rv2p_code_len, file_offset;
3697 __be32 *rv2p_code;
3698 int i;
3699 u32 val, cmd, addr;
3700
3701 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3702 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3703
3704 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3705
3706 if (rv2p_proc == RV2P_PROC1) {
3707 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3708 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3709 } else {
3710 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3711 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3712 }
3713
3714 for (i = 0; i < rv2p_code_len; i += 8) {
3715 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3716 rv2p_code++;
3717 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3718 rv2p_code++;
3719
3720 val = (i / 8) | cmd;
3721 REG_WR(bp, addr, val);
3722 }
3723
3724 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3725 for (i = 0; i < 8; i++) {
3726 u32 loc, code;
3727
3728 loc = be32_to_cpu(fw_entry->fixup[i]);
3729 if (loc && ((loc * 4) < rv2p_code_len)) {
3730 code = be32_to_cpu(*(rv2p_code + loc - 1));
3731 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3732 code = be32_to_cpu(*(rv2p_code + loc));
3733 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3734 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3735
3736 val = (loc / 2) | cmd;
3737 REG_WR(bp, addr, val);
3738 }
3739 }
3740
3741 /* Reset the processor, un-stall is done later. */
3742 if (rv2p_proc == RV2P_PROC1) {
3743 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3744 }
3745 else {
3746 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3747 }
3748
3749 return 0;
3750 }
3751
3752 static int
3753 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3754 const struct bnx2_mips_fw_file_entry *fw_entry)
3755 {
3756 u32 addr, len, file_offset;
3757 __be32 *data;
3758 u32 offset;
3759 u32 val;
3760
3761 /* Halt the CPU. */
3762 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3763 val |= cpu_reg->mode_value_halt;
3764 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3765 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3766
3767 /* Load the Text area. */
3768 addr = be32_to_cpu(fw_entry->text.addr);
3769 len = be32_to_cpu(fw_entry->text.len);
3770 file_offset = be32_to_cpu(fw_entry->text.offset);
3771 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3772
3773 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3774 if (len) {
3775 int j;
3776
3777 for (j = 0; j < (len / 4); j++, offset += 4)
3778 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3779 }
3780
3781 /* Load the Data area. */
3782 addr = be32_to_cpu(fw_entry->data.addr);
3783 len = be32_to_cpu(fw_entry->data.len);
3784 file_offset = be32_to_cpu(fw_entry->data.offset);
3785 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3786
3787 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3788 if (len) {
3789 int j;
3790
3791 for (j = 0; j < (len / 4); j++, offset += 4)
3792 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3793 }
3794
3795 /* Load the Read-Only area. */
3796 addr = be32_to_cpu(fw_entry->rodata.addr);
3797 len = be32_to_cpu(fw_entry->rodata.len);
3798 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3799 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3800
3801 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3802 if (len) {
3803 int j;
3804
3805 for (j = 0; j < (len / 4); j++, offset += 4)
3806 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3807 }
3808
3809 /* Clear the pre-fetch instruction. */
3810 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3811
3812 val = be32_to_cpu(fw_entry->start_addr);
3813 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3814
3815 /* Start the CPU. */
3816 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3817 val &= ~cpu_reg->mode_value_halt;
3818 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3819 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3820
3821 return 0;
3822 }
3823
3824 static int
3825 bnx2_init_cpus(struct bnx2 *bp)
3826 {
3827 const struct bnx2_mips_fw_file *mips_fw =
3828 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3829 const struct bnx2_rv2p_fw_file *rv2p_fw =
3830 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3831 int rc;
3832
3833 /* Initialize the RV2P processor. */
3834 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3835 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3836
3837 /* Initialize the RX Processor. */
3838 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3839 if (rc)
3840 goto init_cpu_err;
3841
3842 /* Initialize the TX Processor. */
3843 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3844 if (rc)
3845 goto init_cpu_err;
3846
3847 /* Initialize the TX Patch-up Processor. */
3848 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3849 if (rc)
3850 goto init_cpu_err;
3851
3852 /* Initialize the Completion Processor. */
3853 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3854 if (rc)
3855 goto init_cpu_err;
3856
3857 /* Initialize the Command Processor. */
3858 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3859
3860 init_cpu_err:
3861 return rc;
3862 }
3863
3864 static int
3865 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3866 {
3867 u16 pmcsr;
3868
3869 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3870
3871 switch (state) {
3872 case PCI_D0: {
3873 u32 val;
3874
3875 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3876 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3877 PCI_PM_CTRL_PME_STATUS);
3878
3879 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3880 /* delay required during transition out of D3hot */
3881 msleep(20);
3882
3883 val = REG_RD(bp, BNX2_EMAC_MODE);
3884 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3885 val &= ~BNX2_EMAC_MODE_MPKT;
3886 REG_WR(bp, BNX2_EMAC_MODE, val);
3887
3888 val = REG_RD(bp, BNX2_RPM_CONFIG);
3889 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3890 REG_WR(bp, BNX2_RPM_CONFIG, val);
3891 break;
3892 }
3893 case PCI_D3hot: {
3894 int i;
3895 u32 val, wol_msg;
3896
3897 if (bp->wol) {
3898 u32 advertising;
3899 u8 autoneg;
3900
3901 autoneg = bp->autoneg;
3902 advertising = bp->advertising;
3903
3904 if (bp->phy_port == PORT_TP) {
3905 bp->autoneg = AUTONEG_SPEED;
3906 bp->advertising = ADVERTISED_10baseT_Half |
3907 ADVERTISED_10baseT_Full |
3908 ADVERTISED_100baseT_Half |
3909 ADVERTISED_100baseT_Full |
3910 ADVERTISED_Autoneg;
3911 }
3912
3913 spin_lock_bh(&bp->phy_lock);
3914 bnx2_setup_phy(bp, bp->phy_port);
3915 spin_unlock_bh(&bp->phy_lock);
3916
3917 bp->autoneg = autoneg;
3918 bp->advertising = advertising;
3919
3920 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3921
3922 val = REG_RD(bp, BNX2_EMAC_MODE);
3923
3924 /* Enable port mode. */
3925 val &= ~BNX2_EMAC_MODE_PORT;
3926 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3927 BNX2_EMAC_MODE_ACPI_RCVD |
3928 BNX2_EMAC_MODE_MPKT;
3929 if (bp->phy_port == PORT_TP)
3930 val |= BNX2_EMAC_MODE_PORT_MII;
3931 else {
3932 val |= BNX2_EMAC_MODE_PORT_GMII;
3933 if (bp->line_speed == SPEED_2500)
3934 val |= BNX2_EMAC_MODE_25G_MODE;
3935 }
3936
3937 REG_WR(bp, BNX2_EMAC_MODE, val);
3938
3939 /* receive all multicast */
3940 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3941 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3942 0xffffffff);
3943 }
3944 REG_WR(bp, BNX2_EMAC_RX_MODE,
3945 BNX2_EMAC_RX_MODE_SORT_MODE);
3946
3947 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3948 BNX2_RPM_SORT_USER0_MC_EN;
3949 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3950 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3951 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3952 BNX2_RPM_SORT_USER0_ENA);
3953
3954 /* Need to enable EMAC and RPM for WOL. */
3955 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3956 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3957 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3958 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3959
3960 val = REG_RD(bp, BNX2_RPM_CONFIG);
3961 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3962 REG_WR(bp, BNX2_RPM_CONFIG, val);
3963
3964 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3965 }
3966 else {
3967 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3968 }
3969
3970 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3971 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3972 1, 0);
3973
3974 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
/* Select D3hot (state field value 3); the 5706 A0/A1 are left
 * in D0 unless WOL is enabled.
 */
3975 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3976 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3977
3978 if (bp->wol)
3979 pmcsr |= 3;
3980 }
3981 else {
3982 pmcsr |= 3;
3983 }
3984 if (bp->wol) {
3985 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3986 }
3987 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3988 pmcsr);
3989
3990 /* No more memory access after this point until
3991 * device is brought back to D0.
3992 */
3993 udelay(50);
3994 break;
3995 }
3996 default:
3997 return -EINVAL;
3998 }
3999 return 0;
4000 }
4001
4002 static int
4003 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4004 {
4005 u32 val;
4006 int j;
4007
4008 /* Request access to the flash interface. */
4009 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4010 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4011 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4012 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4013 break;
4014
4015 udelay(5);
4016 }
4017
4018 if (j >= NVRAM_TIMEOUT_COUNT)
4019 return -EBUSY;
4020
4021 return 0;
4022 }
4023
4024 static int
4025 bnx2_release_nvram_lock(struct bnx2 *bp)
4026 {
4027 int j;
4028 u32 val;
4029
4030 /* Relinquish nvram interface. */
4031 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4032
4033 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4034 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4035 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4036 break;
4037
4038 udelay(5);
4039 }
4040
4041 if (j >= NVRAM_TIMEOUT_COUNT)
4042 return -EBUSY;
4043
4044 return 0;
4045 }
4046
4047
4048 static int
4049 bnx2_enable_nvram_write(struct bnx2 *bp)
4050 {
4051 u32 val;
4052
4053 val = REG_RD(bp, BNX2_MISC_CFG);
4054 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4055
4056 if (bp->flash_info->flags & BNX2_NV_WREN) {
4057 int j;
4058
4059 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4060 REG_WR(bp, BNX2_NVM_COMMAND,
4061 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4062
4063 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4064 udelay(5);
4065
4066 val = REG_RD(bp, BNX2_NVM_COMMAND);
4067 if (val & BNX2_NVM_COMMAND_DONE)
4068 break;
4069 }
4070
4071 if (j >= NVRAM_TIMEOUT_COUNT)
4072 return -EBUSY;
4073 }
4074 return 0;
4075 }
4076
4077 static void
4078 bnx2_disable_nvram_write(struct bnx2 *bp)
4079 {
4080 u32 val;
4081
4082 val = REG_RD(bp, BNX2_MISC_CFG);
4083 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4084 }
4085
4086
4087 static void
4088 bnx2_enable_nvram_access(struct bnx2 *bp)
4089 {
4090 u32 val;
4091
4092 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4093 /* Enable both bits, even on read. */
4094 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4095 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4096 }
4097
4098 static void
4099 bnx2_disable_nvram_access(struct bnx2 *bp)
4100 {
4101 u32 val;
4102
4103 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4104 /* Disable both bits, even after read. */
4105 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4106 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4107 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4108 }
4109
4110 static int
4111 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4112 {
4113 u32 cmd;
4114 int j;
4115
4116 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4117 /* Buffered flash, no erase needed */
4118 return 0;
4119
4120 /* Build an erase command */
4121 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4122 BNX2_NVM_COMMAND_DOIT;
4123
4124 /* Need to clear DONE bit separately. */
4125 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4126
4127 /* Address of the NVRAM page to erase. */
4128 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4129
4130 /* Issue an erase command. */
4131 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4132
4133 /* Wait for completion. */
4134 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4135 u32 val;
4136
4137 udelay(5);
4138
4139 val = REG_RD(bp, BNX2_NVM_COMMAND);
4140 if (val & BNX2_NVM_COMMAND_DONE)
4141 break;
4142 }
4143
4144 if (j >= NVRAM_TIMEOUT_COUNT)
4145 return -EBUSY;
4146
4147 return 0;
4148 }
4149
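/* Read one 32-bit word of NVRAM at @offset into @ret_val (stored
 * big-endian).  Parts flagged BNX2_NV_TRANSLATE use page/byte addressing,
 * so the linear offset is translated first; for example, on a part with
 * 264-byte pages and 9 page-address bits, offset 265 maps to
 * (265 / 264) << 9 | (265 % 264) = 0x201.
 */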
4150 static int
4151 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4152 {
4153 u32 cmd;
4154 int j;
4155
4156 /* Build the command word. */
4157 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4158
4159 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4160 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4161 offset = ((offset / bp->flash_info->page_size) <<
4162 bp->flash_info->page_bits) +
4163 (offset % bp->flash_info->page_size);
4164 }
4165
4166 /* Need to clear DONE bit separately. */
4167 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4168
4169 /* Address of the NVRAM to read from. */
4170 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4171
4172 /* Issue a read command. */
4173 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4174
4175 /* Wait for completion. */
4176 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4177 u32 val;
4178
4179 udelay(5);
4180
4181 val = REG_RD(bp, BNX2_NVM_COMMAND);
4182 if (val & BNX2_NVM_COMMAND_DONE) {
4183 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4184 memcpy(ret_val, &v, 4);
4185 break;
4186 }
4187 }
4188 if (j >= NVRAM_TIMEOUT_COUNT)
4189 return -EBUSY;
4190
4191 return 0;
4192 }
4193
4194
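/* Write one 32-bit word to NVRAM, the mirror image of
 * bnx2_nvram_read_dword(): same DONE handshake and same address
 * translation, with the four data bytes loaded into BNX2_NVM_WRITE
 * ahead of the command.
 */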
4195 static int
4196 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4197 {
4198 u32 cmd;
4199 __be32 val32;
4200 int j;
4201
4202 /* Build the command word. */
4203 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4204
4205 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4206 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4207 offset = ((offset / bp->flash_info->page_size) <<
4208 bp->flash_info->page_bits) +
4209 (offset % bp->flash_info->page_size);
4210 }
4211
4212 /* Need to clear DONE bit separately. */
4213 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4214
4215 memcpy(&val32, val, 4);
4216
4217 /* Write the data. */
4218 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4219
4220 /* Address of the NVRAM to write to. */
4221 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4222
4223 /* Issue the write command. */
4224 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4225
4226 /* Wait for completion. */
4227 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4228 udelay(5);
4229
4230 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4231 break;
4232 }
4233 if (j >= NVRAM_TIMEOUT_COUNT)
4234 return -EBUSY;
4235
4236 return 0;
4237 }
4238
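/* Identify the attached flash/EEPROM.  The 5709 always uses flash_5709.
 * On older chips the strapping bits in BNX2_NVM_CFG1 select an entry from
 * flash_table[]; if the interface has not been reconfigured yet, the
 * matching entry's timing values are programmed into NVM_CFG1..CFG3 and
 * NVM_WRITE1.  The usable size is taken from shared memory when the
 * bootcode reports one, else from the table entry.
 */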
4239 static int
4240 bnx2_init_nvram(struct bnx2 *bp)
4241 {
4242 u32 val;
4243 int j, entry_count, rc = 0;
4244 const struct flash_spec *flash;
4245
4246 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4247 bp->flash_info = &flash_5709;
4248 goto get_flash_size;
4249 }
4250
4251 /* Determine the selected interface. */
4252 val = REG_RD(bp, BNX2_NVM_CFG1);
4253
4254 entry_count = ARRAY_SIZE(flash_table);
4255
4256 if (val & 0x40000000) {
4257
4258 /* Flash interface has been reconfigured */
4259 for (j = 0, flash = &flash_table[0]; j < entry_count;
4260 j++, flash++) {
4261 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4262 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4263 bp->flash_info = flash;
4264 break;
4265 }
4266 }
4267 }
4268 else {
4269 u32 mask;
4270 /* Not yet reconfigured */
4271
4272 if (val & (1 << 23))
4273 mask = FLASH_BACKUP_STRAP_MASK;
4274 else
4275 mask = FLASH_STRAP_MASK;
4276
4277 for (j = 0, flash = &flash_table[0]; j < entry_count;
4278 j++, flash++) {
4279
4280 if ((val & mask) == (flash->strapping & mask)) {
4281 bp->flash_info = flash;
4282
4283 /* Request access to the flash interface. */
4284 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4285 return rc;
4286
4287 /* Enable access to flash interface */
4288 bnx2_enable_nvram_access(bp);
4289
4290 /* Reconfigure the flash interface */
4291 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4292 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4293 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4294 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4295
4296 /* Disable access to flash interface */
4297 bnx2_disable_nvram_access(bp);
4298 bnx2_release_nvram_lock(bp);
4299
4300 break;
4301 }
4302 }
4303 } /* if (val & 0x40000000) */
4304
4305 if (j == entry_count) {
4306 bp->flash_info = NULL;
4307 pr_alert("Unknown flash/EEPROM type\n");
4308 return -ENODEV;
4309 }
4310
4311 get_flash_size:
4312 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4313 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4314 if (val)
4315 bp->flash_size = val;
4316 else
4317 bp->flash_size = bp->flash_info->total_size;
4318
4319 return rc;
4320 }
4321
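/* Read an arbitrary byte range from NVRAM.  The hardware only moves
 * whole dwords, so a misaligned head and tail are bounced through a
 * 4-byte scratch buffer while the aligned middle is copied straight
 * into @ret_buf, with FIRST/LAST command flags bracketing the burst.
 */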
4322 static int
4323 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4324 int buf_size)
4325 {
4326 int rc = 0;
4327 u32 cmd_flags, offset32, len32, extra;
4328
4329 if (buf_size == 0)
4330 return 0;
4331
4332 /* Request access to the flash interface. */
4333 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4334 return rc;
4335
4336 /* Enable access to flash interface */
4337 bnx2_enable_nvram_access(bp);
4338
4339 len32 = buf_size;
4340 offset32 = offset;
4341 extra = 0;
4342
4343 cmd_flags = 0;
4344
4345 if (offset32 & 3) {
4346 u8 buf[4];
4347 u32 pre_len;
4348
4349 offset32 &= ~3;
4350 pre_len = 4 - (offset & 3);
4351
4352 if (pre_len >= len32) {
4353 pre_len = len32;
4354 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4355 BNX2_NVM_COMMAND_LAST;
4356 }
4357 else {
4358 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4359 }
4360
4361 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4362
4363 if (rc)
4364 return rc;
4365
4366 memcpy(ret_buf, buf + (offset & 3), pre_len);
4367
4368 offset32 += 4;
4369 ret_buf += pre_len;
4370 len32 -= pre_len;
4371 }
4372 if (len32 & 3) {
4373 extra = 4 - (len32 & 3);
4374 len32 = (len32 + 4) & ~3;
4375 }
4376
4377 if (len32 == 4) {
4378 u8 buf[4];
4379
4380 if (cmd_flags)
4381 cmd_flags = BNX2_NVM_COMMAND_LAST;
4382 else
4383 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4384 BNX2_NVM_COMMAND_LAST;
4385
4386 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4387
4388 memcpy(ret_buf, buf, 4 - extra);
4389 }
4390 else if (len32 > 0) {
4391 u8 buf[4];
4392
4393 /* Read the first word. */
4394 if (cmd_flags)
4395 cmd_flags = 0;
4396 else
4397 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4398
4399 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4400
4401 /* Advance to the next dword. */
4402 offset32 += 4;
4403 ret_buf += 4;
4404 len32 -= 4;
4405
4406 while (len32 > 4 && rc == 0) {
4407 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4408
4409 /* Advance to the next dword. */
4410 offset32 += 4;
4411 ret_buf += 4;
4412 len32 -= 4;
4413 }
4414
4415 if (rc)
4416 return rc;
4417
4418 cmd_flags = BNX2_NVM_COMMAND_LAST;
4419 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4420
4421 memcpy(ret_buf, buf, 4 - extra);
4422 }
4423
4424 /* Disable access to flash interface */
4425 bnx2_disable_nvram_access(bp);
4426
4427 bnx2_release_nvram_lock(bp);
4428
4429 return rc;
4430 }
4431
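/* Write an arbitrary byte range to NVRAM.  Misaligned edges are merged
 * with data read back from the device first.  Non-buffered parts are
 * then handled page by page: read the page into a scratch buffer, erase
 * it, and rewrite old and new data; buffered parts are simply written in
 * place.  The NVRAM lock is held per page, not for the whole transfer.
 */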
4432 static int
4433 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4434 int buf_size)
4435 {
4436 u32 written, offset32, len32;
4437 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4438 int rc = 0;
4439 int align_start, align_end;
4440
4441 buf = data_buf;
4442 offset32 = offset;
4443 len32 = buf_size;
4444 align_start = align_end = 0;
4445
4446 if ((align_start = (offset32 & 3))) {
4447 offset32 &= ~3;
4448 len32 += align_start;
4449 if (len32 < 4)
4450 len32 = 4;
4451 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4452 return rc;
4453 }
4454
4455 if (len32 & 3) {
4456 align_end = 4 - (len32 & 3);
4457 len32 += align_end;
4458 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4459 return rc;
4460 }
4461
4462 if (align_start || align_end) {
4463 align_buf = kmalloc(len32, GFP_KERNEL);
4464 if (align_buf == NULL)
4465 return -ENOMEM;
4466 if (align_start) {
4467 memcpy(align_buf, start, 4);
4468 }
4469 if (align_end) {
4470 memcpy(align_buf + len32 - 4, end, 4);
4471 }
4472 memcpy(align_buf + align_start, data_buf, buf_size);
4473 buf = align_buf;
4474 }
4475
4476 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4477 flash_buffer = kmalloc(264, GFP_KERNEL);
4478 if (flash_buffer == NULL) {
4479 rc = -ENOMEM;
4480 goto nvram_write_end;
4481 }
4482 }
4483
4484 written = 0;
4485 while ((written < len32) && (rc == 0)) {
4486 u32 page_start, page_end, data_start, data_end;
4487 u32 addr, cmd_flags;
4488 int i;
4489
4490 /* Find the page_start addr */
4491 page_start = offset32 + written;
4492 page_start -= (page_start % bp->flash_info->page_size);
4493 /* Find the page_end addr */
4494 page_end = page_start + bp->flash_info->page_size;
4495 /* Find the data_start addr */
4496 data_start = (written == 0) ? offset32 : page_start;
4497 /* Find the data_end addr */
4498 data_end = (page_end > offset32 + len32) ?
4499 (offset32 + len32) : page_end;
4500
4501 /* Request access to the flash interface. */
4502 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4503 goto nvram_write_end;
4504
4505 /* Enable access to flash interface */
4506 bnx2_enable_nvram_access(bp);
4507
4508 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4509 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4510 int j;
4511
4512 /* Read the whole page into the buffer
4513 * (non-buffered flash only) */
4514 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4515 if (j == (bp->flash_info->page_size - 4)) {
4516 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4517 }
4518 rc = bnx2_nvram_read_dword(bp,
4519 page_start + j,
4520 &flash_buffer[j],
4521 cmd_flags);
4522
4523 if (rc)
4524 goto nvram_write_end;
4525
4526 cmd_flags = 0;
4527 }
4528 }
4529
4530 /* Enable writes to flash interface (unlock write-protect) */
4531 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4532 goto nvram_write_end;
4533
4534 /* Loop to write back the buffer data from page_start to
4535 * data_start */
4536 i = 0;
4537 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4538 /* Erase the page */
4539 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4540 goto nvram_write_end;
4541
4542 /* Re-enable writes for the actual write pass */
4543 bnx2_enable_nvram_write(bp);
4544
4545 for (addr = page_start; addr < data_start;
4546 addr += 4, i += 4) {
4547
4548 rc = bnx2_nvram_write_dword(bp, addr,
4549 &flash_buffer[i], cmd_flags);
4550
4551 if (rc != 0)
4552 goto nvram_write_end;
4553
4554 cmd_flags = 0;
4555 }
4556 }
4557
4558 /* Loop to write the new data from data_start to data_end */
4559 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4560 if ((addr == page_end - 4) ||
4561 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4562 (addr == data_end - 4))) {
4563
4564 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4565 }
4566 rc = bnx2_nvram_write_dword(bp, addr, buf,
4567 cmd_flags);
4568
4569 if (rc != 0)
4570 goto nvram_write_end;
4571
4572 cmd_flags = 0;
4573 buf += 4;
4574 }
4575
4576 /* Loop to write back the buffer data from data_end
4577 * to page_end */
4578 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4579 for (addr = data_end; addr < page_end;
4580 addr += 4, i += 4) {
4581
4582 if (addr == page_end-4) {
4583 cmd_flags = BNX2_NVM_COMMAND_LAST;
4584 }
4585 rc = bnx2_nvram_write_dword(bp, addr,
4586 &flash_buffer[i], cmd_flags);
4587
4588 if (rc != 0)
4589 goto nvram_write_end;
4590
4591 cmd_flags = 0;
4592 }
4593 }
4594
4595 /* Disable writes to flash interface (lock write-protect) */
4596 bnx2_disable_nvram_write(bp);
4597
4598 /* Disable access to flash interface */
4599 bnx2_disable_nvram_access(bp);
4600 bnx2_release_nvram_lock(bp);
4601
4602 /* Increment written */
4603 written += data_end - data_start;
4604 }
4605
4606 nvram_write_end:
4607 kfree(flash_buffer);
4608 kfree(align_buf);
4609 return rc;
4610 }
4611
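/* Read the bootcode capability mailbox.  When the signature matches,
 * this learns whether VLAN tags may be kept across resets (management
 * firmware may need them stripped otherwise) and whether the SerDes PHY
 * is remotely managed by the firmware, and acks the capabilities the
 * driver will actually use.
 */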
4612 static void
4613 bnx2_init_fw_cap(struct bnx2 *bp)
4614 {
4615 u32 val, sig = 0;
4616
4617 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4618 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4619
4620 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4621 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4622
4623 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4624 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4625 return;
4626
4627 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4628 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4629 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4630 }
4631
4632 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4633 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4634 u32 link;
4635
4636 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4637
4638 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4639 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4640 bp->phy_port = PORT_FIBRE;
4641 else
4642 bp->phy_port = PORT_TP;
4643
4644 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4645 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4646 }
4647
4648 if (netif_running(bp->dev) && sig)
4649 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4650 }
4651
4652 static void
4653 bnx2_setup_msix_tbl(struct bnx2 *bp)
4654 {
4655 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4656
4657 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4658 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4659 }
4660
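/* Soft-reset the chip.  DMA engines are quiesced and the firmware is
 * warned before the core reset is issued -- via BNX2_MISC_COMMAND on the
 * 5709 and via BNX2_PCICFG_MISC_CONFIG on older chips.  The swap
 * diagnostic register is then read back to confirm the configured endian
 * mode, and per-chip-rev fixups are applied at the end.
 */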
4661 static int
4662 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4663 {
4664 u32 val;
4665 int i, rc = 0;
4666 u8 old_port;
4667
4668 /* Wait for the current PCI transaction to complete before
4669 * issuing a reset. */
4670 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4671 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4672 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4673 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4674 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4675 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4676 udelay(5);
4677
4678 /* Wait for the firmware to tell us it is ok to issue a reset. */
4679 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4680
4681 /* Deposit a driver reset signature so the firmware knows that
4682 * this is a soft reset. */
4683 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4684 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4685
4686 /* Do a dummy read to force the chip to complete all current transactions
4687 * before we issue a reset. */
4688 val = REG_RD(bp, BNX2_MISC_ID);
4689
4690 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4691 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4692 REG_RD(bp, BNX2_MISC_COMMAND);
4693 udelay(5);
4694
4695 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4696 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4697
4698 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4699
4700 } else {
4701 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4702 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4703 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4704
4705 /* Chip reset. */
4706 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4707
4708 /* Reading back any register after chip reset will hang the
4709 * bus on 5706 A0 and A1. The msleep below provides plenty
4710 * of margin for write posting.
4711 */
4712 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4713 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4714 msleep(20);
4715
4716 /* Reset takes approximately 30 usec */
4717 for (i = 0; i < 10; i++) {
4718 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4719 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4720 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4721 break;
4722 udelay(10);
4723 }
4724
4725 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4726 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4727 pr_err("Chip reset did not complete\n");
4728 return -EBUSY;
4729 }
4730 }
4731
4732 /* Make sure byte swapping is properly configured. */
4733 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4734 if (val != 0x01020304) {
4735 pr_err("Chip not in correct endian mode\n");
4736 return -ENODEV;
4737 }
4738
4739 /* Wait for the firmware to finish its initialization. */
4740 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4741 if (rc)
4742 return rc;
4743
4744 spin_lock_bh(&bp->phy_lock);
4745 old_port = bp->phy_port;
4746 bnx2_init_fw_cap(bp);
4747 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4748 old_port != bp->phy_port)
4749 bnx2_set_default_remote_link(bp);
4750 spin_unlock_bh(&bp->phy_lock);
4751
4752 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4753 /* Adjust the voltage regulator two steps lower. The default
4754 * of this register is 0x0000000e. */
4755 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4756
4757 /* Remove bad rbuf memory from the free pool. */
4758 rc = bnx2_alloc_bad_rbuf(bp);
4759 }
4760
4761 if (bp->flags & BNX2_FLAG_USING_MSIX)
4762 bnx2_setup_msix_tbl(bp);
4763
4764 return rc;
4765 }
4766
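/* Program the freshly reset chip up to an operational state: DMA
 * byte-swap configuration, context memory, on-chip CPUs and NVRAM, MTU
 * and rx buffer sizing, host coalescing parameters for every interrupt
 * vector, and the rx filter, ending with the WAIT2/RESET handshake that
 * tells the firmware initialization is done.
 */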
4767 static int
4768 bnx2_init_chip(struct bnx2 *bp)
4769 {
4770 u32 val, mtu;
4771 int rc, i;
4772
4773 /* Make sure the interrupt is not active. */
4774 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4775
4776 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4777 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4778 #ifdef __BIG_ENDIAN
4779 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4780 #endif
4781 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4782 DMA_READ_CHANS << 12 |
4783 DMA_WRITE_CHANS << 16;
4784
4785 val |= (0x2 << 20) | (1 << 11);
4786
4787 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4788 val |= (1 << 23);
4789
4790 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4791 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4792 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4793
4794 REG_WR(bp, BNX2_DMA_CONFIG, val);
4795
4796 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4797 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4798 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4799 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4800 }
4801
4802 if (bp->flags & BNX2_FLAG_PCIX) {
4803 u16 val16;
4804
4805 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4806 &val16);
4807 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4808 val16 & ~PCI_X_CMD_ERO);
4809 }
4810
4811 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4812 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4813 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4814 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4815
4816 /* Initialize context mapping and zero out the quick contexts. The
4817 * context block must have already been enabled. */
4818 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4819 rc = bnx2_init_5709_context(bp);
4820 if (rc)
4821 return rc;
4822 } else
4823 bnx2_init_context(bp);
4824
4825 if ((rc = bnx2_init_cpus(bp)) != 0)
4826 return rc;
4827
4828 bnx2_init_nvram(bp);
4829
4830 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4831
4832 val = REG_RD(bp, BNX2_MQ_CONFIG);
4833 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4834 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4835 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4836 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4837 if (CHIP_REV(bp) == CHIP_REV_Ax)
4838 val |= BNX2_MQ_CONFIG_HALT_DIS;
4839 }
4840
4841 REG_WR(bp, BNX2_MQ_CONFIG, val);
4842
4843 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4844 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4845 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4846
4847 val = (BCM_PAGE_BITS - 8) << 24;
4848 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4849
4850 /* Configure page size. */
4851 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4852 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4853 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4854 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4855
4856 val = bp->mac_addr[0] +
4857 (bp->mac_addr[1] << 8) +
4858 (bp->mac_addr[2] << 16) +
4859 bp->mac_addr[3] +
4860 (bp->mac_addr[4] << 8) +
4861 (bp->mac_addr[5] << 16);
4862 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4863
4864 /* Program the MTU. Also include 4 bytes for CRC32. */
4865 mtu = bp->dev->mtu;
4866 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4867 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4868 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4869 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4870
4871 if (mtu < 1500)
4872 mtu = 1500;
4873
4874 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4875 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4876 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4877
4878 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4879 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4880 bp->bnx2_napi[i].last_status_idx = 0;
4881
4882 bp->idle_chk_status_idx = 0xffff;
4883
4884 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4885
4886 /* Set up how to generate a link change interrupt. */
4887 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4888
4889 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4890 (u64) bp->status_blk_mapping & 0xffffffff);
4891 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4892
4893 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4894 (u64) bp->stats_blk_mapping & 0xffffffff);
4895 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4896 (u64) bp->stats_blk_mapping >> 32);
4897
4898 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4899 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4900
4901 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4902 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4903
4904 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4905 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4906
4907 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4908
4909 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4910
4911 REG_WR(bp, BNX2_HC_COM_TICKS,
4912 (bp->com_ticks_int << 16) | bp->com_ticks);
4913
4914 REG_WR(bp, BNX2_HC_CMD_TICKS,
4915 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4916
4917 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4918 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4919 else
4920 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4921 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4922
4923 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4924 val = BNX2_HC_CONFIG_COLLECT_STATS;
4925 else {
4926 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4927 BNX2_HC_CONFIG_COLLECT_STATS;
4928 }
4929
4930 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4931 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4932 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4933
4934 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4935 }
4936
4937 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4938 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4939
4940 REG_WR(bp, BNX2_HC_CONFIG, val);
4941
4942 for (i = 1; i < bp->irq_nvecs; i++) {
4943 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4944 BNX2_HC_SB_CONFIG_1;
4945
4946 REG_WR(bp, base,
4947 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4948 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4949 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4950
4951 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4952 (bp->tx_quick_cons_trip_int << 16) |
4953 bp->tx_quick_cons_trip);
4954
4955 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4956 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4957
4958 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4959 (bp->rx_quick_cons_trip_int << 16) |
4960 bp->rx_quick_cons_trip);
4961
4962 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4963 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4964 }
4965
4966 /* Clear internal stats counters. */
4967 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4968
4969 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4970
4971 /* Initialize the receive filter. */
4972 bnx2_set_rx_mode(bp->dev);
4973
4974 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4975 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4976 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4977 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4978 }
4979 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4980 1, 0);
4981
4982 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4983 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4984
4985 udelay(20);
4986
4987 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4988
4989 return rc;
4990 }
4991
4992 static void
4993 bnx2_clear_ring_states(struct bnx2 *bp)
4994 {
4995 struct bnx2_napi *bnapi;
4996 struct bnx2_tx_ring_info *txr;
4997 struct bnx2_rx_ring_info *rxr;
4998 int i;
4999
5000 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5001 bnapi = &bp->bnx2_napi[i];
5002 txr = &bnapi->tx_ring;
5003 rxr = &bnapi->rx_ring;
5004
5005 txr->tx_cons = 0;
5006 txr->hw_tx_cons = 0;
5007 rxr->rx_prod_bseq = 0;
5008 rxr->rx_prod = 0;
5009 rxr->rx_cons = 0;
5010 rxr->rx_pg_prod = 0;
5011 rxr->rx_pg_cons = 0;
5012 }
5013 }
5014
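/* Point the chip's tx context at the host tx descriptor ring.  The 5709
 * uses the XI context offsets, older chips the original ones, but the
 * payload is identical: an L2 context type, a command type word, and the
 * 64-bit ring base address split into high and low halves.
 */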
5015 static void
5016 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5017 {
5018 u32 val, offset0, offset1, offset2, offset3;
5019 u32 cid_addr = GET_CID_ADDR(cid);
5020
5021 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5022 offset0 = BNX2_L2CTX_TYPE_XI;
5023 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5024 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5025 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5026 } else {
5027 offset0 = BNX2_L2CTX_TYPE;
5028 offset1 = BNX2_L2CTX_CMD_TYPE;
5029 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5030 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5031 }
5032 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5033 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5034
5035 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5036 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5037
5038 val = (u64) txr->tx_desc_mapping >> 32;
5039 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5040
5041 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5042 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5043 }
5044
5045 static void
5046 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5047 {
5048 struct tx_bd *txbd;
5049 u32 cid = TX_CID;
5050 struct bnx2_napi *bnapi;
5051 struct bnx2_tx_ring_info *txr;
5052
5053 bnapi = &bp->bnx2_napi[ring_num];
5054 txr = &bnapi->tx_ring;
5055
5056 if (ring_num == 0)
5057 cid = TX_CID;
5058 else
5059 cid = TX_TSS_CID + ring_num - 1;
5060
5061 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5062
5063 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5064
5065 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5066 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5067
5068 txr->tx_prod = 0;
5069 txr->tx_prod_bseq = 0;
5070
5071 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5072 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5073
5074 bnx2_init_tx_context(bp, cid, txr);
5075 }
5076
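/* Fill one or more pages of rx buffer descriptors.  Each BD gets the
 * fixed buffer size and START|END flags; the final BD of every page is
 * the chain pointer to the next page, and the last page links back to
 * the first so that the pages form a closed ring.
 */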
5077 static void
5078 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5079 int num_rings)
5080 {
5081 int i;
5082 struct rx_bd *rxbd;
5083
5084 for (i = 0; i < num_rings; i++) {
5085 int j;
5086
5087 rxbd = &rx_ring[i][0];
5088 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5089 rxbd->rx_bd_len = buf_size;
5090 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5091 }
5092 if (i == (num_rings - 1))
5093 j = 0;
5094 else
5095 j = i + 1;
5096 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5097 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5098 }
5099 }
5100
5101 static void
5102 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5103 {
5104 int i;
5105 u16 prod, ring_prod;
5106 u32 cid, rx_cid_addr, val;
5107 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5108 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5109
5110 if (ring_num == 0)
5111 cid = RX_CID;
5112 else
5113 cid = RX_RSS_CID + ring_num - 1;
5114
5115 rx_cid_addr = GET_CID_ADDR(cid);
5116
5117 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5118 bp->rx_buf_use_size, bp->rx_max_ring);
5119
5120 bnx2_init_rx_context(bp, cid);
5121
5122 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5123 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5124 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5125 }
5126
5127 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5128 if (bp->rx_pg_ring_size) {
5129 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5130 rxr->rx_pg_desc_mapping,
5131 PAGE_SIZE, bp->rx_max_pg_ring);
5132 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5133 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5134 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5135 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5136
5137 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5138 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5139
5140 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5141 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5142
5143 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5144 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5145 }
5146
5147 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5148 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5149
5150 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5151 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5152
5153 ring_prod = prod = rxr->rx_pg_prod;
5154 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5155 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5156 netdev_warn(bp->dev, "initialized rx page ring %d with %d/%d pages only\n",
5157 ring_num, i, bp->rx_pg_ring_size);
5158 break;
5159 }
5160 prod = NEXT_RX_BD(prod);
5161 ring_prod = RX_PG_RING_IDX(prod);
5162 }
5163 rxr->rx_pg_prod = prod;
5164
5165 ring_prod = prod = rxr->rx_prod;
5166 for (i = 0; i < bp->rx_ring_size; i++) {
5167 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5168 netdev_warn(bp->dev, "initialized rx ring %d with %d/%d skbs only\n",
5169 ring_num, i, bp->rx_ring_size);
5170 break;
5171 }
5172 prod = NEXT_RX_BD(prod);
5173 ring_prod = RX_RING_IDX(prod);
5174 }
5175 rxr->rx_prod = prod;
5176
5177 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5178 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5179 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5180
5181 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5182 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5183
5184 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5185 }
5186
5187 static void
5188 bnx2_init_all_rings(struct bnx2 *bp)
5189 {
5190 int i;
5191 u32 val;
5192
5193 bnx2_clear_ring_states(bp);
5194
5195 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5196 for (i = 0; i < bp->num_tx_rings; i++)
5197 bnx2_init_tx_ring(bp, i);
5198
5199 if (bp->num_tx_rings > 1)
5200 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5201 (TX_TSS_CID << 7));
5202
5203 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5204 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5205
5206 for (i = 0; i < bp->num_rx_rings; i++)
5207 bnx2_init_rx_ring(bp, i);
5208
5209 if (bp->num_rx_rings > 1) {
5210 u32 tbl_32;
5211 u8 *tbl = (u8 *) &tbl_32;
5212
5213 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5214 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5215
5216 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5217 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5218 if ((i % 4) == 3)
5219 bnx2_reg_wr_ind(bp,
5220 BNX2_RXP_SCRATCH_RSS_TBL + i,
5221 cpu_to_be32(tbl_32));
5222 }
5223
5224 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5225 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5226
5227 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5228
5229 }
5230 }
5231
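/* Work out how many descriptor pages a ring of @ring_size entries
 * needs, rounded up to a power of two and capped at @max_size, so that
 * ring indices can be reduced with a simple mask.
 */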
5232 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5233 {
5234 u32 max, num_rings = 1;
5235
5236 while (ring_size > MAX_RX_DESC_CNT) {
5237 ring_size -= MAX_RX_DESC_CNT;
5238 num_rings++;
5239 }
5240 /* round to next power of 2 */
5241 max = max_size;
5242 while ((max & num_rings) == 0)
5243 max >>= 1;
5244
5245 if (num_rings != max)
5246 max <<= 1;
5247
5248 return max;
5249 }
5250
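/* Derive all rx buffer sizing from the current MTU.  If a full frame
 * plus skb overhead no longer fits in a page and jumbo placement is not
 * broken on this chip, the rx page ring is enabled: skbs then carry only
 * the header portion up to the copy threshold, and the rest of the frame
 * is placed into attached pages.
 */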
5251 static void
5252 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5253 {
5254 u32 rx_size, rx_space, jumbo_size;
5255
5256 /* 8 for CRC and VLAN */
5257 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5258
5259 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5260 sizeof(struct skb_shared_info);
5261
5262 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5263 bp->rx_pg_ring_size = 0;
5264 bp->rx_max_pg_ring = 0;
5265 bp->rx_max_pg_ring_idx = 0;
5266 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5267 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5268
5269 jumbo_size = size * pages;
5270 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5271 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5272
5273 bp->rx_pg_ring_size = jumbo_size;
5274 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5275 MAX_RX_PG_RINGS);
5276 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5277 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5278 bp->rx_copy_thresh = 0;
5279 }
5280
5281 bp->rx_buf_use_size = rx_size;
5282 /* hw alignment */
5283 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5284 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5285 bp->rx_ring_size = size;
5286 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5287 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5288 }
5289
5290 static void
5291 bnx2_free_tx_skbs(struct bnx2 *bp)
5292 {
5293 int i;
5294
5295 for (i = 0; i < bp->num_tx_rings; i++) {
5296 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5297 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5298 int j;
5299
5300 if (txr->tx_buf_ring == NULL)
5301 continue;
5302
5303 for (j = 0; j < TX_DESC_CNT; ) {
5304 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5305 struct sk_buff *skb = tx_buf->skb;
5306 int k, last;
5307
5308 if (skb == NULL) {
5309 j++;
5310 continue;
5311 }
5312
5313 pci_unmap_single(bp->pdev,
5314 pci_unmap_addr(tx_buf, mapping),
5315 skb_headlen(skb),
5316 PCI_DMA_TODEVICE);
5317
5318 tx_buf->skb = NULL;
5319
5320 last = tx_buf->nr_frags;
5321 j++;
5322 for (k = 0; k < last; k++, j++) {
5323 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5324 pci_unmap_page(bp->pdev,
5325 pci_unmap_addr(tx_buf, mapping),
5326 skb_shinfo(skb)->frags[k].size,
5327 PCI_DMA_TODEVICE);
5328 }
5329 dev_kfree_skb(skb);
5330 }
5331 }
5332 }
5333
5334 static void
5335 bnx2_free_rx_skbs(struct bnx2 *bp)
5336 {
5337 int i;
5338
5339 for (i = 0; i < bp->num_rx_rings; i++) {
5340 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5341 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5342 int j;
5343
5344 if (rxr->rx_buf_ring == NULL)
5345 return;
5346
5347 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5348 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5349 struct sk_buff *skb = rx_buf->skb;
5350
5351 if (skb == NULL)
5352 continue;
5353
5354 pci_unmap_single(bp->pdev,
5355 pci_unmap_addr(rx_buf, mapping),
5356 bp->rx_buf_use_size,
5357 PCI_DMA_FROMDEVICE);
5358
5359 rx_buf->skb = NULL;
5360
5361 dev_kfree_skb(skb);
5362 }
5363 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5364 bnx2_free_rx_page(bp, rxr, j);
5365 }
5366 }
5367
5368 static void
5369 bnx2_free_skbs(struct bnx2 *bp)
5370 {
5371 bnx2_free_tx_skbs(bp);
5372 bnx2_free_rx_skbs(bp);
5373 }
5374
5375 static int
5376 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5377 {
5378 int rc;
5379
5380 rc = bnx2_reset_chip(bp, reset_code);
5381 bnx2_free_skbs(bp);
5382 if (rc)
5383 return rc;
5384
5385 if ((rc = bnx2_init_chip(bp)) != 0)
5386 return rc;
5387
5388 bnx2_init_all_rings(bp);
5389 return 0;
5390 }
5391
5392 static int
5393 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5394 {
5395 int rc;
5396
5397 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5398 return rc;
5399
5400 spin_lock_bh(&bp->phy_lock);
5401 bnx2_init_phy(bp, reset_phy);
5402 bnx2_set_link(bp);
5403 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5404 bnx2_remote_phy_event(bp);
5405 spin_unlock_bh(&bp->phy_lock);
5406 return 0;
5407 }
5408
5409 static int
5410 bnx2_shutdown_chip(struct bnx2 *bp)
5411 {
5412 u32 reset_code;
5413
5414 if (bp->flags & BNX2_FLAG_NO_WOL)
5415 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5416 else if (bp->wol)
5417 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5418 else
5419 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5420
5421 return bnx2_reset_chip(bp, reset_code);
5422 }
5423
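/* Ethtool register self-test.  Each table entry gives a register
 * offset, a mask of read/write bits and a mask of read-only bits.  Zeros
 * and ones are written in turn: r/w bits must take the written value,
 * read-only bits must keep their saved value, and the original contents
 * are restored whether or not the register passes.
 */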
5424 static int
5425 bnx2_test_registers(struct bnx2 *bp)
5426 {
5427 int ret;
5428 int i, is_5709;
5429 static const struct {
5430 u16 offset;
5431 u16 flags;
5432 #define BNX2_FL_NOT_5709 1
5433 u32 rw_mask;
5434 u32 ro_mask;
5435 } reg_tbl[] = {
5436 { 0x006c, 0, 0x00000000, 0x0000003f },
5437 { 0x0090, 0, 0xffffffff, 0x00000000 },
5438 { 0x0094, 0, 0x00000000, 0x00000000 },
5439
5440 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5441 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5442 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5443 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5444 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5445 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5446 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5447 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5448 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5449
5450 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5451 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5452 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5453 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5454 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5455 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5456
5457 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5458 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5459 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5460
5461 { 0x1000, 0, 0x00000000, 0x00000001 },
5462 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5463
5464 { 0x1408, 0, 0x01c00800, 0x00000000 },
5465 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5466 { 0x14a8, 0, 0x00000000, 0x000001ff },
5467 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5468 { 0x14b0, 0, 0x00000002, 0x00000001 },
5469 { 0x14b8, 0, 0x00000000, 0x00000000 },
5470 { 0x14c0, 0, 0x00000000, 0x00000009 },
5471 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5472 { 0x14cc, 0, 0x00000000, 0x00000001 },
5473 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5474
5475 { 0x1800, 0, 0x00000000, 0x00000001 },
5476 { 0x1804, 0, 0x00000000, 0x00000003 },
5477
5478 { 0x2800, 0, 0x00000000, 0x00000001 },
5479 { 0x2804, 0, 0x00000000, 0x00003f01 },
5480 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5481 { 0x2810, 0, 0xffff0000, 0x00000000 },
5482 { 0x2814, 0, 0xffff0000, 0x00000000 },
5483 { 0x2818, 0, 0xffff0000, 0x00000000 },
5484 { 0x281c, 0, 0xffff0000, 0x00000000 },
5485 { 0x2834, 0, 0xffffffff, 0x00000000 },
5486 { 0x2840, 0, 0x00000000, 0xffffffff },
5487 { 0x2844, 0, 0x00000000, 0xffffffff },
5488 { 0x2848, 0, 0xffffffff, 0x00000000 },
5489 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5490
5491 { 0x2c00, 0, 0x00000000, 0x00000011 },
5492 { 0x2c04, 0, 0x00000000, 0x00030007 },
5493
5494 { 0x3c00, 0, 0x00000000, 0x00000001 },
5495 { 0x3c04, 0, 0x00000000, 0x00070000 },
5496 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5497 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5498 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5499 { 0x3c14, 0, 0x00000000, 0xffffffff },
5500 { 0x3c18, 0, 0x00000000, 0xffffffff },
5501 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5502 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5503
5504 { 0x5004, 0, 0x00000000, 0x0000007f },
5505 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5506
5507 { 0x5c00, 0, 0x00000000, 0x00000001 },
5508 { 0x5c04, 0, 0x00000000, 0x0003000f },
5509 { 0x5c08, 0, 0x00000003, 0x00000000 },
5510 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5511 { 0x5c10, 0, 0x00000000, 0xffffffff },
5512 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5513 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5514 { 0x5c88, 0, 0x00000000, 0x00077373 },
5515 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5516
5517 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5518 { 0x680c, 0, 0xffffffff, 0x00000000 },
5519 { 0x6810, 0, 0xffffffff, 0x00000000 },
5520 { 0x6814, 0, 0xffffffff, 0x00000000 },
5521 { 0x6818, 0, 0xffffffff, 0x00000000 },
5522 { 0x681c, 0, 0xffffffff, 0x00000000 },
5523 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5524 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5525 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5526 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5527 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5528 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5529 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5530 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5531 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5532 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5533 { 0x684c, 0, 0xffffffff, 0x00000000 },
5534 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5535 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5536 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5537 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5538 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5539 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5540
5541 { 0xffff, 0, 0x00000000, 0x00000000 },
5542 };
5543
5544 ret = 0;
5545 is_5709 = 0;
5546 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5547 is_5709 = 1;
5548
5549 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5550 u32 offset, rw_mask, ro_mask, save_val, val;
5551 u16 flags = reg_tbl[i].flags;
5552
5553 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5554 continue;
5555
5556 offset = (u32) reg_tbl[i].offset;
5557 rw_mask = reg_tbl[i].rw_mask;
5558 ro_mask = reg_tbl[i].ro_mask;
5559
5560 save_val = readl(bp->regview + offset);
5561
5562 writel(0, bp->regview + offset);
5563
5564 val = readl(bp->regview + offset);
5565 if ((val & rw_mask) != 0) {
5566 goto reg_test_err;
5567 }
5568
5569 if ((val & ro_mask) != (save_val & ro_mask)) {
5570 goto reg_test_err;
5571 }
5572
5573 writel(0xffffffff, bp->regview + offset);
5574
5575 val = readl(bp->regview + offset);
5576 if ((val & rw_mask) != rw_mask) {
5577 goto reg_test_err;
5578 }
5579
5580 if ((val & ro_mask) != (save_val & ro_mask)) {
5581 goto reg_test_err;
5582 }
5583
5584 writel(save_val, bp->regview + offset);
5585 continue;
5586
5587 reg_test_err:
5588 writel(save_val, bp->regview + offset);
5589 ret = -ENODEV;
5590 break;
5591 }
5592 return ret;
5593 }
5594
5595 static int
5596 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5597 {
5598 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5599 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5600 int i;
5601
5602 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5603 u32 offset;
5604
5605 for (offset = 0; offset < size; offset += 4) {
5606
5607 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5608
5609 if (bnx2_reg_rd_ind(bp, start + offset) !=
5610 test_pattern[i]) {
5611 return -ENODEV;
5612 }
5613 }
5614 }
5615 return 0;
5616 }
5617
5618 static int
5619 bnx2_test_memory(struct bnx2 *bp)
5620 {
5621 int ret = 0;
5622 int i;
5623 static struct mem_entry {
5624 u32 offset;
5625 u32 len;
5626 } mem_tbl_5706[] = {
5627 { 0x60000, 0x4000 },
5628 { 0xa0000, 0x3000 },
5629 { 0xe0000, 0x4000 },
5630 { 0x120000, 0x4000 },
5631 { 0x1a0000, 0x4000 },
5632 { 0x160000, 0x4000 },
5633 { 0xffffffff, 0 },
5634 },
5635 mem_tbl_5709[] = {
5636 { 0x60000, 0x4000 },
5637 { 0xa0000, 0x3000 },
5638 { 0xe0000, 0x4000 },
5639 { 0x120000, 0x4000 },
5640 { 0x1a0000, 0x4000 },
5641 { 0xffffffff, 0 },
5642 };
5643 struct mem_entry *mem_tbl;
5644
5645 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5646 mem_tbl = mem_tbl_5709;
5647 else
5648 mem_tbl = mem_tbl_5706;
5649
5650 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5651 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5652 mem_tbl[i].len)) != 0) {
5653 return ret;
5654 }
5655 }
5656
5657 return ret;
5658 }
5659
5660 #define BNX2_MAC_LOOPBACK 0
5661 #define BNX2_PHY_LOOPBACK 1
5662
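/* Single-packet loopback self-test, in MAC or PHY loopback mode.  A
 * test frame (destination = our own MAC, incrementing payload bytes) is
 * placed directly on tx ring 0 and coalescing is forced, then the frame
 * must reappear on rx ring 0 with a clean l2_fhdr status, the expected
 * length and an unmodified payload.
 */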
5663 static int
5664 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5665 {
5666 unsigned int pkt_size, num_pkts, i;
5667 struct sk_buff *skb, *rx_skb;
5668 unsigned char *packet;
5669 u16 rx_start_idx, rx_idx;
5670 dma_addr_t map;
5671 struct tx_bd *txbd;
5672 struct sw_bd *rx_buf;
5673 struct l2_fhdr *rx_hdr;
5674 int ret = -ENODEV;
5675 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5676 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5677 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5678
5679 tx_napi = bnapi;
5680
5681 txr = &tx_napi->tx_ring;
5682 rxr = &bnapi->rx_ring;
5683 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5684 bp->loopback = MAC_LOOPBACK;
5685 bnx2_set_mac_loopback(bp);
5686 }
5687 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5688 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5689 return 0;
5690
5691 bp->loopback = PHY_LOOPBACK;
5692 bnx2_set_phy_loopback(bp);
5693 }
5694 else
5695 return -EINVAL;
5696
5697 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5698 skb = netdev_alloc_skb(bp->dev, pkt_size);
5699 if (!skb)
5700 return -ENOMEM;
5701 packet = skb_put(skb, pkt_size);
5702 memcpy(packet, bp->dev->dev_addr, 6);
5703 memset(packet + 6, 0x0, 8);
5704 for (i = 14; i < pkt_size; i++)
5705 packet[i] = (unsigned char) (i & 0xff);
5706
5707 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5708 PCI_DMA_TODEVICE);
5709 if (pci_dma_mapping_error(bp->pdev, map)) {
5710 dev_kfree_skb(skb);
5711 return -EIO;
5712 }
5713
5714 REG_WR(bp, BNX2_HC_COMMAND,
5715 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5716
5717 REG_RD(bp, BNX2_HC_COMMAND);
5718
5719 udelay(5);
5720 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5721
5722 num_pkts = 0;
5723
5724 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5725
5726 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5727 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5728 txbd->tx_bd_mss_nbytes = pkt_size;
5729 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5730
5731 num_pkts++;
5732 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5733 txr->tx_prod_bseq += pkt_size;
5734
5735 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5736 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5737
5738 udelay(100);
5739
5740 REG_WR(bp, BNX2_HC_COMMAND,
5741 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5742
5743 REG_RD(bp, BNX2_HC_COMMAND);
5744
5745 udelay(5);
5746
5747 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5748 dev_kfree_skb(skb);
5749
5750 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5751 goto loopback_test_done;
5752
5753 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5754 if (rx_idx != rx_start_idx + num_pkts) {
5755 goto loopback_test_done;
5756 }
5757
5758 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5759 rx_skb = rx_buf->skb;
5760
5761 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5762 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5763
5764 pci_dma_sync_single_for_cpu(bp->pdev,
5765 pci_unmap_addr(rx_buf, mapping),
5766 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5767
5768 if (rx_hdr->l2_fhdr_status &
5769 (L2_FHDR_ERRORS_BAD_CRC |
5770 L2_FHDR_ERRORS_PHY_DECODE |
5771 L2_FHDR_ERRORS_ALIGNMENT |
5772 L2_FHDR_ERRORS_TOO_SHORT |
5773 L2_FHDR_ERRORS_GIANT_FRAME)) {
5774
5775 goto loopback_test_done;
5776 }
5777
5778 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5779 goto loopback_test_done;
5780 }
5781
5782 for (i = 14; i < pkt_size; i++) {
5783 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5784 goto loopback_test_done;
5785 }
5786 }
5787
5788 ret = 0;
5789
5790 loopback_test_done:
5791 bp->loopback = 0;
5792 return ret;
5793 }
5794
5795 #define BNX2_MAC_LOOPBACK_FAILED 1
5796 #define BNX2_PHY_LOOPBACK_FAILED 2
5797 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5798 BNX2_PHY_LOOPBACK_FAILED)
5799
5800 static int
5801 bnx2_test_loopback(struct bnx2 *bp)
5802 {
5803 int rc = 0;
5804
5805 if (!netif_running(bp->dev))
5806 return BNX2_LOOPBACK_FAILED;
5807
5808 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5809 spin_lock_bh(&bp->phy_lock);
5810 bnx2_init_phy(bp, 1);
5811 spin_unlock_bh(&bp->phy_lock);
5812 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5813 rc |= BNX2_MAC_LOOPBACK_FAILED;
5814 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5815 rc |= BNX2_PHY_LOOPBACK_FAILED;
5816 return rc;
5817 }
5818
5819 #define NVRAM_SIZE 0x200
5820 #define CRC32_RESIDUAL 0xdebb20e3
5821
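/* Ethtool NVRAM self-test: verify the 0x669955aa signature dword at
 * offset 0, then CRC-check the two 256-byte halves of the block at
 * offset 0x100.  Running CRC32 over a block that ends with its own
 * stored CRC yields the constant residual 0xdebb20e3, which is what
 * ether_crc_le() is compared against here.
 */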
5822 static int
5823 bnx2_test_nvram(struct bnx2 *bp)
5824 {
5825 __be32 buf[NVRAM_SIZE / 4];
5826 u8 *data = (u8 *) buf;
5827 int rc = 0;
5828 u32 magic, csum;
5829
5830 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5831 goto test_nvram_done;
5832
5833 magic = be32_to_cpu(buf[0]);
5834 if (magic != 0x669955aa) {
5835 rc = -ENODEV;
5836 goto test_nvram_done;
5837 }
5838
5839 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5840 goto test_nvram_done;
5841
5842 csum = ether_crc_le(0x100, data);
5843 if (csum != CRC32_RESIDUAL) {
5844 rc = -ENODEV;
5845 goto test_nvram_done;
5846 }
5847
5848 csum = ether_crc_le(0x100, data + 0x100);
5849 if (csum != CRC32_RESIDUAL) {
5850 rc = -ENODEV;
5851 }
5852
5853 test_nvram_done:
5854 return rc;
5855 }
5856
5857 static int
5858 bnx2_test_link(struct bnx2 *bp)
5859 {
5860 u32 bmsr;
5861
5862 if (!netif_running(bp->dev))
5863 return -ENODEV;
5864
5865 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5866 if (bp->link_up)
5867 return 0;
5868 return -ENODEV;
5869 }
5870 spin_lock_bh(&bp->phy_lock);
5871 bnx2_enable_bmsr1(bp);
5872 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5873 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5874 bnx2_disable_bmsr1(bp);
5875 spin_unlock_bh(&bp->phy_lock);
5876
5877 if (bmsr & BMSR_LSTATUS) {
5878 return 0;
5879 }
5880 return -ENODEV;
5881 }
5882
5883 static int
5884 bnx2_test_intr(struct bnx2 *bp)
5885 {
5886 int i;
5887 u16 status_idx;
5888
5889 if (!netif_running(bp->dev))
5890 return -ENODEV;
5891
5892 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5893
5894 /* This register is not touched during run-time. */
5895 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5896 REG_RD(bp, BNX2_HC_COMMAND);
5897
5898 for (i = 0; i < 10; i++) {
5899 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5900 status_idx) {
5901
5902 break;
5903 }
5904
5905 msleep_interruptible(10);
5906 }
5907 if (i < 10)
5908 return 0;
5909
5910 return -ENODEV;
5911 }
5912
5913 /* Determine link status for parallel detection. */
5914 static int
5915 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5916 {
5917 u32 mode_ctl, an_dbg, exp;
5918
5919 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5920 return 0;
5921
5922 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5923 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5924
5925 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5926 return 0;
5927
5928 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5929 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5930 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5931
5932 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5933 return 0;
5934
5935 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5936 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5937 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5938
5939 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5940 return 0;
5941
5942 return 1;
5943 }
5944
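/* 5706 SerDes link maintenance, called from bnx2_timer().  With autoneg
 * enabled but no link, a successful parallel detection forces 1 Gbps
 * full duplex; autoneg is re-enabled once the 0x17/0x15 shadow registers
 * show partner activity again.  Loss of sync while up forces the link
 * down so bnx2_set_link() can rerun the state machine.
 */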
5945 static void
5946 bnx2_5706_serdes_timer(struct bnx2 *bp)
5947 {
5948 int check_link = 1;
5949
5950 spin_lock(&bp->phy_lock);
5951 if (bp->serdes_an_pending) {
5952 bp->serdes_an_pending--;
5953 check_link = 0;
5954 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5955 u32 bmcr;
5956
5957 bp->current_interval = BNX2_TIMER_INTERVAL;
5958
5959 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5960
5961 if (bmcr & BMCR_ANENABLE) {
5962 if (bnx2_5706_serdes_has_link(bp)) {
5963 bmcr &= ~BMCR_ANENABLE;
5964 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5965 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5966 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5967 }
5968 }
5969 }
5970 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5971 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5972 u32 phy2;
5973
5974 bnx2_write_phy(bp, 0x17, 0x0f01);
5975 bnx2_read_phy(bp, 0x15, &phy2);
5976 if (phy2 & 0x20) {
5977 u32 bmcr;
5978
5979 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5980 bmcr |= BMCR_ANENABLE;
5981 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5982
5983 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5984 }
5985 } else
5986 bp->current_interval = BNX2_TIMER_INTERVAL;
5987
5988 if (check_link) {
5989 u32 val;
5990
5991 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5992 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5993 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5994
5995 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5996 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5997 bnx2_5706s_force_link_dn(bp, 1);
5998 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5999 } else
6000 bnx2_set_link(bp);
6001 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6002 bnx2_set_link(bp);
6003 }
6004 spin_unlock(&bp->phy_lock);
6005 }
6006
6007 static void
6008 bnx2_5708_serdes_timer(struct bnx2 *bp)
6009 {
6010 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6011 return;
6012
6013 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6014 bp->serdes_an_pending = 0;
6015 return;
6016 }
6017
6018 spin_lock(&bp->phy_lock);
6019 if (bp->serdes_an_pending)
6020 bp->serdes_an_pending--;
6021 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6022 u32 bmcr;
6023
6024 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6025 if (bmcr & BMCR_ANENABLE) {
6026 bnx2_enable_forced_2g5(bp);
6027 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6028 } else {
6029 bnx2_disable_forced_2g5(bp);
6030 bp->serdes_an_pending = 2;
6031 bp->current_interval = BNX2_TIMER_INTERVAL;
6032 }
6033
6034 } else
6035 bp->current_interval = BNX2_TIMER_INTERVAL;
6036
6037 spin_unlock(&bp->phy_lock);
6038 }
6039
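/* Periodic housekeeping: check for the missed-MSI erratum, send the
 * firmware heartbeat, refresh the firmware rx-drop counter, force a
 * statistics DMA on chips with broken stats blocks, and run the SerDes
 * timers.  Re-arms itself with the current interval.
 */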
6040 static void
6041 bnx2_timer(unsigned long data)
6042 {
6043 struct bnx2 *bp = (struct bnx2 *) data;
6044
6045 if (!netif_running(bp->dev))
6046 return;
6047
6048 if (atomic_read(&bp->intr_sem) != 0)
6049 goto bnx2_restart_timer;
6050
6051 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6052 BNX2_FLAG_USING_MSI)
6053 bnx2_chk_missed_msi(bp);
6054
6055 bnx2_send_heart_beat(bp);
6056
6057 bp->stats_blk->stat_FwRxDrop =
6058 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6059
6060 /* workaround occasional corrupted counters */
6061 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6062 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6063 BNX2_HC_COMMAND_STATS_NOW);
6064
6065 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6066 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6067 bnx2_5706_serdes_timer(bp);
6068 else
6069 bnx2_5708_serdes_timer(bp);
6070 }
6071
6072 bnx2_restart_timer:
6073 mod_timer(&bp->timer, jiffies + bp->current_interval);
6074 }
6075
6076 static int
6077 bnx2_request_irq(struct bnx2 *bp)
6078 {
6079 unsigned long flags;
6080 struct bnx2_irq *irq;
6081 int rc = 0, i;
6082
6083 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6084 flags = 0;
6085 else
6086 flags = IRQF_SHARED;
6087
6088 for (i = 0; i < bp->irq_nvecs; i++) {
6089 irq = &bp->irq_tbl[i];
6090 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6091 &bp->bnx2_napi[i]);
6092 if (rc)
6093 break;
6094 irq->requested = 1;
6095 }
6096 return rc;
6097 }
6098
6099 static void
6100 bnx2_free_irq(struct bnx2 *bp)
6101 {
6102 struct bnx2_irq *irq;
6103 int i;
6104
6105 for (i = 0; i < bp->irq_nvecs; i++) {
6106 irq = &bp->irq_tbl[i];
6107 if (irq->requested)
6108 free_irq(irq->vector, &bp->bnx2_napi[i]);
6109 irq->requested = 0;
6110 }
6111 if (bp->flags & BNX2_FLAG_USING_MSI)
6112 pci_disable_msi(bp->pdev);
6113 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6114 pci_disable_msix(bp->pdev);
6115
6116 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6117 }
6118
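/* Switch to MSI-X.  The MSI-X vector table and PBA are exposed through
 * GRC windows 2 and 3, flushed with a read of BNX2_PCI_MSIX_CONTROL,
 * before the vectors are requested from the PCI core.  On success each
 * vector is named "<ifname>-<n>" and wired to the one-shot handler.
 */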
6119 static void
6120 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6121 {
6122 int i, rc;
6123 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6124 struct net_device *dev = bp->dev;
6125 const int len = sizeof(bp->irq_tbl[0].name);
6126
6127 bnx2_setup_msix_tbl(bp);
6128 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6129 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6130 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6131
6132 /* Need to flush the previous three writes to ensure MSI-X
6133 * is set up properly */
6134 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6135
6136 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6137 msix_ent[i].entry = i;
6138 msix_ent[i].vector = 0;
6139 }
6140
6141 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6142 if (rc != 0)
6143 return;
6144
6145 bp->irq_nvecs = msix_vecs;
6146 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6147 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6148 bp->irq_tbl[i].vector = msix_ent[i].vector;
6149 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6150 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6151 }
6152 }
6153
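/* Choose the interrupt mode in order of preference: MSI-X (one vector
 * per online CPU plus one, capped at RX_MAX_RINGS) when more than one
 * CPU is online, then single-vector MSI, then legacy INTx.  The tx
 * ring count is rounded down to a power of two, presumably so queue
 * selection can use simple masking.
 */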
6154 static void
6155 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6156 {
6157 int cpus = num_online_cpus();
6158 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6159
6160 bp->irq_tbl[0].handler = bnx2_interrupt;
6161 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6162 bp->irq_nvecs = 1;
6163 bp->irq_tbl[0].vector = bp->pdev->irq;
6164
6165 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6166 bnx2_enable_msix(bp, msix_vecs);
6167
6168 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6169 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6170 if (pci_enable_msi(bp->pdev) == 0) {
6171 bp->flags |= BNX2_FLAG_USING_MSI;
6172 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6173 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6174 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6175 } else
6176 bp->irq_tbl[0].handler = bnx2_msi;
6177
6178 bp->irq_tbl[0].vector = bp->pdev->irq;
6179 }
6180 }
6181
6182 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6183 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6184
6185 bp->num_rx_rings = bp->irq_nvecs;
6186 }
6187
6188 /* Called with rtnl_lock */
6189 static int
6190 bnx2_open(struct net_device *dev)
6191 {
6192 struct bnx2 *bp = netdev_priv(dev);
6193 int rc;
6194
6195 netif_carrier_off(dev);
6196
6197 bnx2_set_power_state(bp, PCI_D0);
6198 bnx2_disable_int(bp);
6199
6200 bnx2_setup_int_mode(bp, disable_msi);
6201 bnx2_init_napi(bp);
6202 bnx2_napi_enable(bp);
6203 rc = bnx2_alloc_mem(bp);
6204 if (rc)
6205 goto open_err;
6206
6207 rc = bnx2_request_irq(bp);
6208 if (rc)
6209 goto open_err;
6210
6211 rc = bnx2_init_nic(bp, 1);
6212 if (rc)
6213 goto open_err;
6214
6215 mod_timer(&bp->timer, jiffies + bp->current_interval);
6216
6217 atomic_set(&bp->intr_sem, 0);
6218
6219 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6220
6221 bnx2_enable_int(bp);
6222
6223 if (bp->flags & BNX2_FLAG_USING_MSI) {
6224 /* Test MSI to make sure it is working.
6225 * If the MSI test fails, go back to INTx mode.
6226 */
6227 if (bnx2_test_intr(bp) != 0) {
6228 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6229
6230 bnx2_disable_int(bp);
6231 bnx2_free_irq(bp);
6232
6233 bnx2_setup_int_mode(bp, 1);
6234
6235 rc = bnx2_init_nic(bp, 0);
6236
6237 if (!rc)
6238 rc = bnx2_request_irq(bp);
6239
6240 if (rc) {
6241 del_timer_sync(&bp->timer);
6242 goto open_err;
6243 }
6244 bnx2_enable_int(bp);
6245 }
6246 }
6247 if (bp->flags & BNX2_FLAG_USING_MSI)
6248 netdev_info(dev, "using MSI\n");
6249 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6250 netdev_info(dev, "using MSIX\n");
6251
6252 netif_tx_start_all_queues(dev);
6253
6254 return 0;
6255
6256 open_err:
6257 bnx2_napi_disable(bp);
6258 bnx2_free_skbs(bp);
6259 bnx2_free_irq(bp);
6260 bnx2_free_mem(bp);
6261 return rc;
6262 }
6263
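/* Reset worker scheduled from bnx2_tx_timeout().  It runs in process
 * context so it can take rtnl_lock and sleep while reinitializing the
 * NIC.  intr_sem is set to 1 so that the matching decrement in
 * bnx2_netif_start() brings it back to zero and re-enables interrupts.
 */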
6264 static void
6265 bnx2_reset_task(struct work_struct *work)
6266 {
6267 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6268
6269 rtnl_lock();
6270 if (!netif_running(bp->dev)) {
6271 rtnl_unlock();
6272 return;
6273 }
6274
6275 bnx2_netif_stop(bp);
6276
6277 bnx2_init_nic(bp, 1);
6278
6279 atomic_set(&bp->intr_sem, 1);
6280 bnx2_netif_start(bp);
6281 rtnl_unlock();
6282 }
6283
6284 static void
6285 bnx2_dump_state(struct bnx2 *bp)
6286 {
6287 struct net_device *dev = bp->dev;
6288
6289 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6290 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
6291 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6292 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6293 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6294 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
6295 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6296 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6297 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6298 if (bp->flags & BNX2_FLAG_USING_MSIX)
6299 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6300 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6301 }
6302
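/* Watchdog hook: dump debug state and defer the actual reset to
 * bnx2_reset_task(), since this runs in atomic context while the reset
 * path needs rtnl_lock and may sleep.
 */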
6303 static void
6304 bnx2_tx_timeout(struct net_device *dev)
6305 {
6306 struct bnx2 *bp = netdev_priv(dev);
6307
6308 bnx2_dump_state(bp);
6309
6310 /* This allows the netif to be shut down gracefully before resetting */
6311 schedule_work(&bp->reset_task);
6312 }
6313
6314 #ifdef BCM_VLAN
6315 /* Called with rtnl_lock */
6316 static void
6317 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6318 {
6319 struct bnx2 *bp = netdev_priv(dev);
6320
6321 if (netif_running(dev))
6322 bnx2_netif_stop(bp);
6323
6324 bp->vlgrp = vlgrp;
6325
6326 if (!netif_running(dev))
6327 return;
6328
6329 bnx2_set_rx_mode(dev);
6330 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6331 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6332
6333 bnx2_netif_start(bp);
6334 }
6335 #endif
6336
6337 /* Called with netif_tx_lock.
6338 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6339 * netif_wake_queue().
6340 */
6341 static netdev_tx_t
6342 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6343 {
6344 struct bnx2 *bp = netdev_priv(dev);
6345 dma_addr_t mapping;
6346 struct tx_bd *txbd;
6347 struct sw_tx_bd *tx_buf;
6348 u32 len, vlan_tag_flags, last_frag, mss;
6349 u16 prod, ring_prod;
6350 int i;
6351 struct bnx2_napi *bnapi;
6352 struct bnx2_tx_ring_info *txr;
6353 struct netdev_queue *txq;
6354
6355 /* Determine which tx ring this skb will be placed on */
6356 i = skb_get_queue_mapping(skb);
6357 bnapi = &bp->bnx2_napi[i];
6358 txr = &bnapi->tx_ring;
6359 txq = netdev_get_tx_queue(dev, i);
6360
6361 if (unlikely(bnx2_tx_avail(bp, txr) <
6362 (skb_shinfo(skb)->nr_frags + 1))) {
6363 netif_tx_stop_queue(txq);
6364 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6365
6366 return NETDEV_TX_BUSY;
6367 }
6368 len = skb_headlen(skb);
6369 prod = txr->tx_prod;
6370 ring_prod = TX_RING_IDX(prod);
6371
6372 vlan_tag_flags = 0;
6373 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6374 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6375 }
6376
6377 #ifdef BCM_VLAN
6378 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6379 vlan_tag_flags |=
6380 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6381 }
6382 #endif
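/* LSO setup: the gso size is carried in the high half of the BD
 * mss/nbytes field, and the combined header-option length (IP options
 * plus TCP options, in 32-bit words) is encoded into bits 8 and up of
 * vlan_tag_flags.  For TCPv6, a transport header that does not
 * immediately follow the IPv6 header has its extra offset packed, in
 * 8-byte units, into the TCP6_OFF0/OFF2/OFF4 fields split across
 * vlan_tag_flags and mss.
 */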
6383 if ((mss = skb_shinfo(skb)->gso_size)) {
6384 u32 tcp_opt_len;
6385 struct iphdr *iph;
6386
6387 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6388
6389 tcp_opt_len = tcp_optlen(skb);
6390
6391 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6392 u32 tcp_off = skb_transport_offset(skb) -
6393 sizeof(struct ipv6hdr) - ETH_HLEN;
6394
6395 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6396 TX_BD_FLAGS_SW_FLAGS;
6397 if (likely(tcp_off == 0))
6398 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6399 else {
6400 tcp_off >>= 3;
6401 vlan_tag_flags |= ((tcp_off & 0x3) <<
6402 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6403 ((tcp_off & 0x10) <<
6404 TX_BD_FLAGS_TCP6_OFF4_SHL);
6405 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6406 }
6407 } else {
6408 iph = ip_hdr(skb);
6409 if (tcp_opt_len || (iph->ihl > 5)) {
6410 vlan_tag_flags |= ((iph->ihl - 5) +
6411 (tcp_opt_len >> 2)) << 8;
6412 }
6413 }
6414 } else
6415 mss = 0;
6416
6417 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6418 if (pci_dma_mapping_error(bp->pdev, mapping)) {
6419 dev_kfree_skb(skb);
6420 return NETDEV_TX_OK;
6421 }
6422
6423 tx_buf = &txr->tx_buf_ring[ring_prod];
6424 tx_buf->skb = skb;
6425 pci_unmap_addr_set(tx_buf, mapping, mapping);
6426
6427 txbd = &txr->tx_desc_ring[ring_prod];
6428
6429 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6430 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6431 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6432 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6433
6434 last_frag = skb_shinfo(skb)->nr_frags;
6435 tx_buf->nr_frags = last_frag;
6436 tx_buf->is_gso = skb_is_gso(skb);
6437
6438 for (i = 0; i < last_frag; i++) {
6439 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6440
6441 prod = NEXT_TX_BD(prod);
6442 ring_prod = TX_RING_IDX(prod);
6443 txbd = &txr->tx_desc_ring[ring_prod];
6444
6445 len = frag->size;
6446 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6447 len, PCI_DMA_TODEVICE);
6448 if (pci_dma_mapping_error(bp->pdev, mapping))
6449 goto dma_error;
6450 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6451 mapping);
6452
6453 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6454 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6455 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6456 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6457
6458 }
6459 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6460
6461 prod = NEXT_TX_BD(prod);
6462 txr->tx_prod_bseq += skb->len;
6463
6464 REG_WR16(bp, txr->tx_bidx_addr, prod);
6465 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6466
6467 mmiowb();
6468
6469 txr->tx_prod = prod;
6470
6471 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6472 netif_tx_stop_queue(txq);
6473 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6474 netif_tx_wake_queue(txq);
6475 }
6476
6477 return NETDEV_TX_OK;
6478 dma_error:
6479 /* save value of frag that failed */
6480 last_frag = i;
6481
6482 /* start back at beginning and unmap skb */
6483 prod = txr->tx_prod;
6484 ring_prod = TX_RING_IDX(prod);
6485 tx_buf = &txr->tx_buf_ring[ring_prod];
6486 tx_buf->skb = NULL;
6487 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6488 skb_headlen(skb), PCI_DMA_TODEVICE);
6489
6490 /* unmap remaining mapped pages */
6491 for (i = 0; i < last_frag; i++) {
6492 prod = NEXT_TX_BD(prod);
6493 ring_prod = TX_RING_IDX(prod);
6494 tx_buf = &txr->tx_buf_ring[ring_prod];
6495 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6496 skb_shinfo(skb)->frags[i].size,
6497 PCI_DMA_TODEVICE);
6498 }
6499
6500 dev_kfree_skb(skb);
6501 return NETDEV_TX_OK;
6502 }
6503
6504 /* Called with rtnl_lock */
6505 static int
6506 bnx2_close(struct net_device *dev)
6507 {
6508 struct bnx2 *bp = netdev_priv(dev);
6509
6510 cancel_work_sync(&bp->reset_task);
6511
6512 bnx2_disable_int_sync(bp);
6513 bnx2_napi_disable(bp);
6514 del_timer_sync(&bp->timer);
6515 bnx2_shutdown_chip(bp);
6516 bnx2_free_irq(bp);
6517 bnx2_free_skbs(bp);
6518 bnx2_free_mem(bp);
6519 bp->link_up = 0;
6520 netif_carrier_off(bp->dev);
6521 bnx2_set_power_state(bp, PCI_D3hot);
6522 return 0;
6523 }
6524
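/* Accumulate the hardware statistics block into temp_stats_blk before
 * a chip reset wipes it.  The first 10 counters are 64-bit values
 * stored as {hi, lo} 32-bit word pairs, so the low words are summed in
 * 64 bits to detect a carry into the high word; the remaining counters
 * are plain 32-bit values.
 */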
6525 static void
6526 bnx2_save_stats(struct bnx2 *bp)
6527 {
6528 u32 *hw_stats = (u32 *) bp->stats_blk;
6529 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6530 int i;
6531
6532 /* The first 10 counters are 64-bit counters, stored as hi/lo word pairs */
6533 for (i = 0; i < 20; i += 2) {
6534 u32 hi;
6535 u64 lo;
6536
6537 hi = temp_stats[i] + hw_stats[i];
6538 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6539 if (lo > 0xffffffff)
6540 hi++;
6541 temp_stats[i] = hi;
6542 temp_stats[i + 1] = lo & 0xffffffff;
6543 }
6544
6545 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6546 temp_stats[i] += hw_stats[i];
6547 }
6548
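/* Helpers for bnx2_get_stats(): each counter is the sum of the live
 * hardware value and the value saved in temp_stats_blk across resets.
 * On 64-bit kernels the two 32-bit halves of a 64-bit counter are
 * recombined; on 32-bit kernels only the low word is reported because
 * the net_device_stats fields are unsigned long.
 */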
6549 #define GET_64BIT_NET_STATS64(ctr) \
6550 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6551 (unsigned long) (ctr##_lo)
6552
6553 #define GET_64BIT_NET_STATS32(ctr) \
6554 (ctr##_lo)
6555
6556 #if (BITS_PER_LONG == 64)
6557 #define GET_64BIT_NET_STATS(ctr) \
6558 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6559 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6560 #else
6561 #define GET_64BIT_NET_STATS(ctr) \
6562 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6563 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6564 #endif
6565
6566 #define GET_32BIT_NET_STATS(ctr) \
6567 (unsigned long) (bp->stats_blk->ctr + \
6568 bp->temp_stats_blk->ctr)
6569
6570 static struct net_device_stats *
6571 bnx2_get_stats(struct net_device *dev)
6572 {
6573 struct bnx2 *bp = netdev_priv(dev);
6574 struct net_device_stats *net_stats = &dev->stats;
6575
6576 if (bp->stats_blk == NULL) {
6577 return net_stats;
6578 }
6579 net_stats->rx_packets =
6580 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6581 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6582 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6583
6584 net_stats->tx_packets =
6585 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6586 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6587 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6588
6589 net_stats->rx_bytes =
6590 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6591
6592 net_stats->tx_bytes =
6593 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6594
6595 net_stats->multicast =
6596 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6597
6598 net_stats->collisions =
6599 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6600
6601 net_stats->rx_length_errors =
6602 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6603 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6604
6605 net_stats->rx_over_errors =
6606 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6607 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6608
6609 net_stats->rx_frame_errors =
6610 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6611
6612 net_stats->rx_crc_errors =
6613 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6614
6615 net_stats->rx_errors = net_stats->rx_length_errors +
6616 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6617 net_stats->rx_crc_errors;
6618
6619 net_stats->tx_aborted_errors =
6620 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6621 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6622
6623 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6624 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6625 net_stats->tx_carrier_errors = 0;
6626 else {
6627 net_stats->tx_carrier_errors =
6628 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6629 }
6630
6631 net_stats->tx_errors =
6632 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6633 net_stats->tx_aborted_errors +
6634 net_stats->tx_carrier_errors;
6635
6636 net_stats->rx_missed_errors =
6637 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6638 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6639 GET_32BIT_NET_STATS(stat_FwRxDrop);
6640
6641 return net_stats;
6642 }
6643
6644 /* All ethtool functions called with rtnl_lock */
6645
6646 static int
6647 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6648 {
6649 struct bnx2 *bp = netdev_priv(dev);
6650 int support_serdes = 0, support_copper = 0;
6651
6652 cmd->supported = SUPPORTED_Autoneg;
6653 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6654 support_serdes = 1;
6655 support_copper = 1;
6656 } else if (bp->phy_port == PORT_FIBRE)
6657 support_serdes = 1;
6658 else
6659 support_copper = 1;
6660
6661 if (support_serdes) {
6662 cmd->supported |= SUPPORTED_1000baseT_Full |
6663 SUPPORTED_FIBRE;
6664 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6665 cmd->supported |= SUPPORTED_2500baseX_Full;
6666
6667 }
6668 if (support_copper) {
6669 cmd->supported |= SUPPORTED_10baseT_Half |
6670 SUPPORTED_10baseT_Full |
6671 SUPPORTED_100baseT_Half |
6672 SUPPORTED_100baseT_Full |
6673 SUPPORTED_1000baseT_Full |
6674 SUPPORTED_TP;
6675
6676 }
6677
6678 spin_lock_bh(&bp->phy_lock);
6679 cmd->port = bp->phy_port;
6680 cmd->advertising = bp->advertising;
6681
6682 if (bp->autoneg & AUTONEG_SPEED) {
6683 cmd->autoneg = AUTONEG_ENABLE;
6684 }
6685 else {
6686 cmd->autoneg = AUTONEG_DISABLE;
6687 }
6688
6689 if (netif_carrier_ok(dev)) {
6690 cmd->speed = bp->line_speed;
6691 cmd->duplex = bp->duplex;
6692 }
6693 else {
6694 cmd->speed = -1;
6695 cmd->duplex = -1;
6696 }
6697 spin_unlock_bh(&bp->phy_lock);
6698
6699 cmd->transceiver = XCVR_INTERNAL;
6700 cmd->phy_address = bp->phy_addr;
6701
6702 return 0;
6703 }
6704
6705 static int
6706 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6707 {
6708 struct bnx2 *bp = netdev_priv(dev);
6709 u8 autoneg = bp->autoneg;
6710 u8 req_duplex = bp->req_duplex;
6711 u16 req_line_speed = bp->req_line_speed;
6712 u32 advertising = bp->advertising;
6713 int err = -EINVAL;
6714
6715 spin_lock_bh(&bp->phy_lock);
6716
6717 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6718 goto err_out_unlock;
6719
6720 if (cmd->port != bp->phy_port &&
6721 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6722 goto err_out_unlock;
6723
6724 /* If device is down, we can store the settings only if the user
6725 * is setting the currently active port.
6726 */
6727 if (!netif_running(dev) && cmd->port != bp->phy_port)
6728 goto err_out_unlock;
6729
6730 if (cmd->autoneg == AUTONEG_ENABLE) {
6731 autoneg |= AUTONEG_SPEED;
6732
6733 advertising = cmd->advertising;
6734 if (cmd->port == PORT_TP) {
6735 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6736 if (!advertising)
6737 advertising = ETHTOOL_ALL_COPPER_SPEED;
6738 } else {
6739 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6740 if (!advertising)
6741 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6742 }
6743 advertising |= ADVERTISED_Autoneg;
6744 }
6745 else {
6746 if (cmd->port == PORT_FIBRE) {
6747 if ((cmd->speed != SPEED_1000 &&
6748 cmd->speed != SPEED_2500) ||
6749 (cmd->duplex != DUPLEX_FULL))
6750 goto err_out_unlock;
6751
6752 if (cmd->speed == SPEED_2500 &&
6753 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6754 goto err_out_unlock;
6755 }
6756 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6757 goto err_out_unlock;
6758
6759 autoneg &= ~AUTONEG_SPEED;
6760 req_line_speed = cmd->speed;
6761 req_duplex = cmd->duplex;
6762 advertising = 0;
6763 }
6764
6765 bp->autoneg = autoneg;
6766 bp->advertising = advertising;
6767 bp->req_line_speed = req_line_speed;
6768 bp->req_duplex = req_duplex;
6769
6770 err = 0;
6771 /* If device is down, the new settings will be picked up when it is
6772 * brought up.
6773 */
6774 if (netif_running(dev))
6775 err = bnx2_setup_phy(bp, cmd->port);
6776
6777 err_out_unlock:
6778 spin_unlock_bh(&bp->phy_lock);
6779
6780 return err;
6781 }
6782
6783 static void
6784 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6785 {
6786 struct bnx2 *bp = netdev_priv(dev);
6787
6788 strcpy(info->driver, DRV_MODULE_NAME);
6789 strcpy(info->version, DRV_MODULE_VERSION);
6790 strcpy(info->bus_info, pci_name(bp->pdev));
6791 strcpy(info->fw_version, bp->fw_version);
6792 }
6793
6794 #define BNX2_REGDUMP_LEN (32 * 1024)
6795
6796 static int
6797 bnx2_get_regs_len(struct net_device *dev)
6798 {
6799 return BNX2_REGDUMP_LEN;
6800 }
6801
6802 static void
6803 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6804 {
6805 u32 *p = _p, i, offset;
6806 u8 *orig_p = _p;
6807 struct bnx2 *bp = netdev_priv(dev);
6808 static const u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6809 0x0800, 0x0880, 0x0c00, 0x0c10,
6810 0x0c30, 0x0d08, 0x1000, 0x101c,
6811 0x1040, 0x1048, 0x1080, 0x10a4,
6812 0x1400, 0x1490, 0x1498, 0x14f0,
6813 0x1500, 0x155c, 0x1580, 0x15dc,
6814 0x1600, 0x1658, 0x1680, 0x16d8,
6815 0x1800, 0x1820, 0x1840, 0x1854,
6816 0x1880, 0x1894, 0x1900, 0x1984,
6817 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6818 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6819 0x2000, 0x2030, 0x23c0, 0x2400,
6820 0x2800, 0x2820, 0x2830, 0x2850,
6821 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6822 0x3c00, 0x3c94, 0x4000, 0x4010,
6823 0x4080, 0x4090, 0x43c0, 0x4458,
6824 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6825 0x4fc0, 0x5010, 0x53c0, 0x5444,
6826 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6827 0x5fc0, 0x6000, 0x6400, 0x6428,
6828 0x6800, 0x6848, 0x684c, 0x6860,
6829 0x6888, 0x6910, 0x8000 };
6830
6831 regs->version = 0;
6832
6833 memset(p, 0, BNX2_REGDUMP_LEN);
6834
6835 if (!netif_running(bp->dev))
6836 return;
6837
6838 i = 0;
6839 offset = reg_boundaries[0];
6840 p += offset;
6841 while (offset < BNX2_REGDUMP_LEN) {
6842 *p++ = REG_RD(bp, offset);
6843 offset += 4;
6844 if (offset == reg_boundaries[i + 1]) {
6845 offset = reg_boundaries[i + 2];
6846 p = (u32 *) (orig_p + offset);
6847 i += 2;
6848 }
6849 }
6850 }
6851
6852 static void
6853 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6854 {
6855 struct bnx2 *bp = netdev_priv(dev);
6856
6857 if (bp->flags & BNX2_FLAG_NO_WOL) {
6858 wol->supported = 0;
6859 wol->wolopts = 0;
6860 }
6861 else {
6862 wol->supported = WAKE_MAGIC;
6863 if (bp->wol)
6864 wol->wolopts = WAKE_MAGIC;
6865 else
6866 wol->wolopts = 0;
6867 }
6868 memset(&wol->sopass, 0, sizeof(wol->sopass));
6869 }
6870
6871 static int
6872 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6873 {
6874 struct bnx2 *bp = netdev_priv(dev);
6875
6876 if (wol->wolopts & ~WAKE_MAGIC)
6877 return -EINVAL;
6878
6879 if (wol->wolopts & WAKE_MAGIC) {
6880 if (bp->flags & BNX2_FLAG_NO_WOL)
6881 return -EINVAL;
6882
6883 bp->wol = 1;
6884 }
6885 else {
6886 bp->wol = 0;
6887 }
6888 return 0;
6889 }
6890
6891 static int
6892 bnx2_nway_reset(struct net_device *dev)
6893 {
6894 struct bnx2 *bp = netdev_priv(dev);
6895 u32 bmcr;
6896
6897 if (!netif_running(dev))
6898 return -EAGAIN;
6899
6900 if (!(bp->autoneg & AUTONEG_SPEED)) {
6901 return -EINVAL;
6902 }
6903
6904 spin_lock_bh(&bp->phy_lock);
6905
6906 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6907 int rc;
6908
6909 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6910 spin_unlock_bh(&bp->phy_lock);
6911 return rc;
6912 }
6913
6914 /* Force a link down that is visible on the other side */
6915 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6916 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6917 spin_unlock_bh(&bp->phy_lock);
6918
6919 msleep(20);
6920
6921 spin_lock_bh(&bp->phy_lock);
6922
6923 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6924 bp->serdes_an_pending = 1;
6925 mod_timer(&bp->timer, jiffies + bp->current_interval);
6926 }
6927
6928 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6929 bmcr &= ~BMCR_LOOPBACK;
6930 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6931
6932 spin_unlock_bh(&bp->phy_lock);
6933
6934 return 0;
6935 }
6936
6937 static u32
6938 bnx2_get_link(struct net_device *dev)
6939 {
6940 struct bnx2 *bp = netdev_priv(dev);
6941
6942 return bp->link_up;
6943 }
6944
6945 static int
6946 bnx2_get_eeprom_len(struct net_device *dev)
6947 {
6948 struct bnx2 *bp = netdev_priv(dev);
6949
6950 if (bp->flash_info == NULL)
6951 return 0;
6952
6953 return (int) bp->flash_size;
6954 }
6955
6956 static int
6957 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6958 u8 *eebuf)
6959 {
6960 struct bnx2 *bp = netdev_priv(dev);
6961 int rc;
6962
6963 if (!netif_running(dev))
6964 return -EAGAIN;
6965
6966 /* parameters already validated in ethtool_get_eeprom */
6967
6968 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6969
6970 return rc;
6971 }
6972
6973 static int
6974 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6975 u8 *eebuf)
6976 {
6977 struct bnx2 *bp = netdev_priv(dev);
6978 int rc;
6979
6980 if (!netif_running(dev))
6981 return -EAGAIN;
6982
6983 /* parameters already validated in ethtool_set_eeprom */
6984
6985 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6986
6987 return rc;
6988 }
6989
6990 static int
6991 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6992 {
6993 struct bnx2 *bp = netdev_priv(dev);
6994
6995 memset(coal, 0, sizeof(struct ethtool_coalesce));
6996
6997 coal->rx_coalesce_usecs = bp->rx_ticks;
6998 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6999 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7000 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7001
7002 coal->tx_coalesce_usecs = bp->tx_ticks;
7003 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7004 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7005 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7006
7007 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7008
7009 return 0;
7010 }
7011
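/* Each coalescing parameter is clamped to its hardware field width:
 * tick values are 10-bit (max 0x3ff) and frame-count trip values are
 * 8-bit (max 0xff).  Chips with BNX2_FLAG_BROKEN_STATS only support a
 * statistics period of 0 or one second.  A running NIC is
 * reinitialized so the new values take effect.
 */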
7012 static int
7013 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7014 {
7015 struct bnx2 *bp = netdev_priv(dev);
7016
7017 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7018 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7019
7020 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7021 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7022
7023 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7024 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7025
7026 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7027 if (bp->rx_quick_cons_trip_int > 0xff)
7028 bp->rx_quick_cons_trip_int = 0xff;
7029
7030 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7031 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7032
7033 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7034 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7035
7036 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7037 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7038
7039 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7040 if (bp->tx_quick_cons_trip_int > 0xff)
7041 bp->tx_quick_cons_trip_int = 0xff;
7042
7043 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7044 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7045 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7046 bp->stats_ticks = USEC_PER_SEC;
7047 }
7048 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7049 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7050 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7051
7052 if (netif_running(bp->dev)) {
7053 bnx2_netif_stop(bp);
7054 bnx2_init_nic(bp, 0);
7055 bnx2_netif_start(bp);
7056 }
7057
7058 return 0;
7059 }
7060
7061 static void
7062 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7063 {
7064 struct bnx2 *bp = netdev_priv(dev);
7065
7066 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7067 ering->rx_mini_max_pending = 0;
7068 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7069
7070 ering->rx_pending = bp->rx_ring_size;
7071 ering->rx_mini_pending = 0;
7072 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7073
7074 ering->tx_max_pending = MAX_TX_DESC_CNT;
7075 ering->tx_pending = bp->tx_ring_size;
7076 }
7077
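/* Changing the ring sizes requires a full chip reset.  The hardware
 * statistics are saved first because the reset clears the stats block;
 * if reallocation or reinit fails afterwards, the device is closed
 * rather than left half-configured.
 */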
7078 static int
7079 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7080 {
7081 if (netif_running(bp->dev)) {
7082 /* Reset will erase chipset stats; save them */
7083 bnx2_save_stats(bp);
7084
7085 bnx2_netif_stop(bp);
7086 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7087 bnx2_free_skbs(bp);
7088 bnx2_free_mem(bp);
7089 }
7090
7091 bnx2_set_rx_ring_size(bp, rx);
7092 bp->tx_ring_size = tx;
7093
7094 if (netif_running(bp->dev)) {
7095 int rc;
7096
7097 rc = bnx2_alloc_mem(bp);
7098 if (!rc)
7099 rc = bnx2_init_nic(bp, 0);
7100
7101 if (rc) {
7102 bnx2_napi_enable(bp);
7103 dev_close(bp->dev);
7104 return rc;
7105 }
7106 #ifdef BCM_CNIC
7107 mutex_lock(&bp->cnic_lock);
7108 /* Let cnic know about the new status block. */
7109 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7110 bnx2_setup_cnic_irq_info(bp);
7111 mutex_unlock(&bp->cnic_lock);
7112 #endif
7113 bnx2_netif_start(bp);
7114 }
7115 return 0;
7116 }
7117
7118 static int
7119 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7120 {
7121 struct bnx2 *bp = netdev_priv(dev);
7122 int rc;
7123
7124 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7125 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7126 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7127
7128 return -EINVAL;
7129 }
7130 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7131 return rc;
7132 }
7133
7134 static void
7135 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7136 {
7137 struct bnx2 *bp = netdev_priv(dev);
7138
7139 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7140 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7141 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7142 }
7143
7144 static int
7145 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7146 {
7147 struct bnx2 *bp = netdev_priv(dev);
7148
7149 bp->req_flow_ctrl = 0;
7150 if (epause->rx_pause)
7151 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7152 if (epause->tx_pause)
7153 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7154
7155 if (epause->autoneg) {
7156 bp->autoneg |= AUTONEG_FLOW_CTRL;
7157 }
7158 else {
7159 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7160 }
7161
7162 if (netif_running(dev)) {
7163 spin_lock_bh(&bp->phy_lock);
7164 bnx2_setup_phy(bp, bp->phy_port);
7165 spin_unlock_bh(&bp->phy_lock);
7166 }
7167
7168 return 0;
7169 }
7170
7171 static u32
7172 bnx2_get_rx_csum(struct net_device *dev)
7173 {
7174 struct bnx2 *bp = netdev_priv(dev);
7175
7176 return bp->rx_csum;
7177 }
7178
7179 static int
7180 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7181 {
7182 struct bnx2 *bp = netdev_priv(dev);
7183
7184 bp->rx_csum = data;
7185 return 0;
7186 }
7187
7188 static int
7189 bnx2_set_tso(struct net_device *dev, u32 data)
7190 {
7191 struct bnx2 *bp = netdev_priv(dev);
7192
7193 if (data) {
7194 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7195 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7196 dev->features |= NETIF_F_TSO6;
7197 } else
7198 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7199 NETIF_F_TSO_ECN);
7200 return 0;
7201 }
7202
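/* ethtool statistics tables: bnx2_stats_str_arr (names),
 * bnx2_stats_offset_arr (offsets into struct statistics_block in
 * 32-bit words) and the per-chip counter-width arrays below are
 * parallel arrays and must stay index-aligned.
 */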
7203 static struct {
7204 char string[ETH_GSTRING_LEN];
7205 } bnx2_stats_str_arr[] = {
7206 { "rx_bytes" },
7207 { "rx_error_bytes" },
7208 { "tx_bytes" },
7209 { "tx_error_bytes" },
7210 { "rx_ucast_packets" },
7211 { "rx_mcast_packets" },
7212 { "rx_bcast_packets" },
7213 { "tx_ucast_packets" },
7214 { "tx_mcast_packets" },
7215 { "tx_bcast_packets" },
7216 { "tx_mac_errors" },
7217 { "tx_carrier_errors" },
7218 { "rx_crc_errors" },
7219 { "rx_align_errors" },
7220 { "tx_single_collisions" },
7221 { "tx_multi_collisions" },
7222 { "tx_deferred" },
7223 { "tx_excess_collisions" },
7224 { "tx_late_collisions" },
7225 { "tx_total_collisions" },
7226 { "rx_fragments" },
7227 { "rx_jabbers" },
7228 { "rx_undersize_packets" },
7229 { "rx_oversize_packets" },
7230 { "rx_64_byte_packets" },
7231 { "rx_65_to_127_byte_packets" },
7232 { "rx_128_to_255_byte_packets" },
7233 { "rx_256_to_511_byte_packets" },
7234 { "rx_512_to_1023_byte_packets" },
7235 { "rx_1024_to_1522_byte_packets" },
7236 { "rx_1523_to_9022_byte_packets" },
7237 { "tx_64_byte_packets" },
7238 { "tx_65_to_127_byte_packets" },
7239 { "tx_128_to_255_byte_packets" },
7240 { "tx_256_to_511_byte_packets" },
7241 { "tx_512_to_1023_byte_packets" },
7242 { "tx_1024_to_1522_byte_packets" },
7243 { "tx_1523_to_9022_byte_packets" },
7244 { "rx_xon_frames" },
7245 { "rx_xoff_frames" },
7246 { "tx_xon_frames" },
7247 { "tx_xoff_frames" },
7248 { "rx_mac_ctrl_frames" },
7249 { "rx_filtered_packets" },
7250 { "rx_ftq_discards" },
7251 { "rx_discards" },
7252 { "rx_fw_discards" },
7253 };
7254
7255 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7257
7258 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7259
7260 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7261 STATS_OFFSET32(stat_IfHCInOctets_hi),
7262 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7263 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7264 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7265 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7266 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7267 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7268 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7269 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7270 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7271 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7272 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7273 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7274 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7275 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7276 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7277 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7278 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7279 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7280 STATS_OFFSET32(stat_EtherStatsCollisions),
7281 STATS_OFFSET32(stat_EtherStatsFragments),
7282 STATS_OFFSET32(stat_EtherStatsJabbers),
7283 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7284 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7285 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7286 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7287 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7288 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7289 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7290 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7291 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7292 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7293 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7294 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7295 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7296 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7297 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7298 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7299 STATS_OFFSET32(stat_XonPauseFramesReceived),
7300 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7301 STATS_OFFSET32(stat_OutXonSent),
7302 STATS_OFFSET32(stat_OutXoffSent),
7303 STATS_OFFSET32(stat_MacControlFramesReceived),
7304 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7305 STATS_OFFSET32(stat_IfInFTQDiscards),
7306 STATS_OFFSET32(stat_IfInMBUFDiscards),
7307 STATS_OFFSET32(stat_FwRxDrop),
7308 };
7309
7310 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7311 * skipped because of errata.
7312 */
7313 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7314 8,0,8,8,8,8,8,8,8,8,
7315 4,0,4,4,4,4,4,4,4,4,
7316 4,4,4,4,4,4,4,4,4,4,
7317 4,4,4,4,4,4,4,4,4,4,
7318 4,4,4,4,4,4,4,
7319 };
7320
7321 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7322 8,0,8,8,8,8,8,8,8,8,
7323 4,4,4,4,4,4,4,4,4,4,
7324 4,4,4,4,4,4,4,4,4,4,
7325 4,4,4,4,4,4,4,4,4,4,
7326 4,4,4,4,4,4,4,
7327 };
7328
7329 #define BNX2_NUM_TESTS 6
7330
7331 static struct {
7332 char string[ETH_GSTRING_LEN];
7333 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7334 { "register_test (offline)" },
7335 { "memory_test (offline)" },
7336 { "loopback_test (offline)" },
7337 { "nvram_test (online)" },
7338 { "interrupt_test (online)" },
7339 { "link_test (online)" },
7340 };
7341
7342 static int
7343 bnx2_get_sset_count(struct net_device *dev, int sset)
7344 {
7345 switch (sset) {
7346 case ETH_SS_TEST:
7347 return BNX2_NUM_TESTS;
7348 case ETH_SS_STATS:
7349 return BNX2_NUM_STATS;
7350 default:
7351 return -EOPNOTSUPP;
7352 }
7353 }
7354
7355 static void
7356 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7357 {
7358 struct bnx2 *bp = netdev_priv(dev);
7359
7360 bnx2_set_power_state(bp, PCI_D0);
7361
7362 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7363 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7364 int i;
7365
7366 bnx2_netif_stop(bp);
7367 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7368 bnx2_free_skbs(bp);
7369
7370 if (bnx2_test_registers(bp) != 0) {
7371 buf[0] = 1;
7372 etest->flags |= ETH_TEST_FL_FAILED;
7373 }
7374 if (bnx2_test_memory(bp) != 0) {
7375 buf[1] = 1;
7376 etest->flags |= ETH_TEST_FL_FAILED;
7377 }
7378 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7379 etest->flags |= ETH_TEST_FL_FAILED;
7380
7381 if (!netif_running(bp->dev))
7382 bnx2_shutdown_chip(bp);
7383 else {
7384 bnx2_init_nic(bp, 1);
7385 bnx2_netif_start(bp);
7386 }
7387
7388 /* wait for link up */
7389 for (i = 0; i < 7; i++) {
7390 if (bp->link_up)
7391 break;
7392 msleep_interruptible(1000);
7393 }
7394 }
7395
7396 if (bnx2_test_nvram(bp) != 0) {
7397 buf[3] = 1;
7398 etest->flags |= ETH_TEST_FL_FAILED;
7399 }
7400 if (bnx2_test_intr(bp) != 0) {
7401 buf[4] = 1;
7402 etest->flags |= ETH_TEST_FL_FAILED;
7403 }
7404
7405 if (bnx2_test_link(bp) != 0) {
7406 buf[5] = 1;
7407 etest->flags |= ETH_TEST_FL_FAILED;
7408
7409 }
7410 if (!netif_running(bp->dev))
7411 bnx2_set_power_state(bp, PCI_D3hot);
7412 }
7413
7414 static void
7415 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7416 {
7417 switch (stringset) {
7418 case ETH_SS_STATS:
7419 memcpy(buf, bnx2_stats_str_arr,
7420 sizeof(bnx2_stats_str_arr));
7421 break;
7422 case ETH_SS_TEST:
7423 memcpy(buf, bnx2_tests_str_arr,
7424 sizeof(bnx2_tests_str_arr));
7425 break;
7426 }
7427 }
7428
7429 static void
7430 bnx2_get_ethtool_stats(struct net_device *dev,
7431 struct ethtool_stats *stats, u64 *buf)
7432 {
7433 struct bnx2 *bp = netdev_priv(dev);
7434 int i;
7435 u32 *hw_stats = (u32 *) bp->stats_blk;
7436 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7437 u8 *stats_len_arr = NULL;
7438
7439 if (hw_stats == NULL) {
7440 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7441 return;
7442 }
7443
7444 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7445 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7446 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7447 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7448 stats_len_arr = bnx2_5706_stats_len_arr;
7449 else
7450 stats_len_arr = bnx2_5708_stats_len_arr;
7451
7452 for (i = 0; i < BNX2_NUM_STATS; i++) {
7453 unsigned long offset;
7454
7455 if (stats_len_arr[i] == 0) {
7456 /* skip this counter */
7457 buf[i] = 0;
7458 continue;
7459 }
7460
7461 offset = bnx2_stats_offset_arr[i];
7462 if (stats_len_arr[i] == 4) {
7463 /* 4-byte counter */
7464 buf[i] = (u64) *(hw_stats + offset) +
7465 *(temp_stats + offset);
7466 continue;
7467 }
7468 /* 8-byte counter */
7469 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7470 *(hw_stats + offset + 1) +
7471 (((u64) *(temp_stats + offset)) << 32) +
7472 *(temp_stats + offset + 1);
7473 }
7474 }
7475
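/* ethtool LED identify: blink the port LED by overriding the EMAC LED
 * control, toggling every 500 ms for roughly 'data' seconds (2 when 0
 * is passed), then restore the saved LED mode.
 */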
7476 static int
7477 bnx2_phys_id(struct net_device *dev, u32 data)
7478 {
7479 struct bnx2 *bp = netdev_priv(dev);
7480 int i;
7481 u32 save;
7482
7483 bnx2_set_power_state(bp, PCI_D0);
7484
7485 if (data == 0)
7486 data = 2;
7487
7488 save = REG_RD(bp, BNX2_MISC_CFG);
7489 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7490
7491 for (i = 0; i < (data * 2); i++) {
7492 if ((i % 2) == 0) {
7493 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7494 }
7495 else {
7496 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7497 BNX2_EMAC_LED_1000MB_OVERRIDE |
7498 BNX2_EMAC_LED_100MB_OVERRIDE |
7499 BNX2_EMAC_LED_10MB_OVERRIDE |
7500 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7501 BNX2_EMAC_LED_TRAFFIC);
7502 }
7503 msleep_interruptible(500);
7504 if (signal_pending(current))
7505 break;
7506 }
7507 REG_WR(bp, BNX2_EMAC_LED, 0);
7508 REG_WR(bp, BNX2_MISC_CFG, save);
7509
7510 if (!netif_running(dev))
7511 bnx2_set_power_state(bp, PCI_D3hot);
7512
7513 return 0;
7514 }
7515
7516 static int
7517 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7518 {
7519 struct bnx2 *bp = netdev_priv(dev);
7520
7521 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7522 return ethtool_op_set_tx_ipv6_csum(dev, data);
7523 else
7524 return ethtool_op_set_tx_csum(dev, data);
7525 }
7526
7527 static const struct ethtool_ops bnx2_ethtool_ops = {
7528 .get_settings = bnx2_get_settings,
7529 .set_settings = bnx2_set_settings,
7530 .get_drvinfo = bnx2_get_drvinfo,
7531 .get_regs_len = bnx2_get_regs_len,
7532 .get_regs = bnx2_get_regs,
7533 .get_wol = bnx2_get_wol,
7534 .set_wol = bnx2_set_wol,
7535 .nway_reset = bnx2_nway_reset,
7536 .get_link = bnx2_get_link,
7537 .get_eeprom_len = bnx2_get_eeprom_len,
7538 .get_eeprom = bnx2_get_eeprom,
7539 .set_eeprom = bnx2_set_eeprom,
7540 .get_coalesce = bnx2_get_coalesce,
7541 .set_coalesce = bnx2_set_coalesce,
7542 .get_ringparam = bnx2_get_ringparam,
7543 .set_ringparam = bnx2_set_ringparam,
7544 .get_pauseparam = bnx2_get_pauseparam,
7545 .set_pauseparam = bnx2_set_pauseparam,
7546 .get_rx_csum = bnx2_get_rx_csum,
7547 .set_rx_csum = bnx2_set_rx_csum,
7548 .set_tx_csum = bnx2_set_tx_csum,
7549 .set_sg = ethtool_op_set_sg,
7550 .set_tso = bnx2_set_tso,
7551 .self_test = bnx2_self_test,
7552 .get_strings = bnx2_get_strings,
7553 .phys_id = bnx2_phys_id,
7554 .get_ethtool_stats = bnx2_get_ethtool_stats,
7555 .get_sset_count = bnx2_get_sset_count,
7556 };
7557
7558 /* Called with rtnl_lock */
7559 static int
7560 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7561 {
7562 struct mii_ioctl_data *data = if_mii(ifr);
7563 struct bnx2 *bp = netdev_priv(dev);
7564 int err;
7565
7566 switch(cmd) {
7567 case SIOCGMIIPHY:
7568 data->phy_id = bp->phy_addr;
7569
7570 /* fallthru */
7571 case SIOCGMIIREG: {
7572 u32 mii_regval;
7573
7574 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7575 return -EOPNOTSUPP;
7576
7577 if (!netif_running(dev))
7578 return -EAGAIN;
7579
7580 spin_lock_bh(&bp->phy_lock);
7581 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7582 spin_unlock_bh(&bp->phy_lock);
7583
7584 data->val_out = mii_regval;
7585
7586 return err;
7587 }
7588
7589 case SIOCSMIIREG:
7590 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7591 return -EOPNOTSUPP;
7592
7593 if (!netif_running(dev))
7594 return -EAGAIN;
7595
7596 spin_lock_bh(&bp->phy_lock);
7597 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7598 spin_unlock_bh(&bp->phy_lock);
7599
7600 return err;
7601
7602 default:
7603 /* do nothing */
7604 break;
7605 }
7606 return -EOPNOTSUPP;
7607 }
7608
7609 /* Called with rtnl_lock */
7610 static int
7611 bnx2_change_mac_addr(struct net_device *dev, void *p)
7612 {
7613 struct sockaddr *addr = p;
7614 struct bnx2 *bp = netdev_priv(dev);
7615
7616 if (!is_valid_ether_addr(addr->sa_data))
7617 return -EINVAL;
7618
7619 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7620 if (netif_running(dev))
7621 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7622
7623 return 0;
7624 }
7625
7626 /* Called with rtnl_lock */
7627 static int
7628 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7629 {
7630 struct bnx2 *bp = netdev_priv(dev);
7631
7632 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7633 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7634 return -EINVAL;
7635
7636 dev->mtu = new_mtu;
7637 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7638 }
7639
7640 #ifdef CONFIG_NET_POLL_CONTROLLER
7641 static void
7642 poll_bnx2(struct net_device *dev)
7643 {
7644 struct bnx2 *bp = netdev_priv(dev);
7645 int i;
7646
7647 for (i = 0; i < bp->irq_nvecs; i++) {
7648 struct bnx2_irq *irq = &bp->irq_tbl[i];
7649
7650 disable_irq(irq->vector);
7651 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7652 enable_irq(irq->vector);
7653 }
7654 }
7655 #endif
7656
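/* 5709 dual-media detection: the bond id in MISC_DUAL_MEDIA_CTRL
 * identifies copper-only (C) and SerDes-only (S) parts directly;
 * otherwise the strap value (or its software override) is decoded per
 * PCI function to decide whether this port is SerDes.
 */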
7657 static void __devinit
7658 bnx2_get_5709_media(struct bnx2 *bp)
7659 {
7660 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7661 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7662 u32 strap;
7663
7664 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7665 return;
7666 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7667 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7668 return;
7669 }
7670
7671 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7672 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7673 else
7674 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7675
7676 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7677 switch (strap) {
7678 case 0x4:
7679 case 0x5:
7680 case 0x6:
7681 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7682 return;
7683 }
7684 } else {
7685 switch (strap) {
7686 case 0x1:
7687 case 0x2:
7688 case 0x4:
7689 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7690 return;
7691 }
7692 }
7693 }
7694
7695 static void __devinit
7696 bnx2_get_pci_speed(struct bnx2 *bp)
7697 {
7698 u32 reg;
7699
7700 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7701 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7702 u32 clkreg;
7703
7704 bp->flags |= BNX2_FLAG_PCIX;
7705
7706 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7707
7708 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7709 switch (clkreg) {
7710 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7711 bp->bus_speed_mhz = 133;
7712 break;
7713
7714 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7715 bp->bus_speed_mhz = 100;
7716 break;
7717
7718 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7719 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7720 bp->bus_speed_mhz = 66;
7721 break;
7722
7723 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7724 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7725 bp->bus_speed_mhz = 50;
7726 break;
7727
7728 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7729 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7730 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7731 bp->bus_speed_mhz = 33;
7732 break;
7733 }
7734 }
7735 else {
7736 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7737 bp->bus_speed_mhz = 66;
7738 else
7739 bp->bus_speed_mhz = 33;
7740 }
7741
7742 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7743 bp->flags |= BNX2_FLAG_PCI_32BIT;
7744
7745 }
7746
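/* Read the PCI VPD image from NVRAM at offset 0x300 and extract a
 * firmware version string from it.  NVRAM data comes back byte-swapped
 * within each 32-bit word, so the block is unscrambled first; the
 * version is only taken from the read-only section when the
 * manufacturer id keyword matches "1028" (Dell).
 */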
7747 static void __devinit
7748 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7749 {
7750 int rc, i, j;
7751 u8 *data;
7752 unsigned int block_end, rosize, len;
7753
7754 #define BNX2_VPD_NVRAM_OFFSET 0x300
7755 #define BNX2_VPD_LEN 128
7756 #define BNX2_MAX_VER_SLEN 30
7757
7758 data = kmalloc(256, GFP_KERNEL);
7759 if (!data)
7760 return;
7761
7762 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7763 BNX2_VPD_LEN);
7764 if (rc)
7765 goto vpd_done;
7766
7767 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7768 data[i] = data[i + BNX2_VPD_LEN + 3];
7769 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7770 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7771 data[i + 3] = data[i + BNX2_VPD_LEN];
7772 }
7773
7774 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7775 if (i < 0)
7776 goto vpd_done;
7777
7778 rosize = pci_vpd_lrdt_size(&data[i]);
7779 i += PCI_VPD_LRDT_TAG_SIZE;
7780 block_end = i + rosize;
7781
7782 if (block_end > BNX2_VPD_LEN)
7783 goto vpd_done;
7784
7785 j = pci_vpd_find_info_keyword(data, i, rosize,
7786 PCI_VPD_RO_KEYWORD_MFR_ID);
7787 if (j < 0)
7788 goto vpd_done;
7789
7790 len = pci_vpd_info_field_size(&data[j]);
7791
7792 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7793 if (j + len > block_end || len != 4 ||
7794 memcmp(&data[j], "1028", 4))
7795 goto vpd_done;
7796
7797 j = pci_vpd_find_info_keyword(data, i, rosize,
7798 PCI_VPD_RO_KEYWORD_VENDOR0);
7799 if (j < 0)
7800 goto vpd_done;
7801
7802 len = pci_vpd_info_field_size(&data[j]);
7803
7804 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7805 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7806 goto vpd_done;
7807
7808 memcpy(bp->fw_version, &data[j], len);
7809 bp->fw_version[len] = ' ';
7810
7811 vpd_done:
7812 kfree(data);
7813 }
7814
7815 static int __devinit
7816 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7817 {
7818 struct bnx2 *bp;
7819 unsigned long mem_len;
7820 int rc, i, j;
7821 u32 reg;
7822 u64 dma_mask, persist_dma_mask;
7823
7824 SET_NETDEV_DEV(dev, &pdev->dev);
7825 bp = netdev_priv(dev);
7826
7827 bp->flags = 0;
7828 bp->phy_flags = 0;
7829
7830 bp->temp_stats_blk =
7831 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7832
7833 if (bp->temp_stats_blk == NULL) {
7834 rc = -ENOMEM;
7835 goto err_out;
7836 }
7837
7838 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7839 rc = pci_enable_device(pdev);
7840 if (rc) {
7841 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7842 goto err_out;
7843 }
7844
7845 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7846 dev_err(&pdev->dev,
7847 "Cannot find PCI device base address, aborting\n");
7848 rc = -ENODEV;
7849 goto err_out_disable;
7850 }
7851
7852 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7853 if (rc) {
7854 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7855 goto err_out_disable;
7856 }
7857
7858 pci_set_master(pdev);
7859 pci_save_state(pdev);
7860
7861 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7862 if (bp->pm_cap == 0) {
7863 dev_err(&pdev->dev,
7864 "Cannot find power management capability, aborting\n");
7865 rc = -EIO;
7866 goto err_out_release;
7867 }
7868
7869 bp->dev = dev;
7870 bp->pdev = pdev;
7871
7872 spin_lock_init(&bp->phy_lock);
7873 spin_lock_init(&bp->indirect_lock);
7874 #ifdef BCM_CNIC
7875 mutex_init(&bp->cnic_lock);
7876 #endif
7877 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7878
7879 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7880 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7881 dev->mem_end = dev->mem_start + mem_len;
7882 dev->irq = pdev->irq;
7883
7884 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7885
7886 if (!bp->regview) {
7887 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7888 rc = -ENOMEM;
7889 goto err_out_release;
7890 }
7891
7892 /* Configure byte swap and enable write to the reg_window registers.
7893 * Rely on the CPU to do target byte swapping on big endian systems;
7894 * the chip's target access swapping will not swap all accesses.
7895 */
7896 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7897 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7898 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7899
7900 bnx2_set_power_state(bp, PCI_D0);
7901
7902 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7903
7904 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7905 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7906 dev_err(&pdev->dev,
7907 "Cannot find PCIE capability, aborting\n");
7908 rc = -EIO;
7909 goto err_out_unmap;
7910 }
7911 bp->flags |= BNX2_FLAG_PCIE;
7912 if (CHIP_REV(bp) == CHIP_REV_Ax)
7913 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7914 } else {
7915 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7916 if (bp->pcix_cap == 0) {
7917 dev_err(&pdev->dev,
7918 "Cannot find PCIX capability, aborting\n");
7919 rc = -EIO;
7920 goto err_out_unmap;
7921 }
7922 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7923 }
7924
7925 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7926 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7927 bp->flags |= BNX2_FLAG_MSIX_CAP;
7928 }
7929
7930 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7931 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7932 bp->flags |= BNX2_FLAG_MSI_CAP;
7933 }
7934
7935 /* 5708 cannot support DMA addresses > 40-bit. */
7936 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7937 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7938 else
7939 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7940
7941 /* Configure DMA attributes. */
7942 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7943 dev->features |= NETIF_F_HIGHDMA;
7944 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7945 if (rc) {
7946 dev_err(&pdev->dev,
7947 "pci_set_consistent_dma_mask failed, aborting\n");
7948 goto err_out_unmap;
7949 }
7950 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7951 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7952 goto err_out_unmap;
7953 }
7954
7955 if (!(bp->flags & BNX2_FLAG_PCIE))
7956 bnx2_get_pci_speed(bp);
7957
7958 /* 5706A0 may falsely detect SERR and PERR. */
7959 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7960 reg = REG_RD(bp, PCI_COMMAND);
7961 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7962 REG_WR(bp, PCI_COMMAND, reg);
7963 }
7964 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7965 !(bp->flags & BNX2_FLAG_PCIX)) {
7966
7967 dev_err(&pdev->dev,
7968 "5706 A1 can only be used in a PCIX bus, aborting\n");
7969 goto err_out_unmap;
7970 }
7971
7972 bnx2_init_nvram(bp);
7973
7974 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7975
7976 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7977 BNX2_SHM_HDR_SIGNATURE_SIG) {
7978 u32 off = PCI_FUNC(pdev->devfn) << 2;
7979
7980 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7981 } else
7982 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7983
7984 /* Get the permanent MAC address. First we need to make sure the
7985 * firmware is actually running.
7986 */
7987 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7988
7989 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7990 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7991 dev_err(&pdev->dev, "Firmware not running, aborting\n");
7992 rc = -ENODEV;
7993 goto err_out_unmap;
7994 }
7995
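/* Assemble bp->fw_version: the VPD product string (if any), followed
 * by the bootcode revision decoded from BNX2_DEV_INFO_BC_REV as
 * "bc x.y.z" with leading zeros suppressed, and, when management
 * firmware is running, up to 12 characters of its version read via the
 * shared-memory pointer.
 */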
7996 bnx2_read_vpd_fw_ver(bp);
7997
7998 j = strlen(bp->fw_version);
7999 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8000 for (i = 0; i < 3 && j < 24; i++) {
8001 u8 num, k, skip0;
8002
8003 if (i == 0) {
8004 bp->fw_version[j++] = 'b';
8005 bp->fw_version[j++] = 'c';
8006 bp->fw_version[j++] = ' ';
8007 }
8008 num = (u8) (reg >> (24 - (i * 8)));
8009 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8010 if (num >= k || !skip0 || k == 1) {
8011 bp->fw_version[j++] = (num / k) + '0';
8012 skip0 = 0;
8013 }
8014 }
8015 if (i != 2)
8016 bp->fw_version[j++] = '.';
8017 }
8018 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8019 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8020 bp->wol = 1;
8021
8022 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8023 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8024
8025 for (i = 0; i < 30; i++) {
8026 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8027 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8028 break;
8029 msleep(10);
8030 }
8031 }
8032 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8033 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8034 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8035 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8036 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8037
8038 if (j < 32)
8039 bp->fw_version[j++] = ' ';
8040 for (i = 0; i < 3 && j < 28; i++) {
8041 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8042 reg = swab32(reg);
8043 memcpy(&bp->fw_version[j], &reg, 4);
8044 j += 4;
8045 }
8046 }
8047
8048 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8049 bp->mac_addr[0] = (u8) (reg >> 8);
8050 bp->mac_addr[1] = (u8) reg;
8051
8052 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8053 bp->mac_addr[2] = (u8) (reg >> 24);
8054 bp->mac_addr[3] = (u8) (reg >> 16);
8055 bp->mac_addr[4] = (u8) (reg >> 8);
8056 bp->mac_addr[5] = (u8) reg;
8057
8058 bp->tx_ring_size = MAX_TX_DESC_CNT;
8059 bnx2_set_rx_ring_size(bp, 255);
8060
8061 bp->rx_csum = 1;
8062
8063 bp->tx_quick_cons_trip_int = 2;
8064 bp->tx_quick_cons_trip = 20;
8065 bp->tx_ticks_int = 18;
8066 bp->tx_ticks = 80;
8067
8068 bp->rx_quick_cons_trip_int = 2;
8069 bp->rx_quick_cons_trip = 12;
8070 bp->rx_ticks_int = 18;
8071 bp->rx_ticks = 18;
8072
8073 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8074
8075 bp->current_interval = BNX2_TIMER_INTERVAL;
8076
8077 bp->phy_addr = 1;
8078
8079 /* Determine the PHY media type; WOL is disabled below for SERDES configs that cannot keep link on VAUX. */
8080 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8081 bnx2_get_5709_media(bp);
8082 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8083 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8084
8085 bp->phy_port = PORT_TP;
8086 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8087 bp->phy_port = PORT_FIBRE;
8088 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8089 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8090 bp->flags |= BNX2_FLAG_NO_WOL;
8091 bp->wol = 0;
8092 }
8093 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8094 /* Don't do parallel detect on this board because of
8095 * some board problems. The link will not go down
8096 * if we do parallel detect.
8097 */
8098 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8099 pdev->subsystem_device == 0x310c)
8100 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8101 } else {
8102 bp->phy_addr = 2;
8103 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8104 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8105 }
8106 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8107 CHIP_NUM(bp) == CHIP_NUM_5708)
8108 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8109 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8110 (CHIP_REV(bp) == CHIP_REV_Ax ||
8111 CHIP_REV(bp) == CHIP_REV_Bx))
8112 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8113
8114 bnx2_init_fw_cap(bp);
8115
8116 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8117 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8118 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8119 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8120 bp->flags |= BNX2_FLAG_NO_WOL;
8121 bp->wol = 0;
8122 }
8123
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int = bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on the 5706 if an AMD 8132 bridge is found.
	 *
	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI
	 * writes with byte enables disabled on the unused 32-bit word.
	 * This is legal but causes problems on the AMD 8132, which will
	 * eventually stop responding.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

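/* Format a human-readable bus description into the caller-supplied
 * buffer, e.g. "PCI Express" or "PCI-X 64-bit 133MHz" (the speed being
 * whatever was probed into bp->bus_speed_mhz).
 */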
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

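/* Register one NAPI context per IRQ vector.  Vector 0 polls with
 * bnx2_poll (which presumably also handles link/slowpath events);
 * the remaining MSI-X vectors use bnx2_poll_msix.  64 is the
 * standard NAPI weight.
 */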
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bnapi->napi, poll, 64);
		bnapi->bp = bp;
	}
}

static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};

static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev is zeroed by alloc_etherdev_mq() */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

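	/* Advertise checksum, SG, and TSO offloads; the IPv6 variants are
	 * only enabled on CHIP_NUM_5709 parts.  Each flag is mirrored into
	 * vlan_features so the offloads keep working on VLAN devices.
	 */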
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

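	/* The stepping printed below is decoded from CHIP_ID: bits 15:12
	 * become a letter ('A' + value) and bits 11:4 the number,
	 * e.g. "(C0)".
	 */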
	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}

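/* Tear down in roughly the reverse order of probe: stop deferred work,
 * unregister the netdev, drop the firmware references, unmap the
 * registers, then release the PCI resources.
 */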
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

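/* PCI power-management entry points, wired into bnx2_pci_driver below. */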
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us
 * that it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}

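/* PCI error-recovery (AER) hooks: detach and request a reset when an
 * error is detected, re-initialize after the slot reset, and restart
 * traffic on resume.
 */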
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);