netxen: support for ethtool set ringparam
[deliverable/linux.git] drivers/net/netxen/netxen_nic_main.c
1 /*
2 * Copyright (C) 2003 - 2009 NetXen, Inc.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
22 *
23 * Contact Information:
24 * info@netxen.com
25 * NetXen Inc,
26 * 18922 Forge Drive
27 * Cupertino, CA 95014-0701
28 *
29 */
30
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include "netxen_nic_hw.h"
34
35 #include "netxen_nic.h"
36 #include "netxen_nic_phan_reg.h"
37
38 #include <linux/dma-mapping.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <linux/ipv6.h>
42 #include <linux/inetdevice.h>
43
44 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
45 MODULE_LICENSE("GPL");
46 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
47
48 char netxen_nic_driver_name[] = "netxen_nic";
49 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
50 NETXEN_NIC_LINUX_VERSIONID;
51
52 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
53
54 /* Default to restricted 1G auto-neg mode */
55 static int wol_port_mode = 5;
56
57 static int use_msi = 1;
58
59 static int use_msi_x = 1;
60
61 /* Functions local to the NetXen NIC driver */
62 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
63 const struct pci_device_id *ent);
64 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
65 static int netxen_nic_open(struct net_device *netdev);
66 static int netxen_nic_close(struct net_device *netdev);
67 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
68 static void netxen_tx_timeout(struct net_device *netdev);
69 static void netxen_reset_task(struct work_struct *work);
70 static void netxen_watchdog(unsigned long);
71 static int netxen_nic_poll(struct napi_struct *napi, int budget);
72 #ifdef CONFIG_NET_POLL_CONTROLLER
73 static void netxen_nic_poll_controller(struct net_device *netdev);
74 #endif
75 static irqreturn_t netxen_intr(int irq, void *data);
76 static irqreturn_t netxen_msi_intr(int irq, void *data);
77 static irqreturn_t netxen_msix_intr(int irq, void *data);
78
79 /* PCI Device ID Table */
80 #define ENTRY(device) \
81 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
82 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
83
84 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
85 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
86 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
87 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
88 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
89 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
91 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
92 ENTRY(PCI_DEVICE_ID_NX3031),
93 {0,}
94 };
95
96 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
97
98 static struct workqueue_struct *netxen_workq;
99 #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
100 #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
101
102 static void netxen_watchdog(unsigned long);
103
104 static uint32_t crb_cmd_producer[4] = {
105 CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
106 CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
107 };
108
109 void
110 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
111 struct nx_host_tx_ring *tx_ring)
112 {
113 NXWR32(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
114
115 if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
116 netif_stop_queue(adapter->netdev);
117 smp_mb();
118 }
119 }
120
121 static uint32_t crb_cmd_consumer[4] = {
122 CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
123 CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
124 };
125
126 static inline void
127 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
128 struct nx_host_tx_ring *tx_ring)
129 {
130 NXWR32(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
131 }
132
133 static uint32_t msi_tgt_status[8] = {
134 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
135 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
136 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
137 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
138 };
139
140 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
141
142 static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
143 {
144 struct netxen_adapter *adapter = sds_ring->adapter;
145
146 NXWR32(adapter, sds_ring->crb_intr_mask, 0);
147 }
148
149 static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
150 {
151 struct netxen_adapter *adapter = sds_ring->adapter;
152
153 NXWR32(adapter, sds_ring->crb_intr_mask, 0x1);
154
155 if (!NETXEN_IS_MSI_FAMILY(adapter))
156 adapter->pci_write_immediate(adapter,
157 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
158 }
159
160 static int
161 netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
162 {
163 int size = sizeof(struct nx_host_sds_ring) * count;
164
165 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
166
167 return (recv_ctx->sds_rings == NULL);
168 }
169
170 static void
171 netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
172 {
173 if (recv_ctx->sds_rings != NULL)
174 kfree(recv_ctx->sds_rings);
175 }
176
177 static int
178 netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
179 {
180 int ring;
181 struct nx_host_sds_ring *sds_ring;
182 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
183
184 if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
185 return -ENOMEM;
186
187 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
188 sds_ring = &recv_ctx->sds_rings[ring];
189 netif_napi_add(netdev, &sds_ring->napi,
190 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
191 }
192
193 return 0;
194 }
195
196 static void
197 netxen_napi_enable(struct netxen_adapter *adapter)
198 {
199 int ring;
200 struct nx_host_sds_ring *sds_ring;
201 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
202
203 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
204 sds_ring = &recv_ctx->sds_rings[ring];
205 napi_enable(&sds_ring->napi);
206 netxen_nic_enable_int(sds_ring);
207 }
208 }
209
210 static void
211 netxen_napi_disable(struct netxen_adapter *adapter)
212 {
213 int ring;
214 struct nx_host_sds_ring *sds_ring;
215 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
216
217 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
218 sds_ring = &recv_ctx->sds_rings[ring];
219 netxen_nic_disable_int(sds_ring);
220 napi_synchronize(&sds_ring->napi);
221 napi_disable(&sds_ring->napi);
222 }
223 }
224
225 static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
226 {
227 struct pci_dev *pdev = adapter->pdev;
228 uint64_t mask, cmask;
229
230 adapter->pci_using_dac = 0;
231
232 mask = DMA_BIT_MASK(32);
233 /*
234 * Consistent DMA mask is set to 32 bit because it cannot be set to
235 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
236 * come off this pool.
237 */
238 cmask = DMA_BIT_MASK(32);
239
240 #ifndef CONFIG_IA64
241 if (revision_id >= NX_P3_B0)
242 mask = DMA_BIT_MASK(39);
243 else if (revision_id == NX_P2_C1)
244 mask = DMA_BIT_MASK(35);
245 #endif
246 if (pci_set_dma_mask(pdev, mask) == 0 &&
247 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
248 adapter->pci_using_dac = 1;
249 return 0;
250 }
251
252 return -EIO;
253 }
254
255 /* Update addressable range if firmware supports it */
256 static int
257 nx_update_dma_mask(struct netxen_adapter *adapter)
258 {
259 int change, shift, err;
260 uint64_t mask, old_mask;
261 struct pci_dev *pdev = adapter->pdev;
262
263 change = 0;
264
265 shift = NXRD32(adapter, CRB_DMA_SHIFT);
266 if (shift >= 32)
267 return 0;
268
269 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
270 change = 1;
271 else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
272 change = 1;
273
274 if (change) {
275 old_mask = pdev->dma_mask;
276 mask = (1ULL<<(32+shift)) - 1;
277
278 err = pci_set_dma_mask(pdev, mask);
279 if (err)
280 return pci_set_dma_mask(pdev, old_mask);
281 }
282
283 return 0;
284 }
285
286 static void
287 netxen_check_options(struct netxen_adapter *adapter)
288 {
289 if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
290 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
291 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
292 } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
293 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
294 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
295 }
296
297 adapter->msix_supported = 0;
298 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
299 adapter->msix_supported = !!use_msi_x;
300 adapter->rss_supported = !!use_msi_x;
301 } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
302 switch (adapter->ahw.board_type) {
303 case NETXEN_BRDTYPE_P2_SB31_10G:
304 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
305 adapter->msix_supported = !!use_msi_x;
306 adapter->rss_supported = !!use_msi_x;
307 break;
308 default:
309 break;
310 }
311 }
312
313 adapter->num_txd = MAX_CMD_DESCRIPTORS;
314
315 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
316 adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
317 adapter->max_rds_rings = 3;
318 } else {
319 adapter->num_lro_rxd = 0;
320 adapter->max_rds_rings = 2;
321 }
322 }
323
324 static int
325 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
326 {
327 u32 val, timeout;
328
329 if (first_boot == 0x55555555) {
330 /* This is the first boot after power up */
331 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
332
333 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
334 return 0;
335
336 /* PCI bus master workaround */
337 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
338 if (!(first_boot & 0x4)) {
339 first_boot |= 0x4;
340 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
341 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
342 }
343
344 /* This is the first boot after power up */
345 first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
346 if (first_boot != 0x80000f) {
347 /* clear the register for future unloads/loads */
348 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
349 return -EIO;
350 }
351
352 /* Start P2 boot loader */
353 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
354 NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
355 timeout = 0;
356 do {
357 msleep(1);
358 val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
359
360 if (++timeout > 5000)
361 return -EIO;
362
363 } while (val == NETXEN_BDINFO_MAGIC);
364 }
365 return 0;
366 }
367
368 static void netxen_set_port_mode(struct netxen_adapter *adapter)
369 {
370 u32 val, data;
371
372 val = adapter->ahw.board_type;
373 if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
374 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
375 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
376 data = NETXEN_PORT_MODE_802_3_AP;
377 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
378 } else if (port_mode == NETXEN_PORT_MODE_XG) {
379 data = NETXEN_PORT_MODE_XG;
380 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
381 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
382 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
383 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
384 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
385 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
386 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
387 } else {
388 data = NETXEN_PORT_MODE_AUTO_NEG;
389 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
390 }
391
392 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
393 (wol_port_mode != NETXEN_PORT_MODE_XG) &&
394 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
395 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
396 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
397 }
398 NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
399 }
400 }
401
402 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
403 {
404 u32 control;
405 int pos;
406
407 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
408 if (pos) {
409 pci_read_config_dword(pdev, pos, &control);
410 if (enable)
411 control |= PCI_MSIX_FLAGS_ENABLE;
412 else
413 control = 0;
414 pci_write_config_dword(pdev, pos, control);
415 }
416 }
417
418 static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
419 {
420 int i;
421
422 for (i = 0; i < count; i++)
423 adapter->msix_entries[i].entry = i;
424 }
425
426 static int
427 netxen_read_mac_addr(struct netxen_adapter *adapter)
428 {
429 int i;
430 unsigned char *p;
431 __le64 mac_addr;
432 struct net_device *netdev = adapter->netdev;
433 struct pci_dev *pdev = adapter->pdev;
434
435 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
436 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
437 return -EIO;
438 } else {
439 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
440 return -EIO;
441 }
442
443 p = (unsigned char *)&mac_addr;
444 for (i = 0; i < 6; i++)
445 netdev->dev_addr[i] = *(p + 5 - i);
446
447 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
448
449 /* set station address */
450
451 if (!is_valid_ether_addr(netdev->perm_addr))
452 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
453
454 return 0;
455 }
456
457 int netxen_nic_set_mac(struct net_device *netdev, void *p)
458 {
459 struct netxen_adapter *adapter = netdev_priv(netdev);
460 struct sockaddr *addr = p;
461
462 if (!is_valid_ether_addr(addr->sa_data))
463 return -EINVAL;
464
465 if (netif_running(netdev)) {
466 netif_device_detach(netdev);
467 netxen_napi_disable(adapter);
468 }
469
470 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
471 adapter->macaddr_set(adapter, addr->sa_data);
472
473 if (netif_running(netdev)) {
474 netif_device_attach(netdev);
475 netxen_napi_enable(adapter);
476 }
477 return 0;
478 }
479
480 static void netxen_set_multicast_list(struct net_device *dev)
481 {
482 struct netxen_adapter *adapter = netdev_priv(dev);
483
484 adapter->set_multi(dev);
485 }
486
487 static const struct net_device_ops netxen_netdev_ops = {
488 .ndo_open = netxen_nic_open,
489 .ndo_stop = netxen_nic_close,
490 .ndo_start_xmit = netxen_nic_xmit_frame,
491 .ndo_get_stats = netxen_nic_get_stats,
492 .ndo_validate_addr = eth_validate_addr,
493 .ndo_set_multicast_list = netxen_set_multicast_list,
494 .ndo_set_mac_address = netxen_nic_set_mac,
495 .ndo_change_mtu = netxen_nic_change_mtu,
496 .ndo_tx_timeout = netxen_tx_timeout,
497 #ifdef CONFIG_NET_POLL_CONTROLLER
498 .ndo_poll_controller = netxen_nic_poll_controller,
499 #endif
500 };
501
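/*
 * netxen_setup_intr() below selects the interrupt mode in order of
 * preference: MSI-X (one SDS ring per vector when RSS is supported),
 * then MSI, and finally legacy INTx using the per-function legacy
 * interrupt register set.
 */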
502 static void
503 netxen_setup_intr(struct netxen_adapter *adapter)
504 {
505 struct netxen_legacy_intr_set *legacy_intrp;
506 struct pci_dev *pdev = adapter->pdev;
507 int err, num_msix;
508
509 if (adapter->rss_supported) {
510 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
511 MSIX_ENTRIES_PER_ADAPTER : 2;
512 } else
513 num_msix = 1;
514
515 adapter->max_sds_rings = 1;
516
517 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
518
519 if (adapter->ahw.revision_id >= NX_P3_B0)
520 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
521 else
522 legacy_intrp = &legacy_intr[0];
523 adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
524 adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
525 adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
526 adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;
527
528 netxen_set_msix_bit(pdev, 0);
529
530 if (adapter->msix_supported) {
531
532 netxen_init_msix_entries(adapter, num_msix);
533 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
534 if (err == 0) {
535 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
536 netxen_set_msix_bit(pdev, 1);
537
538 if (adapter->rss_supported)
539 adapter->max_sds_rings = num_msix;
540
541 dev_info(&pdev->dev, "using msi-x interrupts\n");
542 return;
543 }
544
545 if (err > 0)
546 pci_disable_msix(pdev);
547
548 /* fall through for msi */
549 }
550
551 if (use_msi && !pci_enable_msi(pdev)) {
552 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
553 adapter->msi_tgt_status =
554 msi_tgt_status[adapter->ahw.pci_func];
555 dev_info(&pdev->dev, "using msi interrupts\n");
556 adapter->msix_entries[0].vector = pdev->irq;
557 return;
558 }
559
560 dev_info(&pdev->dev, "using legacy interrupts\n");
561 adapter->msix_entries[0].vector = pdev->irq;
562 }
563
564 static void
565 netxen_teardown_intr(struct netxen_adapter *adapter)
566 {
567 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
568 pci_disable_msix(adapter->pdev);
569 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
570 pci_disable_msi(adapter->pdev);
571 }
572
573 static void
574 netxen_cleanup_pci_map(struct netxen_adapter *adapter)
575 {
576 if (adapter->ahw.db_base != NULL)
577 iounmap(adapter->ahw.db_base);
578 if (adapter->ahw.pci_base0 != NULL)
579 iounmap(adapter->ahw.pci_base0);
580 if (adapter->ahw.pci_base1 != NULL)
581 iounmap(adapter->ahw.pci_base1);
582 if (adapter->ahw.pci_base2 != NULL)
583 iounmap(adapter->ahw.pci_base2);
584 }
585
586 static int
587 netxen_setup_pci_map(struct netxen_adapter *adapter)
588 {
589 void __iomem *mem_ptr0 = NULL;
590 void __iomem *mem_ptr1 = NULL;
591 void __iomem *mem_ptr2 = NULL;
592 void __iomem *db_ptr = NULL;
593
594 unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;
595
596 struct pci_dev *pdev = adapter->pdev;
597 int pci_func = adapter->ahw.pci_func;
598
599 int err = 0;
600
601 /*
602 * Set the CRB window to invalid. If any register in window 0 is
603 * accessed it should set the window to 0 and then reset it to 1.
604 */
605 adapter->curr_window = 255;
606 adapter->ahw.qdr_sn_window = -1;
607 adapter->ahw.ddr_mn_window = -1;
608
609 /* remap phys address */
610 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
611 mem_len = pci_resource_len(pdev, 0);
612 pci_len0 = 0;
613
614 adapter->hw_write_wx = netxen_nic_hw_write_wx_128M;
615 adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
616 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
617 adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
618 adapter->pci_set_window = netxen_nic_pci_set_window_128M;
619 adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
620 adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
621
622 /* 128 Meg of memory */
623 if (mem_len == NETXEN_PCI_128MB_SIZE) {
624 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
625 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
626 SECOND_PAGE_GROUP_SIZE);
627 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
628 THIRD_PAGE_GROUP_SIZE);
629 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
630 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
631 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
632 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
633 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
634 adapter->hw_write_wx = netxen_nic_hw_write_wx_2M;
635 adapter->hw_read_wx = netxen_nic_hw_read_wx_2M;
636 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
637 adapter->pci_write_immediate =
638 netxen_nic_pci_write_immediate_2M;
639 adapter->pci_set_window = netxen_nic_pci_set_window_2M;
640 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
641 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
642
643 mem_ptr0 = pci_ioremap_bar(pdev, 0);
644 if (mem_ptr0 == NULL) {
645 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
646 return -EIO;
647 }
648 pci_len0 = mem_len;
649
650 adapter->ahw.ddr_mn_window = 0;
651 adapter->ahw.qdr_sn_window = 0;
652
653 adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
654 (pci_func * 0x20);
655 adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW;
656 if (pci_func < 4)
657 adapter->ahw.ms_win_crb += (pci_func * 0x20);
658 else
659 adapter->ahw.ms_win_crb +=
660 0xA0 + ((pci_func - 4) * 0x10);
661 } else {
662 return -EIO;
663 }
664
665 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
666
667 adapter->ahw.pci_base0 = mem_ptr0;
668 adapter->ahw.pci_len0 = pci_len0;
669 adapter->ahw.pci_base1 = mem_ptr1;
670 adapter->ahw.pci_base2 = mem_ptr2;
671
672 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
673 goto skip_doorbell;
674
675 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
676 db_len = pci_resource_len(pdev, 4);
677
678 if (db_len == 0) {
679 printk(KERN_ERR "%s: doorbell is disabled\n",
680 netxen_nic_driver_name);
681 err = -EIO;
682 goto err_out;
683 }
684
685 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
686 if (!db_ptr) {
687 printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
688 netxen_nic_driver_name);
689 err = -EIO;
690 goto err_out;
691 }
692
693 skip_doorbell:
694 adapter->ahw.db_base = db_ptr;
695 adapter->ahw.db_len = db_len;
696 return 0;
697
698 err_out:
699 netxen_cleanup_pci_map(adapter);
700 return err;
701 }
702
703 static int
704 netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
705 {
706 int val, err, first_boot;
707 struct pci_dev *pdev = adapter->pdev;
708
709 int first_driver = 0;
710
711 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
712 first_driver = (adapter->portnum == 0);
713 else
714 first_driver = (adapter->ahw.pci_func == 0);
715
716 if (!first_driver)
717 goto wait_init;
718
719 first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
720
721 err = netxen_check_hw_init(adapter, first_boot);
722 if (err) {
723 dev_err(&pdev->dev, "error in HW init sequence\n");
724 return err;
725 }
726
727 if (request_fw)
728 netxen_request_firmware(adapter);
729
730 err = netxen_need_fw_reset(adapter);
731 if (err <= 0)
732 return err;
733
734 if (first_boot != 0x55555555) {
735 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
736 netxen_pinit_from_rom(adapter, 0);
737 msleep(1);
738 }
739
740 NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
741 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
742 netxen_set_port_mode(adapter);
743
744 netxen_load_firmware(adapter);
745
746 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
747
748 /* Initialize multicast addr pool owners */
749 val = 0x7654;
750 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
751 val |= 0x0f000000;
752 NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
753
754 }
755
756 err = netxen_init_dummy_dma(adapter);
757 if (err)
758 return err;
759
760 /*
761 * Tell the hardware our version number.
762 */
763 val = (_NETXEN_NIC_LINUX_MAJOR << 16)
764 | ((_NETXEN_NIC_LINUX_MINOR << 8))
765 | (_NETXEN_NIC_LINUX_SUBVERSION);
766 NXWR32(adapter, CRB_DRIVER_VERSION, val);
767
768 wait_init:
769 /* Handshake with the card before we register the devices. */
770 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
771 if (err) {
772 netxen_free_dummy_dma(adapter);
773 return err;
774 }
775
776 nx_update_dma_mask(adapter);
777
778 netxen_nic_get_firmware_info(adapter);
779
780 return 0;
781 }
782
783 static int
784 netxen_nic_request_irq(struct netxen_adapter *adapter)
785 {
786 irq_handler_t handler;
787 struct nx_host_sds_ring *sds_ring;
788 int err, ring;
789
790 unsigned long flags = IRQF_SAMPLE_RANDOM;
791 struct net_device *netdev = adapter->netdev;
792 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
793
794 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
795 handler = netxen_msix_intr;
796 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
797 handler = netxen_msi_intr;
798 else {
799 flags |= IRQF_SHARED;
800 handler = netxen_intr;
801 }
802 adapter->irq = netdev->irq;
803
804 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
805 sds_ring = &recv_ctx->sds_rings[ring];
806 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
807 err = request_irq(sds_ring->irq, handler,
808 flags, sds_ring->name, sds_ring);
809 if (err)
810 return err;
811 }
812
813 return 0;
814 }
815
816 static void
817 netxen_nic_free_irq(struct netxen_adapter *adapter)
818 {
819 int ring;
820 struct nx_host_sds_ring *sds_ring;
821
822 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
823
824 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
825 sds_ring = &recv_ctx->sds_rings[ring];
826 free_irq(sds_ring->irq, sds_ring);
827 }
828 }
829
830 static void
831 netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
832 {
833 adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
834 adapter->coal.normal.data.rx_time_us =
835 NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
836 adapter->coal.normal.data.rx_packets =
837 NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
838 adapter->coal.normal.data.tx_time_us =
839 NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
840 adapter->coal.normal.data.tx_packets =
841 NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
842 }
843
844 static int
845 netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
846 {
847 int err;
848
849 err = adapter->init_port(adapter, adapter->physical_port);
850 if (err) {
851 printk(KERN_ERR "%s: Failed to initialize port %d\n",
852 netxen_nic_driver_name, adapter->portnum);
853 return err;
854 }
855 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
856 adapter->macaddr_set(adapter, netdev->dev_addr);
857
858 adapter->set_multi(netdev);
859 adapter->set_mtu(adapter, netdev->mtu);
860
861 adapter->ahw.linkup = 0;
862
863 if (adapter->max_sds_rings > 1)
864 netxen_config_rss(adapter, 1);
865
866 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
867 netxen_config_intr_coalesce(adapter);
868
869 netxen_napi_enable(adapter);
870
871 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
872 netxen_linkevent_request(adapter, 1);
873 else
874 netxen_nic_set_link_parameters(adapter);
875
876 mod_timer(&adapter->watchdog_timer, jiffies);
877
878 return 0;
879 }
880
881 static void
882 netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
883 {
884 spin_lock(&adapter->tx_clean_lock);
885 netif_carrier_off(netdev);
886 netif_tx_disable(netdev);
887
888 if (adapter->stop_port)
889 adapter->stop_port(adapter);
890
891 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
892 netxen_p3_free_mac_list(adapter);
893
894 netxen_napi_disable(adapter);
895
896 netxen_release_tx_buffers(adapter);
897 spin_unlock(&adapter->tx_clean_lock);
898
899 del_timer_sync(&adapter->watchdog_timer);
900 FLUSH_SCHEDULED_WORK();
901 }
902
903
904 static int
905 netxen_nic_attach(struct netxen_adapter *adapter)
906 {
907 struct net_device *netdev = adapter->netdev;
908 struct pci_dev *pdev = adapter->pdev;
909 int err, ring;
910 struct nx_host_rds_ring *rds_ring;
911 struct nx_host_tx_ring *tx_ring;
912
913 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
914 return 0;
915
916 err = netxen_init_firmware(adapter);
917 if (err != 0) {
918 printk(KERN_ERR "Failed to init firmware\n");
919 return -EIO;
920 }
921
922 err = netxen_alloc_sw_resources(adapter);
923 if (err) {
924 printk(KERN_ERR "%s: Error in setting sw resources\n",
925 netdev->name);
926 return err;
927 }
928
929 netxen_nic_clear_stats(adapter);
930
931 err = netxen_alloc_hw_resources(adapter);
932 if (err) {
933 printk(KERN_ERR "%s: Error in setting hw resources\n",
934 netdev->name);
935 goto err_out_free_sw;
936 }
937
938 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
939 tx_ring = adapter->tx_ring;
940 tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
941 tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
942
943 tx_ring->producer = 0;
944 tx_ring->sw_consumer = 0;
945
946 netxen_nic_update_cmd_producer(adapter, tx_ring);
947 netxen_nic_update_cmd_consumer(adapter, tx_ring);
948 }
949
950 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
951 rds_ring = &adapter->recv_ctx.rds_rings[ring];
952 netxen_post_rx_buffers(adapter, ring, rds_ring);
953 }
954
955 err = netxen_nic_request_irq(adapter);
956 if (err) {
957 dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
958 netdev->name);
959 goto err_out_free_rxbuf;
960 }
961
962 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
963 netxen_nic_init_coalesce_defaults(adapter);
964
965 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
966 return 0;
967
968 err_out_free_rxbuf:
969 netxen_release_rx_buffers(adapter);
970 netxen_free_hw_resources(adapter);
971 err_out_free_sw:
972 netxen_free_sw_resources(adapter);
973 return err;
974 }
975
976 static void
977 netxen_nic_detach(struct netxen_adapter *adapter)
978 {
979 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
980 return;
981
982 netxen_free_hw_resources(adapter);
983 netxen_release_rx_buffers(adapter);
984 netxen_nic_free_irq(adapter);
985 netxen_free_sw_resources(adapter);
986
987 adapter->is_up = 0;
988 }
989
990 int
991 netxen_nic_reset_context(struct netxen_adapter *adapter)
992 {
993 int err = 0;
994 struct net_device *netdev = adapter->netdev;
995
996 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
997
998 if (netif_running(netdev))
999 netxen_nic_down(adapter, netdev);
1000
1001 netxen_nic_detach(adapter);
1002
1003 err = netxen_nic_attach(adapter);
1004 if (err)
1005 goto done;
1006
1007 if (netif_running(netdev))
1008 err = netxen_nic_up(adapter, netdev);
1009 }
1010 done:
1011 return err;
1012 }
1013
1014 static int
1015 netxen_setup_netdev(struct netxen_adapter *adapter,
1016 struct net_device *netdev)
1017 {
1018 int err = 0;
1019 struct pci_dev *pdev = adapter->pdev;
1020
1021 adapter->rx_csum = 1;
1022 adapter->mc_enabled = 0;
1023 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
1024 adapter->max_mc_count = 38;
1025 else
1026 adapter->max_mc_count = 16;
1027
1028 netdev->netdev_ops = &netxen_netdev_ops;
1029 netdev->watchdog_timeo = 2*HZ;
1030
1031 netxen_nic_change_mtu(netdev, netdev->mtu);
1032
1033 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
1034
1035 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1036 netdev->features |= (NETIF_F_GRO);
1037 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1038
1039 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1040 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1041 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1042 }
1043
1044 if (adapter->pci_using_dac) {
1045 netdev->features |= NETIF_F_HIGHDMA;
1046 netdev->vlan_features |= NETIF_F_HIGHDMA;
1047 }
1048
1049 if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
1050 netdev->features |= (NETIF_F_HW_VLAN_TX);
1051
1052 netdev->irq = adapter->msix_entries[0].vector;
1053
1054 err = netxen_napi_add(adapter, netdev);
1055 if (err)
1056 return err;
1057
1058 init_timer(&adapter->watchdog_timer);
1059 adapter->watchdog_timer.function = &netxen_watchdog;
1060 adapter->watchdog_timer.data = (unsigned long)adapter;
1061 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
1062 INIT_WORK(&adapter->tx_timeout_task, netxen_reset_task);
1063
1064 if (netxen_read_mac_addr(adapter))
1065 dev_warn(&pdev->dev, "failed to read mac addr\n");
1066
1067 netif_carrier_off(netdev);
1068 netif_stop_queue(netdev);
1069
1070 err = register_netdev(netdev);
1071 if (err) {
1072 dev_err(&pdev->dev, "failed to register net device\n");
1073 return err;
1074 }
1075
1076 return 0;
1077 }
1078
1079 static int __devinit
1080 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1081 {
1082 struct net_device *netdev = NULL;
1083 struct netxen_adapter *adapter = NULL;
1084 int i = 0, err;
1085 int pci_func_id = PCI_FUNC(pdev->devfn);
1086 uint8_t revision_id;
1087
1088 if (pdev->class != 0x020000) {
1089 printk(KERN_DEBUG "NetXen function %d, class %x will not "
1090 "be enabled.\n", pci_func_id, pdev->class);
1091 return -ENODEV;
1092 }
1093
1094 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
1095 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x "
1096 "will not be enabled.\n",
1097 NX_P3_A0, NX_P3_B1);
1098 return -ENODEV;
1099 }
1100
1101 if ((err = pci_enable_device(pdev)))
1102 return err;
1103
1104 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1105 err = -ENODEV;
1106 goto err_out_disable_pdev;
1107 }
1108
1109 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
1110 goto err_out_disable_pdev;
1111
1112 pci_set_master(pdev);
1113
1114 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
1115 if (!netdev) {
1116 dev_err(&pdev->dev, "failed to allocate net_device\n");
1117 err = -ENOMEM;
1118 goto err_out_free_res;
1119 }
1120
1121 SET_NETDEV_DEV(netdev, &pdev->dev);
1122
1123 adapter = netdev_priv(netdev);
1124 adapter->netdev = netdev;
1125 adapter->pdev = pdev;
1126 adapter->ahw.pci_func = pci_func_id;
1127
1128 revision_id = pdev->revision;
1129 adapter->ahw.revision_id = revision_id;
1130
1131 err = nx_set_dma_mask(adapter, revision_id);
1132 if (err)
1133 goto err_out_free_netdev;
1134
1135 rwlock_init(&adapter->adapter_lock);
1136 spin_lock_init(&adapter->tx_clean_lock);
1137 INIT_LIST_HEAD(&adapter->mac_list);
1138
1139 err = netxen_setup_pci_map(adapter);
1140 if (err)
1141 goto err_out_free_netdev;
1142
1143 /* This will be reset for mezz cards */
1144 adapter->portnum = pci_func_id;
1145
1146 err = netxen_nic_get_board_info(adapter);
1147 if (err) {
1148 dev_err(&pdev->dev, "Error getting board config info.\n");
1149 goto err_out_iounmap;
1150 }
1151
1152 netxen_initialize_adapter_ops(adapter);
1153
1154 /* Mezz cards have PCI function 0,2,3 enabled */
1155 switch (adapter->ahw.board_type) {
1156 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
1157 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
1158 if (pci_func_id >= 2)
1159 adapter->portnum = pci_func_id - 2;
1160 break;
1161 default:
1162 break;
1163 }
1164
1165 err = netxen_start_firmware(adapter, 1);
1166 if (err)
1167 goto err_out_iounmap;
1168
1169 /*
1170 * See if the firmware gave us a virtual-physical port mapping.
1171 */
1172 adapter->physical_port = adapter->portnum;
1173 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1174 i = NXRD32(adapter, CRB_V2P(adapter->portnum));
1175 if (i != 0x55555555)
1176 adapter->physical_port = i;
1177 }
1178
1179 netxen_check_options(adapter);
1180
1181 netxen_setup_intr(adapter);
1182
1183 err = netxen_setup_netdev(adapter, netdev);
1184 if (err)
1185 goto err_out_disable_msi;
1186
1187 pci_set_drvdata(pdev, adapter);
1188
1189 switch (adapter->ahw.port_type) {
1190 case NETXEN_NIC_GBE:
1191 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1192 adapter->netdev->name);
1193 break;
1194 case NETXEN_NIC_XGBE:
1195 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1196 adapter->netdev->name);
1197 break;
1198 }
1199
1200 return 0;
1201
1202 err_out_disable_msi:
1203 netxen_teardown_intr(adapter);
1204
1205 netxen_free_dummy_dma(adapter);
1206
1207 err_out_iounmap:
1208 netxen_cleanup_pci_map(adapter);
1209
1210 err_out_free_netdev:
1211 free_netdev(netdev);
1212
1213 err_out_free_res:
1214 pci_release_regions(pdev);
1215
1216 err_out_disable_pdev:
1217 pci_set_drvdata(pdev, NULL);
1218 pci_disable_device(pdev);
1219 return err;
1220 }
1221
1222 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1223 {
1224 struct netxen_adapter *adapter;
1225 struct net_device *netdev;
1226
1227 adapter = pci_get_drvdata(pdev);
1228 if (adapter == NULL)
1229 return;
1230
1231 netdev = adapter->netdev;
1232
1233 unregister_netdev(netdev);
1234
1235 netxen_nic_detach(adapter);
1236
1237 if (adapter->portnum == 0)
1238 netxen_free_dummy_dma(adapter);
1239
1240 netxen_teardown_intr(adapter);
1241 netxen_free_sds_rings(&adapter->recv_ctx);
1242
1243 netxen_cleanup_pci_map(adapter);
1244
1245 netxen_release_firmware(adapter);
1246
1247 pci_release_regions(pdev);
1248 pci_disable_device(pdev);
1249 pci_set_drvdata(pdev, NULL);
1250
1251 free_netdev(netdev);
1252 }
1253
1254 #ifdef CONFIG_PM
1255 static int
1256 netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1257 {
1258
1259 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1260 struct net_device *netdev = adapter->netdev;
1261
1262 netif_device_detach(netdev);
1263
1264 if (netif_running(netdev))
1265 netxen_nic_down(adapter, netdev);
1266
1267 netxen_nic_detach(adapter);
1268
1269 pci_save_state(pdev);
1270
1271 if (netxen_nic_wol_supported(adapter)) {
1272 pci_enable_wake(pdev, PCI_D3cold, 1);
1273 pci_enable_wake(pdev, PCI_D3hot, 1);
1274 }
1275
1276 pci_disable_device(pdev);
1277 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1278
1279 return 0;
1280 }
1281
1282 static int
1283 netxen_nic_resume(struct pci_dev *pdev)
1284 {
1285 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1286 struct net_device *netdev = adapter->netdev;
1287 int err;
1288
1289 pci_set_power_state(pdev, PCI_D0);
1290 pci_restore_state(pdev);
1291
1292 err = pci_enable_device(pdev);
1293 if (err)
1294 return err;
1295
1296 adapter->curr_window = 255;
1297
1298 err = netxen_start_firmware(adapter, 0);
1299 if (err) {
1300 dev_err(&pdev->dev, "failed to start firmware\n");
1301 return err;
1302 }
1303
1304 if (netif_running(netdev)) {
1305 err = netxen_nic_attach(adapter);
1306 if (err)
1307 return err;
1308
1309 err = netxen_nic_up(adapter, netdev);
1310 if (err)
1311 return err;
1312
1313 netif_device_attach(netdev);
1314 }
1315
1316 return 0;
1317 }
1318 #endif
1319
1320 static int netxen_nic_open(struct net_device *netdev)
1321 {
1322 struct netxen_adapter *adapter = netdev_priv(netdev);
1323 int err = 0;
1324
1325 if (adapter->driver_mismatch)
1326 return -EIO;
1327
1328 err = netxen_nic_attach(adapter);
1329 if (err)
1330 return err;
1331
1332 err = netxen_nic_up(adapter, netdev);
1333 if (err)
1334 goto err_out;
1335
1336 netif_start_queue(netdev);
1337
1338 return 0;
1339
1340 err_out:
1341 netxen_nic_detach(adapter);
1342 return err;
1343 }
1344
1345 /*
1346 * netxen_nic_close - Disables a network interface entry point
1347 */
1348 static int netxen_nic_close(struct net_device *netdev)
1349 {
1350 struct netxen_adapter *adapter = netdev_priv(netdev);
1351
1352 netxen_nic_down(adapter, netdev);
1353 return 0;
1354 }
1355
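/*
 * netxen_tso_check() picks the TX opcode for checksum or TSO offload
 * and, for LSO frames, copies the MAC/IP/TCP headers (prefixed with a
 * VLAN header template when an out-of-band tag is present) into the
 * command descriptor ring so the firmware can replicate them for each
 * segment.
 */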
1356 static void
1357 netxen_tso_check(struct net_device *netdev,
1358 struct nx_host_tx_ring *tx_ring,
1359 struct cmd_desc_type0 *first_desc,
1360 struct sk_buff *skb)
1361 {
1362 u8 opcode = TX_ETHER_PKT;
1363 __be16 protocol = skb->protocol;
1364 u16 flags = 0, vid = 0;
1365 u32 producer;
1366 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1367 struct cmd_desc_type0 *hwdesc;
1368 struct vlan_ethhdr *vh;
1369
1370 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1371
1372 vh = (struct vlan_ethhdr *)skb->data;
1373 protocol = vh->h_vlan_encapsulated_proto;
1374 flags = FLAGS_VLAN_TAGGED;
1375
1376 } else if (vlan_tx_tag_present(skb)) {
1377
1378 flags = FLAGS_VLAN_OOB;
1379 vid = vlan_tx_tag_get(skb);
1380 netxen_set_tx_vlan_tci(first_desc, vid);
1381 vlan_oob = 1;
1382 }
1383
1384 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1385 skb_shinfo(skb)->gso_size > 0) {
1386
1387 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1388
1389 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1390 first_desc->total_hdr_length = hdr_len;
1391 if (vlan_oob) {
1392 first_desc->total_hdr_length += VLAN_HLEN;
1393 first_desc->tcp_hdr_offset = VLAN_HLEN;
1394 first_desc->ip_hdr_offset = VLAN_HLEN;
1395 /* Only in case of TSO on vlan device */
1396 flags |= FLAGS_VLAN_TAGGED;
1397 }
1398
1399 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1400 TX_TCP_LSO6 : TX_TCP_LSO;
1401 tso = 1;
1402
1403 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1404 u8 l4proto;
1405
1406 if (protocol == cpu_to_be16(ETH_P_IP)) {
1407 l4proto = ip_hdr(skb)->protocol;
1408
1409 if (l4proto == IPPROTO_TCP)
1410 opcode = TX_TCP_PKT;
1411 else if (l4proto == IPPROTO_UDP)
1412 opcode = TX_UDP_PKT;
1413 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1414 l4proto = ipv6_hdr(skb)->nexthdr;
1415
1416 if (l4proto == IPPROTO_TCP)
1417 opcode = TX_TCPV6_PKT;
1418 else if (l4proto == IPPROTO_UDP)
1419 opcode = TX_UDPV6_PKT;
1420 }
1421 }
1422
1423 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1424 first_desc->ip_hdr_offset += skb_network_offset(skb);
1425 netxen_set_tx_flags_opcode(first_desc, flags, opcode);
1426
1427 if (!tso)
1428 return;
1429
1430 /* For LSO, we need to copy the MAC/IP/TCP headers into
1431 * the descriptor ring
1432 */
1433 producer = tx_ring->producer;
1434 copied = 0;
1435 offset = 2;
1436
1437 if (vlan_oob) {
1438 /* Create a TSO vlan header template for firmware */
1439
1440 hwdesc = &tx_ring->desc_head[producer];
1441 tx_ring->cmd_buf_arr[producer].skb = NULL;
1442
1443 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1444 hdr_len + VLAN_HLEN);
1445
1446 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1447 skb_copy_from_linear_data(skb, vh, 12);
1448 vh->h_vlan_proto = htons(ETH_P_8021Q);
1449 vh->h_vlan_TCI = htons(vid);
1450 skb_copy_from_linear_data_offset(skb, 12,
1451 (char *)vh + 16, copy_len - 16);
1452
1453 copied = copy_len;
1454 offset = 0;
1455
1456 producer = get_next_index(producer, tx_ring->num_desc);
1457 }
1458
1459 while (copied < hdr_len) {
1460
1461 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1462 (hdr_len - copied));
1463
1464 hwdesc = &tx_ring->desc_head[producer];
1465 tx_ring->cmd_buf_arr[producer].skb = NULL;
1466
1467 skb_copy_from_linear_data_offset(skb, copied,
1468 (char *)hwdesc + offset, copy_len);
1469
1470 copied += copy_len;
1471 offset = 0;
1472
1473 producer = get_next_index(producer, tx_ring->num_desc);
1474 }
1475
1476 tx_ring->producer = producer;
1477 barrier();
1478 }
1479
1480 static void
1481 netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
1482 struct netxen_cmd_buffer *pbuf, int last)
1483 {
1484 int k;
1485 struct netxen_skb_frag *buffrag;
1486
1487 buffrag = &pbuf->frag_array[0];
1488 pci_unmap_single(pdev, buffrag->dma,
1489 buffrag->length, PCI_DMA_TODEVICE);
1490
1491 for (k = 1; k < last; k++) {
1492 buffrag = &pbuf->frag_array[k];
1493 pci_unmap_page(pdev, buffrag->dma,
1494 buffrag->length, PCI_DMA_TODEVICE);
1495 }
1496 }
1497
1498 static inline void
1499 netxen_clear_cmddesc(u64 *desc)
1500 {
1501 desc[0] = 0ULL;
1502 desc[2] = 0ULL;
1503 }
1504
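/*
 * netxen_nic_xmit_frame() DMA-maps the skb head and each page fragment,
 * packs them four buffers per command descriptor, runs the TSO/checksum
 * setup above, and finally advances the command producer index to hand
 * the descriptors to the firmware.
 */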
1505 static int
1506 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1507 {
1508 struct netxen_adapter *adapter = netdev_priv(netdev);
1509 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
1510 struct skb_frag_struct *frag;
1511 struct netxen_cmd_buffer *pbuf;
1512 struct netxen_skb_frag *buffrag;
1513 struct cmd_desc_type0 *hwdesc, *first_desc;
1514 struct pci_dev *pdev;
1515 dma_addr_t temp_dma;
1516 int i, k;
1517 unsigned long offset;
1518
1519 u32 producer;
1520 int len, frag_count, no_of_desc;
1521 u32 num_txd = tx_ring->num_desc;
1522
1523 frag_count = skb_shinfo(skb)->nr_frags + 1;
1524
1525 /* 4 fragments per cmd descriptor */
1526 no_of_desc = (frag_count + 3) >> 2;
1527
1528 if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
1529 netif_stop_queue(netdev);
1530 return NETDEV_TX_BUSY;
1531 }
1532
1533 producer = tx_ring->producer;
1534
1535 pdev = adapter->pdev;
1536 len = skb->len - skb->data_len;
1537
1538 temp_dma = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE);
1539 if (pci_dma_mapping_error(pdev, temp_dma))
1540 goto drop_packet;
1541
1542 pbuf = &tx_ring->cmd_buf_arr[producer];
1543 pbuf->skb = skb;
1544 pbuf->frag_count = frag_count;
1545
1546 buffrag = &pbuf->frag_array[0];
1547 buffrag->dma = temp_dma;
1548 buffrag->length = len;
1549
1550 first_desc = hwdesc = &tx_ring->desc_head[producer];
1551 netxen_clear_cmddesc((u64 *)hwdesc);
1552 netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
1553 netxen_set_tx_port(hwdesc, adapter->portnum);
1554
1555 hwdesc->buffer_length[0] = cpu_to_le16(len);
1556 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1557
1558 for (i = 1, k = 1; i < frag_count; i++, k++) {
1559
1560 /* move to the next descriptor if needed */
1561 if ((i & 0x3) == 0) {
1562 k = 0;
1563 producer = get_next_index(producer, num_txd);
1564 hwdesc = &tx_ring->desc_head[producer];
1565 netxen_clear_cmddesc((u64 *)hwdesc);
1566 pbuf = &tx_ring->cmd_buf_arr[producer];
1567 pbuf->skb = NULL;
1568 }
1569 buffrag = &pbuf->frag_array[i];
1570 frag = &skb_shinfo(skb)->frags[i - 1];
1571 len = frag->size;
1572 offset = frag->page_offset;
1573
1574 temp_dma = pci_map_page(pdev, frag->page, offset,
1575 len, PCI_DMA_TODEVICE);
1576 if (pci_dma_mapping_error(pdev, temp_dma)) {
1577 netxen_clean_tx_dma_mapping(pdev, pbuf, i);
1578 goto drop_packet;
1579 }
1580
1581 buffrag->dma = temp_dma;
1582 buffrag->length = len;
1583
1584 hwdesc->buffer_length[k] = cpu_to_le16(len);
1585 switch (k) {
1586 case 0:
1587 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1588 break;
1589 case 1:
1590 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
1591 break;
1592 case 2:
1593 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
1594 break;
1595 case 3:
1596 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
1597 break;
1598 }
1599 }
1600 tx_ring->producer = get_next_index(producer, num_txd);
1601
1602 netxen_tso_check(netdev, tx_ring, first_desc, skb);
1603
1604 netxen_nic_update_cmd_producer(adapter, tx_ring);
1605
1606 adapter->stats.txbytes += skb->len;
1607 adapter->stats.xmitcalled++;
1608
1609 return NETDEV_TX_OK;
1610
1611 drop_packet:
1612 adapter->stats.txdropped++;
1613 dev_kfree_skb_any(skb);
1614 return NETDEV_TX_OK;
1615 }
1616
1617 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1618 {
1619 struct net_device *netdev = adapter->netdev;
1620 uint32_t temp, temp_state, temp_val;
1621 int rv = 0;
1622
1623 temp = NXRD32(adapter, CRB_TEMP_STATE);
1624
1625 temp_state = nx_get_temp_state(temp);
1626 temp_val = nx_get_temp_val(temp);
1627
1628 if (temp_state == NX_TEMP_PANIC) {
1629 printk(KERN_ALERT
1630 "%s: Device temperature %d degrees C exceeds"
1631 " maximum allowed. Hardware has been shut down.\n",
1632 netdev->name, temp_val);
1633
1634 netif_device_detach(netdev);
1635 netxen_nic_down(adapter, netdev);
1636 netxen_nic_detach(adapter);
1637
1638 rv = 1;
1639 } else if (temp_state == NX_TEMP_WARN) {
1640 if (adapter->temp == NX_TEMP_NORMAL) {
1641 printk(KERN_ALERT
1642 "%s: Device temperature %d degrees C "
1643 "exceeds operating range."
1644 " Immediate action needed.\n",
1645 netdev->name, temp_val);
1646 }
1647 } else {
1648 if (adapter->temp == NX_TEMP_WARN) {
1649 printk(KERN_INFO
1650 "%s: Device temperature is now %d degrees C"
1651 " in normal range.\n", netdev->name,
1652 temp_val);
1653 }
1654 }
1655 adapter->temp = temp_state;
1656 return rv;
1657 }
1658
1659 void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
1660 {
1661 struct net_device *netdev = adapter->netdev;
1662
1663 if (adapter->ahw.linkup && !linkup) {
1664 printk(KERN_INFO "%s: %s NIC Link is down\n",
1665 netxen_nic_driver_name, netdev->name);
1666 adapter->ahw.linkup = 0;
1667 if (netif_running(netdev)) {
1668 netif_carrier_off(netdev);
1669 netif_stop_queue(netdev);
1670 }
1671
1672 if (!adapter->has_link_events)
1673 netxen_nic_set_link_parameters(adapter);
1674
1675 } else if (!adapter->ahw.linkup && linkup) {
1676 printk(KERN_INFO "%s: %s NIC Link is up\n",
1677 netxen_nic_driver_name, netdev->name);
1678 adapter->ahw.linkup = 1;
1679 if (netif_running(netdev)) {
1680 netif_carrier_on(netdev);
1681 netif_wake_queue(netdev);
1682 }
1683
1684 if (!adapter->has_link_events)
1685 netxen_nic_set_link_parameters(adapter);
1686 }
1687 }
1688
1689 static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1690 {
1691 u32 val, port, linkup;
1692
1693 port = adapter->physical_port;
1694
1695 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1696 val = NXRD32(adapter, CRB_XG_STATE_P3);
1697 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1698 linkup = (val == XG_LINK_UP_P3);
1699 } else {
1700 val = NXRD32(adapter, CRB_XG_STATE);
1701 if (adapter->ahw.port_type == NETXEN_NIC_GBE)
1702 linkup = (val >> port) & 1;
1703 else {
1704 val = (val >> port*8) & 0xff;
1705 linkup = (val == XG_LINK_UP);
1706 }
1707 }
1708
1709 netxen_advert_link_change(adapter, linkup);
1710 }
1711
1712 static void netxen_watchdog(unsigned long v)
1713 {
1714 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
1715
1716 SCHEDULE_WORK(&adapter->watchdog_task);
1717 }
1718
1719 void netxen_watchdog_task(struct work_struct *work)
1720 {
1721 struct netxen_adapter *adapter =
1722 container_of(work, struct netxen_adapter, watchdog_task);
1723
1724 if (netxen_nic_check_temp(adapter))
1725 return;
1726
1727 if (!adapter->has_link_events)
1728 netxen_nic_handle_phy_intr(adapter);
1729
1730 if (netif_running(adapter->netdev))
1731 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1732 }
1733
1734 static void netxen_tx_timeout(struct net_device *netdev)
1735 {
1736 struct netxen_adapter *adapter = (struct netxen_adapter *)
1737 netdev_priv(netdev);
1738
1739 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1740
1741 SCHEDULE_WORK(&adapter->tx_timeout_task);
1742 }
1743
1744 static void netxen_reset_task(struct work_struct *work)
1745 {
1746 struct netxen_adapter *adapter =
1747 container_of(work, struct netxen_adapter, tx_timeout_task);
1748
1749 if (!netif_running(adapter->netdev))
1750 return;
1751
1752 netxen_napi_disable(adapter);
1753
1754 adapter->netdev->trans_start = jiffies;
1755
1756 netxen_napi_enable(adapter);
1757 netif_wake_queue(adapter->netdev);
1758 }
1759
1760 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1761 {
1762 struct netxen_adapter *adapter = netdev_priv(netdev);
1763 struct net_device_stats *stats = &adapter->net_stats;
1764
1765 memset(stats, 0, sizeof(*stats));
1766
1767 stats->rx_packets = adapter->stats.no_rcv;
1768 stats->tx_packets = adapter->stats.xmitfinished;
1769 stats->rx_bytes = adapter->stats.rxbytes;
1770 stats->tx_bytes = adapter->stats.txbytes;
1771 stats->rx_dropped = adapter->stats.rxdropped;
1772 stats->tx_dropped = adapter->stats.txdropped;
1773
1774 return stats;
1775 }
1776
1777 static irqreturn_t netxen_intr(int irq, void *data)
1778 {
1779 struct nx_host_sds_ring *sds_ring = data;
1780 struct netxen_adapter *adapter = sds_ring->adapter;
1781 u32 status = 0;
1782
1783 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1784
1785 if (!(status & adapter->legacy_intr.int_vec_bit))
1786 return IRQ_NONE;
1787
1788 if (adapter->ahw.revision_id >= NX_P3_B1) {
1789 /* check interrupt state machine, to be sure */
1790 status = adapter->pci_read_immediate(adapter,
1791 ISR_INT_STATE_REG);
1792 if (!ISR_LEGACY_INT_TRIGGERED(status))
1793 return IRQ_NONE;
1794
1795 } else {
1796 unsigned long our_int = 0;
1797
1798 our_int = NXRD32(adapter, CRB_INT_VECTOR);
1799
1800 /* not our interrupt */
1801 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
1802 return IRQ_NONE;
1803
1804 /* claim interrupt */
1805 NXWR32(adapter, CRB_INT_VECTOR, (our_int & 0xffffffff));
1806 }
1807
1808 /* clear interrupt */
1809 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
1810 netxen_nic_disable_int(sds_ring);
1811
1812 adapter->pci_write_immediate(adapter,
1813 adapter->legacy_intr.tgt_status_reg,
1814 0xffffffff);
1815 /* read twice to ensure write is flushed */
1816 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1817 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1818
1819 napi_schedule(&sds_ring->napi);
1820
1821 return IRQ_HANDLED;
1822 }
1823
1824 static irqreturn_t netxen_msi_intr(int irq, void *data)
1825 {
1826 struct nx_host_sds_ring *sds_ring = data;
1827 struct netxen_adapter *adapter = sds_ring->adapter;
1828
1829 /* clear interrupt */
1830 adapter->pci_write_immediate(adapter,
1831 adapter->msi_tgt_status, 0xffffffff);
1832
1833 napi_schedule(&sds_ring->napi);
1834 return IRQ_HANDLED;
1835 }
1836
1837 static irqreturn_t netxen_msix_intr(int irq, void *data)
1838 {
1839 struct nx_host_sds_ring *sds_ring = data;
1840
1841 napi_schedule(&sds_ring->napi);
1842 return IRQ_HANDLED;
1843 }
1844
1845 static int netxen_nic_poll(struct napi_struct *napi, int budget)
1846 {
1847 struct nx_host_sds_ring *sds_ring =
1848 container_of(napi, struct nx_host_sds_ring, napi);
1849
1850 struct netxen_adapter *adapter = sds_ring->adapter;
1851
1852 int tx_complete;
1853 int work_done;
1854
1855 tx_complete = netxen_process_cmd_ring(adapter);
1856
1857 work_done = netxen_process_rcv_ring(sds_ring, budget);
1858
1859 if ((work_done < budget) && tx_complete) {
1860 napi_complete(&sds_ring->napi);
1861 if (netif_running(adapter->netdev))
1862 netxen_nic_enable_int(sds_ring);
1863 }
1864
1865 return work_done;
1866 }
1867
1868 #ifdef CONFIG_NET_POLL_CONTROLLER
1869 static void netxen_nic_poll_controller(struct net_device *netdev)
1870 {
1871 struct netxen_adapter *adapter = netdev_priv(netdev);
1872 disable_irq(adapter->irq);
1873 netxen_intr(adapter->irq, adapter);
1874 enable_irq(adapter->irq);
1875 }
1876 #endif
1877
1878 #define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
1879
1880 static int
1881 netxen_destip_supported(struct netxen_adapter *adapter)
1882 {
1883 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
1884 return 0;
1885
1886 if (adapter->ahw.cut_through)
1887 return 0;
1888
1889 return 1;
1890 }
1891
1892 static int netxen_netdev_event(struct notifier_block *this,
1893 unsigned long event, void *ptr)
1894 {
1895 struct netxen_adapter *adapter;
1896 struct net_device *dev = (struct net_device *)ptr;
1897 struct in_device *indev;
1898
1899 recheck:
1900 if (dev == NULL)
1901 goto done;
1902
1903 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1904 dev = vlan_dev_real_dev(dev);
1905 goto recheck;
1906 }
1907
1908 if (!is_netxen_netdev(dev))
1909 goto done;
1910
1911 adapter = netdev_priv(dev);
1912
1913 if (!adapter || !netxen_destip_supported(adapter))
1914 goto done;
1915
1916 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
1917 goto done;
1918
1919 indev = in_dev_get(dev);
1920 if (!indev)
1921 goto done;
1922
1923 for_ifa(indev) {
1924 switch (event) {
1925 case NETDEV_UP:
1926 netxen_config_ipaddr(adapter,
1927 ifa->ifa_address, NX_IP_UP);
1928 break;
1929 case NETDEV_DOWN:
1930 netxen_config_ipaddr(adapter,
1931 ifa->ifa_address, NX_IP_DOWN);
1932 break;
1933 default:
1934 break;
1935 }
1936 } endfor_ifa(indev);
1937
1938 in_dev_put(indev);
1939 done:
1940 return NOTIFY_DONE;
1941 }
1942
1943 static int
1944 netxen_inetaddr_event(struct notifier_block *this,
1945 unsigned long event, void *ptr)
1946 {
1947 struct netxen_adapter *adapter;
1948 struct net_device *dev;
1949
1950 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1951
1952 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
1953
1954 recheck:
1955 if (dev == NULL || !netif_running(dev))
1956 goto done;
1957
1958 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1959 dev = vlan_dev_real_dev(dev);
1960 goto recheck;
1961 }
1962
1963 if (!is_netxen_netdev(dev))
1964 goto done;
1965
1966 adapter = netdev_priv(dev);
1967
1968 if (!adapter || !netxen_destip_supported(adapter))
1969 goto done;
1970
1971 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
1972 goto done;
1973
1974 switch (event) {
1975 case NETDEV_UP:
1976 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
1977 break;
1978 case NETDEV_DOWN:
1979 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
1980 break;
1981 default:
1982 break;
1983 }
1984
1985 done:
1986 return NOTIFY_DONE;
1987 }
1988
1989 static struct notifier_block netxen_netdev_cb = {
1990 .notifier_call = netxen_netdev_event,
1991 };
1992
1993 static struct notifier_block netxen_inetaddr_cb = {
1994 .notifier_call = netxen_inetaddr_event,
1995 };
1996
1997 static struct pci_driver netxen_driver = {
1998 .name = netxen_nic_driver_name,
1999 .id_table = netxen_pci_tbl,
2000 .probe = netxen_nic_probe,
2001 .remove = __devexit_p(netxen_nic_remove),
2002 #ifdef CONFIG_PM
2003 .suspend = netxen_nic_suspend,
2004 .resume = netxen_nic_resume
2005 #endif
2006 };
2007
2008 static int __init netxen_init_module(void)
2009 {
2010 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
2011
2012 if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
2013 return -ENOMEM;
2014
2015 register_netdevice_notifier(&netxen_netdev_cb);
2016 register_inetaddr_notifier(&netxen_inetaddr_cb);
2017
2018 return pci_register_driver(&netxen_driver);
2019 }
2020
2021 module_init(netxen_init_module);
2022
2023 static void __exit netxen_exit_module(void)
2024 {
2025 pci_unregister_driver(&netxen_driver);
2026
2027 unregister_inetaddr_notifier(&netxen_inetaddr_cb);
2028 unregister_netdevice_notifier(&netxen_netdev_cb);
2029 destroy_workqueue(netxen_workq);
2030 }
2031
2032 module_exit(netxen_exit_module);