1 /*
2 * Copyright (C) 2003 - 2009 NetXen, Inc.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
22 *
23 * Contact Information:
24 * info@netxen.com
25 * NetXen Inc,
26 * 18922 Forge Drive
27 * Cupertino, CA 95014-0701
28 *
29 */
30
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include "netxen_nic_hw.h"
34
35 #include "netxen_nic.h"
36 #include "netxen_nic_phan_reg.h"
37
38 #include <linux/dma-mapping.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <linux/ipv6.h>
42
43 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
46
47 char netxen_nic_driver_name[] = "netxen_nic";
48 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
49 NETXEN_NIC_LINUX_VERSIONID;
50
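/*
 * Tunable defaults: port_mode and wol_port_mode select the port/WoL mode
 * programmed by netxen_set_port_mode() (values are NETXEN_PORT_MODE_*
 * constants), while use_msi and use_msi_x control whether MSI/MSI-X is
 * attempted in netxen_setup_intr().
 */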
51 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
52
53 /* Default to restricted 1G auto-neg mode */
54 static int wol_port_mode = 5;
55
56 static int use_msi = 1;
57
58 static int use_msi_x = 1;
59
60 /* Functions local to the NetXen NIC driver */
61 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
62 const struct pci_device_id *ent);
63 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
64 static int netxen_nic_open(struct net_device *netdev);
65 static int netxen_nic_close(struct net_device *netdev);
66 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
67 static void netxen_tx_timeout(struct net_device *netdev);
68 static void netxen_tx_timeout_task(struct work_struct *work);
69 static void netxen_watchdog(unsigned long);
70 static int netxen_nic_poll(struct napi_struct *napi, int budget);
71 #ifdef CONFIG_NET_POLL_CONTROLLER
72 static void netxen_nic_poll_controller(struct net_device *netdev);
73 #endif
74 static irqreturn_t netxen_intr(int irq, void *data);
75 static irqreturn_t netxen_msi_intr(int irq, void *data);
76 static irqreturn_t netxen_msix_intr(int irq, void *data);
77
78 /* PCI Device ID Table */
79 #define ENTRY(device) \
80 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
82
83 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
84 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
85 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
86 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
87 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
88 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
89 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
91 ENTRY(PCI_DEVICE_ID_NX3031),
92 {0,}
93 };
94
95 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
96
97 static void netxen_watchdog(unsigned long);
98
99 static uint32_t crb_cmd_producer[4] = {
100 CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
101 CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
102 };
103
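/*
 * Publish the tx producer index to the firmware through the per-ring CRB
 * register. If the ring is nearly full, stop the queue; the barrier is
 * presumably paired with the tx-completion path that re-wakes the queue.
 */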
104 void
105 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
106 struct nx_host_tx_ring *tx_ring)
107 {
108 NXWR32(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
109
110 if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
111 netif_stop_queue(adapter->netdev);
112 smp_mb();
113 }
114 }
115
116 static uint32_t crb_cmd_consumer[4] = {
117 CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
118 CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
119 };
120
121 static inline void
122 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
123 struct nx_host_tx_ring *tx_ring)
124 {
125 NXWR32(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
126 }
127
128 static uint32_t msi_tgt_status[8] = {
129 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
130 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
131 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
132 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
133 };
134
135 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
136
137 static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
138 {
139 struct netxen_adapter *adapter = sds_ring->adapter;
140
141 NXWR32(adapter, sds_ring->crb_intr_mask, 0);
142 }
143
144 static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
145 {
146 struct netxen_adapter *adapter = sds_ring->adapter;
147
148 NXWR32(adapter, sds_ring->crb_intr_mask, 0x1);
149
150 if (!NETXEN_IS_MSI_FAMILY(adapter))
151 adapter->pci_write_immediate(adapter,
152 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
153 }
154
155 static int
156 netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
157 {
158 int size = sizeof(struct nx_host_sds_ring) * count;
159
160 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
161
162 return (recv_ctx->sds_rings == NULL);
163 }
164
165 static void
166 netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
167 {
168 kfree(recv_ctx->sds_rings);
169 recv_ctx->sds_rings = NULL;
170 }
171
172 static int
173 netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
174 {
175 int ring;
176 struct nx_host_sds_ring *sds_ring;
177 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
178
179 if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
180 return 1;
181
182 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
183 sds_ring = &recv_ctx->sds_rings[ring];
184 netif_napi_add(netdev, &sds_ring->napi,
185 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
186 }
187
188 return 0;
189 }
190
191 static void
192 netxen_napi_enable(struct netxen_adapter *adapter)
193 {
194 int ring;
195 struct nx_host_sds_ring *sds_ring;
196 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
197
198 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
199 sds_ring = &recv_ctx->sds_rings[ring];
200 napi_enable(&sds_ring->napi);
201 netxen_nic_enable_int(sds_ring);
202 }
203 }
204
205 static void
206 netxen_napi_disable(struct netxen_adapter *adapter)
207 {
208 int ring;
209 struct nx_host_sds_ring *sds_ring;
210 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
211
212 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
213 sds_ring = &recv_ctx->sds_rings[ring];
214 netxen_nic_disable_int(sds_ring);
215 napi_synchronize(&sds_ring->napi);
216 napi_disable(&sds_ring->napi);
217 }
218 }
219
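/*
 * Pick the widest DMA mask this chip generation supports: P2 parts can
 * address 35 bits (kept at 32 bits on IA64), later parts 39 bits. The
 * coherent mask stays at 32 bits on P2. pci_using_dac is set once both
 * masks are accepted.
 */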
220 static int nx_set_dma_mask(struct netxen_adapter *adapter)
221 {
222 struct pci_dev *pdev = adapter->pdev;
223 uint64_t mask, cmask;
224
225 adapter->pci_using_dac = 0;
226
227 mask = DMA_BIT_MASK(32);
228 cmask = DMA_BIT_MASK(32);
229
230 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
231 #ifndef CONFIG_IA64
232 mask = DMA_BIT_MASK(35);
233 #endif
234 } else {
235 mask = DMA_BIT_MASK(39);
236 cmask = mask;
237 }
238
239 if (pci_set_dma_mask(pdev, mask) == 0 &&
240 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
241 adapter->pci_using_dac = 1;
242 return 0;
243 }
244
245 return -EIO;
246 }
247
248 /* Update addressable range if firmware supports it */
249 static int
250 nx_update_dma_mask(struct netxen_adapter *adapter)
251 {
252 int change, shift, err;
253 uint64_t mask, old_mask, old_cmask;
254 struct pci_dev *pdev = adapter->pdev;
255
256 change = 0;
257
258 shift = NXRD32(adapter, CRB_DMA_SHIFT);
259 if (shift > 32)
260 return 0;
261
262 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
263 change = 1;
264 else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
265 change = 1;
266
267 if (change) {
268 old_mask = pdev->dma_mask;
269 old_cmask = pdev->dev.coherent_dma_mask;
270
271 mask = DMA_BIT_MASK(32+shift);
272
273 err = pci_set_dma_mask(pdev, mask);
274 if (err)
275 goto err_out;
276
277 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
278
279 err = pci_set_consistent_dma_mask(pdev, mask);
280 if (err)
281 goto err_out;
282 }
283 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
284 }
285
286 return 0;
287
288 err_out:
289 pci_set_dma_mask(pdev, old_mask);
290 pci_set_consistent_dma_mask(pdev, old_cmask);
291 return err;
292 }
293
294 static void netxen_check_options(struct netxen_adapter *adapter)
295 {
296 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
297 adapter->num_rxd = MAX_RCV_DESCRIPTORS_10G;
298 else if (adapter->ahw.port_type == NETXEN_NIC_GBE)
299 adapter->num_rxd = MAX_RCV_DESCRIPTORS_1G;
300
301 adapter->msix_supported = 0;
302 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
303 adapter->msix_supported = !!use_msi_x;
304 adapter->rss_supported = !!use_msi_x;
305 } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
306 switch (adapter->ahw.board_type) {
307 case NETXEN_BRDTYPE_P2_SB31_10G:
308 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
309 adapter->msix_supported = !!use_msi_x;
310 adapter->rss_supported = !!use_msi_x;
311 break;
312 default:
313 break;
314 }
315 }
316
317 adapter->num_txd = MAX_CMD_DESCRIPTORS_HOST;
318 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS;
319 adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
320
321 return;
322 }
323
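/*
 * Run only by the first driver instance. A scratch value of 0x55555555 in
 * CAM RAM 0x1fc marks the first boot after power-up; in that case write the
 * BDINFO magic, apply the P2 bus-master workaround, verify the global SW
 * reset state and kick the P2 boot loader, polling (up to ~5 seconds) until
 * the scratch register changes.
 */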
324 static int
325 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
326 {
327 u32 val, timeout;
328
329 if (first_boot == 0x55555555) {
330 /* This is the first boot after power up */
331 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
332
333 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
334 return 0;
335
336 /* PCI bus master workaround */
337 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
338 if (!(first_boot & 0x4)) {
339 first_boot |= 0x4;
340 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
341 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
342 }
343
344 /* This is the first boot after power up */
345 first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
346 if (first_boot != 0x80000f) {
347 /* clear the register for future unloads/loads */
348 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
349 return -EIO;
350 }
351
352 /* Start P2 boot loader */
353 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
354 NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
355 timeout = 0;
356 do {
357 msleep(1);
358 val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
359
360 if (++timeout > 5000)
361 return -EIO;
362
363 } while (val == NETXEN_BDINFO_MAGIC);
364 }
365 return 0;
366 }
367
368 static void netxen_set_port_mode(struct netxen_adapter *adapter)
369 {
370 u32 val, data;
371
372 val = adapter->ahw.board_type;
373 if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
374 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
375 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
376 data = NETXEN_PORT_MODE_802_3_AP;
377 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
378 } else if (port_mode == NETXEN_PORT_MODE_XG) {
379 data = NETXEN_PORT_MODE_XG;
380 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
381 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
382 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
383 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
384 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
385 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
386 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
387 } else {
388 data = NETXEN_PORT_MODE_AUTO_NEG;
389 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
390 }
391
392 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
393 (wol_port_mode != NETXEN_PORT_MODE_XG) &&
394 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
395 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
396 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
397 }
398 NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
399 }
400 }
401
402 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
403 {
404 u32 control;
405 int pos;
406
407 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
408 if (pos) {
409 pci_read_config_dword(pdev, pos, &control);
410 if (enable)
411 control |= PCI_MSIX_FLAGS_ENABLE;
412 else
413 control = 0;
414 pci_write_config_dword(pdev, pos, control);
415 }
416 }
417
418 static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
419 {
420 int i;
421
422 for (i = 0; i < count; i++)
423 adapter->msix_entries[i].entry = i;
424 }
425
426 static int
427 netxen_read_mac_addr(struct netxen_adapter *adapter)
428 {
429 int i;
430 unsigned char *p;
431 __le64 mac_addr;
432 struct net_device *netdev = adapter->netdev;
433 struct pci_dev *pdev = adapter->pdev;
434
435 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
436 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
437 return -EIO;
438 } else {
439 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
440 return -EIO;
441 }
442
443 p = (unsigned char *)&mac_addr;
444 for (i = 0; i < 6; i++)
445 netdev->dev_addr[i] = *(p + 5 - i);
446
447 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
448
449 /* set station address */
450
451 if (!is_valid_ether_addr(netdev->perm_addr))
452 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
453
454 return 0;
455 }
456
457 int netxen_nic_set_mac(struct net_device *netdev, void *p)
458 {
459 struct netxen_adapter *adapter = netdev_priv(netdev);
460 struct sockaddr *addr = p;
461
462 if (!is_valid_ether_addr(addr->sa_data))
463 return -EINVAL;
464
465 if (netif_running(netdev)) {
466 netif_device_detach(netdev);
467 netxen_napi_disable(adapter);
468 }
469
470 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
471 adapter->macaddr_set(adapter, addr->sa_data);
472
473 if (netif_running(netdev)) {
474 netif_device_attach(netdev);
475 netxen_napi_enable(adapter);
476 }
477 return 0;
478 }
479
480 static void netxen_set_multicast_list(struct net_device *dev)
481 {
482 struct netxen_adapter *adapter = netdev_priv(dev);
483
484 adapter->set_multi(dev);
485 }
486
487 static const struct net_device_ops netxen_netdev_ops = {
488 .ndo_open = netxen_nic_open,
489 .ndo_stop = netxen_nic_close,
490 .ndo_start_xmit = netxen_nic_xmit_frame,
491 .ndo_get_stats = netxen_nic_get_stats,
492 .ndo_validate_addr = eth_validate_addr,
493 .ndo_set_multicast_list = netxen_set_multicast_list,
494 .ndo_set_mac_address = netxen_nic_set_mac,
495 .ndo_change_mtu = netxen_nic_change_mtu,
496 .ndo_tx_timeout = netxen_tx_timeout,
497 #ifdef CONFIG_NET_POLL_CONTROLLER
498 .ndo_poll_controller = netxen_nic_poll_controller,
499 #endif
500 };
501
502 static void
503 netxen_setup_intr(struct netxen_adapter *adapter)
504 {
505 struct netxen_legacy_intr_set *legacy_intrp;
506 struct pci_dev *pdev = adapter->pdev;
507 int err, num_msix;
508
509 if (adapter->rss_supported) {
510 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
511 MSIX_ENTRIES_PER_ADAPTER : 2;
512 } else
513 num_msix = 1;
514
515 adapter->max_sds_rings = 1;
516
517 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
518
519 if (adapter->ahw.revision_id >= NX_P3_B0)
520 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
521 else
522 legacy_intrp = &legacy_intr[0];
523 adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
524 adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
525 adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
526 adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;
527
528 netxen_set_msix_bit(pdev, 0);
529
530 if (adapter->msix_supported) {
531
532 netxen_init_msix_entries(adapter, num_msix);
533 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
534 if (err == 0) {
535 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
536 netxen_set_msix_bit(pdev, 1);
537
538 if (adapter->rss_supported)
539 adapter->max_sds_rings = num_msix;
540
541 dev_info(&pdev->dev, "using msi-x interrupts\n");
542 return;
543 }
544
545 if (err > 0)
546 pci_disable_msix(pdev);
547
548 /* fall through for msi */
549 }
550
551 if (use_msi && !pci_enable_msi(pdev)) {
552 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
553 adapter->msi_tgt_status =
554 msi_tgt_status[adapter->ahw.pci_func];
555 dev_info(&pdev->dev, "using msi interrupts\n");
556 adapter->msix_entries[0].vector = pdev->irq;
557 return;
558 }
559
560 dev_info(&pdev->dev, "using legacy interrupts\n");
561 adapter->msix_entries[0].vector = pdev->irq;
562 }
563
564 static void
565 netxen_teardown_intr(struct netxen_adapter *adapter)
566 {
567 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
568 pci_disable_msix(adapter->pdev);
569 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
570 pci_disable_msi(adapter->pdev);
571 }
572
573 static void
574 netxen_cleanup_pci_map(struct netxen_adapter *adapter)
575 {
576 if (adapter->ahw.db_base != NULL)
577 iounmap(adapter->ahw.db_base);
578 if (adapter->ahw.pci_base0 != NULL)
579 iounmap(adapter->ahw.pci_base0);
580 if (adapter->ahw.pci_base1 != NULL)
581 iounmap(adapter->ahw.pci_base1);
582 if (adapter->ahw.pci_base2 != NULL)
583 iounmap(adapter->ahw.pci_base2);
584 }
585
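/*
 * Map BAR 0 according to its size: the 128MB and 32MB layouts (older parts)
 * are mapped in page groups, while the 2MB layout (newer parts) is mapped
 * whole and switches the adapter to the _2M register access helpers with
 * per-function window offsets. The doorbell BAR 4 is mapped only for
 * pre-P3 revisions.
 */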
586 static int
587 netxen_setup_pci_map(struct netxen_adapter *adapter)
588 {
589 void __iomem *mem_ptr0 = NULL;
590 void __iomem *mem_ptr1 = NULL;
591 void __iomem *mem_ptr2 = NULL;
592 void __iomem *db_ptr = NULL;
593
594 unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;
595
596 struct pci_dev *pdev = adapter->pdev;
597 int pci_func = adapter->ahw.pci_func;
598
599 int err = 0;
600
601 /*
602 * Set the CRB window to invalid. If any register in window 0 is
603 * accessed it should set the window to 0 and then reset it to 1.
604 */
605 adapter->curr_window = 255;
606 adapter->ahw.qdr_sn_window = -1;
607 adapter->ahw.ddr_mn_window = -1;
608
609 /* remap phys address */
610 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
611 mem_len = pci_resource_len(pdev, 0);
612 pci_len0 = 0;
613
614 adapter->hw_write_wx = netxen_nic_hw_write_wx_128M;
615 adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
616 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
617 adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
618 adapter->pci_set_window = netxen_nic_pci_set_window_128M;
619 adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
620 adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
621
622 /* 128 Meg of memory */
623 if (mem_len == NETXEN_PCI_128MB_SIZE) {
624 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
625 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
626 SECOND_PAGE_GROUP_SIZE);
627 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
628 THIRD_PAGE_GROUP_SIZE);
629 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
630 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
631 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
632 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
633 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
634 adapter->hw_write_wx = netxen_nic_hw_write_wx_2M;
635 adapter->hw_read_wx = netxen_nic_hw_read_wx_2M;
636 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
637 adapter->pci_write_immediate =
638 netxen_nic_pci_write_immediate_2M;
639 adapter->pci_set_window = netxen_nic_pci_set_window_2M;
640 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
641 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
642
643 mem_ptr0 = pci_ioremap_bar(pdev, 0);
644 if (mem_ptr0 == NULL) {
645 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
646 return -EIO;
647 }
648 pci_len0 = mem_len;
649
650 adapter->ahw.ddr_mn_window = 0;
651 adapter->ahw.qdr_sn_window = 0;
652
653 adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
654 (pci_func * 0x20);
655 adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW;
656 if (pci_func < 4)
657 adapter->ahw.ms_win_crb += (pci_func * 0x20);
658 else
659 adapter->ahw.ms_win_crb +=
660 0xA0 + ((pci_func - 4) * 0x10);
661 } else {
662 return -EIO;
663 }
664
665 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
666
667 adapter->ahw.pci_base0 = mem_ptr0;
668 adapter->ahw.pci_len0 = pci_len0;
669 adapter->ahw.pci_base1 = mem_ptr1;
670 adapter->ahw.pci_base2 = mem_ptr2;
671
672 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
673 goto skip_doorbell;
674
675 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
676 db_len = pci_resource_len(pdev, 4);
677
678 if (db_len == 0) {
679 printk(KERN_ERR "%s: doorbell is disabled\n",
680 netxen_nic_driver_name);
681 err = -EIO;
682 goto err_out;
683 }
684
685 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
686 if (!db_ptr) {
687 printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
688 netxen_nic_driver_name);
689 err = -EIO;
690 goto err_out;
691 }
692
693 skip_doorbell:
694 adapter->ahw.db_base = db_ptr;
695 adapter->ahw.db_len = db_len;
696 return 0;
697
698 err_out:
699 netxen_cleanup_pci_map(adapter);
700 return err;
701 }
702
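/*
 * Bring up the firmware. Only the "first" instance (port 0 on P2, PCI
 * function 0 on P3) performs hardware init, optionally requests a firmware
 * image, loads it and reports the driver version; every other instance
 * skips straight to the handshake in netxen_phantom_init().
 */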
703 static int
704 netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
705 {
706 int val, err, first_boot;
707 struct pci_dev *pdev = adapter->pdev;
708
709 int first_driver = 0;
710
711 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
712 first_driver = (adapter->portnum == 0);
713 else
714 first_driver = (adapter->ahw.pci_func == 0);
715
716 if (!first_driver)
717 goto wait_init;
718
719 first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
720
721 err = netxen_check_hw_init(adapter, first_boot);
722 if (err) {
723 dev_err(&pdev->dev, "error in HW init sequence\n");
724 return err;
725 }
726
727 if (request_fw)
728 netxen_request_firmware(adapter);
729
730 err = netxen_need_fw_reset(adapter);
731 if (err <= 0)
732 return err;
733
734 if (first_boot != 0x55555555) {
735 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
736 netxen_pinit_from_rom(adapter, 0);
737 msleep(1);
738 }
739
740 NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
741 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
742 netxen_set_port_mode(adapter);
743
744 netxen_load_firmware(adapter);
745
746 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
747
748 /* Initialize multicast addr pool owners */
749 val = 0x7654;
750 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
751 val |= 0x0f000000;
752 NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
753
754 }
755
756 err = netxen_initialize_adapter_offload(adapter);
757 if (err)
758 return err;
759
760 /*
761 * Tell the hardware our version number.
762 */
763 val = (_NETXEN_NIC_LINUX_MAJOR << 16)
764 | ((_NETXEN_NIC_LINUX_MINOR << 8))
765 | (_NETXEN_NIC_LINUX_SUBVERSION);
766 NXWR32(adapter, CRB_DRIVER_VERSION, val);
767
768 wait_init:
769 /* Handshake with the card before we register the devices. */
770 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
771 if (err) {
772 netxen_free_adapter_offload(adapter);
773 return err;
774 }
775
776 return 0;
777 }
778
779 static int
780 netxen_nic_request_irq(struct netxen_adapter *adapter)
781 {
782 irq_handler_t handler;
783 struct nx_host_sds_ring *sds_ring;
784 int err, ring;
785
786 unsigned long flags = IRQF_SAMPLE_RANDOM;
787 struct net_device *netdev = adapter->netdev;
788 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
789
790 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
791 handler = netxen_msix_intr;
792 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
793 handler = netxen_msi_intr;
794 else {
795 flags |= IRQF_SHARED;
796 handler = netxen_intr;
797 }
798 adapter->irq = netdev->irq;
799
800 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
801 sds_ring = &recv_ctx->sds_rings[ring];
802 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
803 err = request_irq(sds_ring->irq, handler,
804 flags, sds_ring->name, sds_ring);
805 if (err)
806 return err;
807 }
808
809 return 0;
810 }
811
812 static void
813 netxen_nic_free_irq(struct netxen_adapter *adapter)
814 {
815 int ring;
816 struct nx_host_sds_ring *sds_ring;
817
818 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
819
820 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
821 sds_ring = &recv_ctx->sds_rings[ring];
822 free_irq(sds_ring->irq, sds_ring);
823 }
824 }
825
826 static int
827 netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
828 {
829 int err;
830
831 err = adapter->init_port(adapter, adapter->physical_port);
832 if (err) {
833 printk(KERN_ERR "%s: Failed to initialize port %d\n",
834 netxen_nic_driver_name, adapter->portnum);
835 return err;
836 }
837 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
838 adapter->macaddr_set(adapter, netdev->dev_addr);
839
840 adapter->set_multi(netdev);
841 adapter->set_mtu(adapter, netdev->mtu);
842
843 adapter->ahw.linkup = 0;
844
845 if (adapter->max_sds_rings > 1)
846 netxen_config_rss(adapter, 1);
847
848 netxen_napi_enable(adapter);
849
850 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
851 netxen_linkevent_request(adapter, 1);
852 else
853 netxen_nic_set_link_parameters(adapter);
854
855 mod_timer(&adapter->watchdog_timer, jiffies);
856
857 return 0;
858 }
859
860 static void
861 netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
862 {
863 spin_lock(&adapter->tx_clean_lock);
864 netif_carrier_off(netdev);
865 netif_tx_disable(netdev);
866
867 if (adapter->stop_port)
868 adapter->stop_port(adapter);
869
870 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
871 netxen_p3_free_mac_list(adapter);
872
873 netxen_napi_disable(adapter);
874
875 netxen_release_tx_buffers(adapter);
876 spin_unlock(&adapter->tx_clean_lock);
877
878 del_timer_sync(&adapter->watchdog_timer);
879 }
880
881
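/*
 * Bring up the data path: initialize the firmware context, allocate
 * software and hardware ring resources, post rx buffers and request IRQs.
 * Firmware older than 4.x needs the host to drive the tx producer/consumer
 * CRB registers directly.
 */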
882 static int
883 netxen_nic_attach(struct netxen_adapter *adapter)
884 {
885 struct net_device *netdev = adapter->netdev;
886 struct pci_dev *pdev = adapter->pdev;
887 int err, ring;
888 struct nx_host_rds_ring *rds_ring;
889 struct nx_host_tx_ring *tx_ring;
890
891 err = netxen_init_firmware(adapter);
892 if (err != 0) {
893 printk(KERN_ERR "Failed to init firmware\n");
894 return -EIO;
895 }
896
897 if (adapter->fw_major < 4)
898 adapter->max_rds_rings = 3;
899 else
900 adapter->max_rds_rings = 2;
901
902 err = netxen_alloc_sw_resources(adapter);
903 if (err) {
904 printk(KERN_ERR "%s: Error in setting sw resources\n",
905 netdev->name);
906 return err;
907 }
908
909 netxen_nic_clear_stats(adapter);
910
911 err = netxen_alloc_hw_resources(adapter);
912 if (err) {
913 printk(KERN_ERR "%s: Error in setting hw resources\n",
914 netdev->name);
915 goto err_out_free_sw;
916 }
917
918 if (adapter->fw_major < 4) {
919 tx_ring = adapter->tx_ring;
920 tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
921 tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
922
923 tx_ring->producer = 0;
924 tx_ring->sw_consumer = 0;
925
926 netxen_nic_update_cmd_producer(adapter, tx_ring);
927 netxen_nic_update_cmd_consumer(adapter, tx_ring);
928 }
929
930 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
931 rds_ring = &adapter->recv_ctx.rds_rings[ring];
932 netxen_post_rx_buffers(adapter, ring, rds_ring);
933 }
934
935 err = netxen_nic_request_irq(adapter);
936 if (err) {
937 dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
938 netdev->name);
939 goto err_out_free_rxbuf;
940 }
941
942 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
943 return 0;
944
945 err_out_free_rxbuf:
946 netxen_release_rx_buffers(adapter);
947 netxen_free_hw_resources(adapter);
948 err_out_free_sw:
949 netxen_free_sw_resources(adapter);
950 return err;
951 }
952
953 static void
954 netxen_nic_detach(struct netxen_adapter *adapter)
955 {
956 netxen_free_hw_resources(adapter);
957 netxen_release_rx_buffers(adapter);
958 netxen_nic_free_irq(adapter);
959 netxen_free_sw_resources(adapter);
960
961 adapter->is_up = 0;
962 }
963
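/*
 * PCI probe: reject non-ethernet functions and known-bad P3 revisions, set
 * the DMA masks, map the BARs, start the firmware, set up interrupts, read
 * the MAC address and finally register the net_device.
 */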
964 static int __devinit
965 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
966 {
967 struct net_device *netdev = NULL;
968 struct netxen_adapter *adapter = NULL;
969 int i = 0, err;
970 int pci_func_id = PCI_FUNC(pdev->devfn);
971 uint8_t revision_id;
972
973 if (pdev->class != 0x020000) {
974 printk(KERN_DEBUG "NetXen function %d, class %x will not "
975 "be enabled.\n",pci_func_id, pdev->class);
976 return -ENODEV;
977 }
978
979 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
980 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x "
981 "will not be enabled.\n",
982 NX_P3_A0, NX_P3_B1);
983 return -ENODEV;
984 }
985
986 if ((err = pci_enable_device(pdev)))
987 return err;
988
989 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
990 err = -ENODEV;
991 goto err_out_disable_pdev;
992 }
993
994 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
995 goto err_out_disable_pdev;
996
997 pci_set_master(pdev);
998
999 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
1000 if (!netdev) {
1001 printk(KERN_ERR "%s: failed to allocate net_device. "
1002 "Check system memory resource usage.\n", netxen_nic_driver_name);
1003 err = -ENOMEM;
1004 goto err_out_free_res;
1005 }
1006
1007 SET_NETDEV_DEV(netdev, &pdev->dev);
1008
1009 adapter = netdev_priv(netdev);
1010 adapter->netdev = netdev;
1011 adapter->pdev = pdev;
1012 adapter->ahw.pci_func = pci_func_id;
1013
1014 revision_id = pdev->revision;
1015 adapter->ahw.revision_id = revision_id;
1016
1017 err = nx_set_dma_mask(adapter);
1018 if (err)
1019 goto err_out_free_netdev;
1020
1021 rwlock_init(&adapter->adapter_lock);
1022 spin_lock_init(&adapter->tx_clean_lock);
1023 INIT_LIST_HEAD(&adapter->mac_list);
1024
1025 err = netxen_setup_pci_map(adapter);
1026 if (err)
1027 goto err_out_free_netdev;
1028
1029 /* This will be reset for mezz cards */
1030 adapter->portnum = pci_func_id;
1031 adapter->rx_csum = 1;
1032 adapter->mc_enabled = 0;
1033 if (NX_IS_REVISION_P3(revision_id))
1034 adapter->max_mc_count = 38;
1035 else
1036 adapter->max_mc_count = 16;
1037
1038 netdev->netdev_ops = &netxen_netdev_ops;
1039 netdev->watchdog_timeo = 2*HZ;
1040
1041 netxen_nic_change_mtu(netdev, netdev->mtu);
1042
1043 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
1044
1045 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1046 netdev->features |= (NETIF_F_GRO);
1047 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1048
1049 if (NX_IS_REVISION_P3(revision_id)) {
1050 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1051 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1052 }
1053
1054 if (adapter->pci_using_dac) {
1055 netdev->features |= NETIF_F_HIGHDMA;
1056 netdev->vlan_features |= NETIF_F_HIGHDMA;
1057 }
1058
1059 if (netxen_nic_get_board_info(adapter) != 0) {
1060 printk("%s: Error getting board config info.\n",
1061 netxen_nic_driver_name);
1062 err = -EIO;
1063 goto err_out_iounmap;
1064 }
1065
1066 netxen_initialize_adapter_ops(adapter);
1067
1068 /* Mezz cards have PCI function 0,2,3 enabled */
1069 switch (adapter->ahw.board_type) {
1070 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
1071 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
1072 if (pci_func_id >= 2)
1073 adapter->portnum = pci_func_id - 2;
1074 break;
1075 default:
1076 break;
1077 }
1078
1079 err = netxen_start_firmware(adapter, 1);
1080 if (err)
1081 goto err_out_iounmap;
1082
1083 nx_update_dma_mask(adapter);
1084
1085 netxen_nic_get_firmware_info(adapter);
1086
1087 /*
1088 * See if the firmware gave us a virtual-physical port mapping.
1089 */
1090 adapter->physical_port = adapter->portnum;
1091 if (adapter->fw_major < 4) {
1092 i = NXRD32(adapter, CRB_V2P(adapter->portnum));
1093 if (i != 0x55555555)
1094 adapter->physical_port = i;
1095 }
1096
1097 netxen_check_options(adapter);
1098
1099 netxen_setup_intr(adapter);
1100
1101 netdev->irq = adapter->msix_entries[0].vector;
1102
1103 if (netxen_napi_add(adapter, netdev))
1104 goto err_out_disable_msi;
1105
1106 init_timer(&adapter->watchdog_timer);
1107 adapter->watchdog_timer.function = &netxen_watchdog;
1108 adapter->watchdog_timer.data = (unsigned long)adapter;
1109 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
1110 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
1111
1112 err = netxen_read_mac_addr(adapter);
1113 if (err)
1114 dev_warn(&pdev->dev, "failed to read mac addr\n");
1115
1116 netif_carrier_off(netdev);
1117 netif_stop_queue(netdev);
1118
1119 if ((err = register_netdev(netdev))) {
1120 printk(KERN_ERR "%s: register_netdev failed port #%d"
1121 " aborting\n", netxen_nic_driver_name,
1122 adapter->portnum);
1123 err = -EIO;
1124 goto err_out_disable_msi;
1125 }
1126
1127 pci_set_drvdata(pdev, adapter);
1128
1129 switch (adapter->ahw.port_type) {
1130 case NETXEN_NIC_GBE:
1131 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1132 adapter->netdev->name);
1133 break;
1134 case NETXEN_NIC_XGBE:
1135 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1136 adapter->netdev->name);
1137 break;
1138 }
1139
1140 return 0;
1141
1142 err_out_disable_msi:
1143 netxen_teardown_intr(adapter);
1144
1145 netxen_free_adapter_offload(adapter);
1146
1147 err_out_iounmap:
1148 netxen_cleanup_pci_map(adapter);
1149
1150 err_out_free_netdev:
1151 free_netdev(netdev);
1152
1153 err_out_free_res:
1154 pci_release_regions(pdev);
1155
1156 err_out_disable_pdev:
1157 pci_set_drvdata(pdev, NULL);
1158 pci_disable_device(pdev);
1159 return err;
1160 }
1161
1162 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1163 {
1164 struct netxen_adapter *adapter;
1165 struct net_device *netdev;
1166
1167 adapter = pci_get_drvdata(pdev);
1168 if (adapter == NULL)
1169 return;
1170
1171 netdev = adapter->netdev;
1172
1173 unregister_netdev(netdev);
1174
1175 cancel_work_sync(&adapter->watchdog_task);
1176 cancel_work_sync(&adapter->tx_timeout_task);
1177
1178 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
1179 netxen_nic_detach(adapter);
1180 }
1181
1182 if (adapter->portnum == 0)
1183 netxen_free_adapter_offload(adapter);
1184
1185 netxen_teardown_intr(adapter);
1186 netxen_free_sds_rings(&adapter->recv_ctx);
1187
1188 netxen_cleanup_pci_map(adapter);
1189
1190 netxen_release_firmware(adapter);
1191
1192 pci_release_regions(pdev);
1193 pci_disable_device(pdev);
1194 pci_set_drvdata(pdev, NULL);
1195
1196 free_netdev(netdev);
1197 }
1198
1199 #ifdef CONFIG_PM
1200 static int
1201 netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1202 {
1203
1204 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1205 struct net_device *netdev = adapter->netdev;
1206
1207 netif_device_detach(netdev);
1208
1209 if (netif_running(netdev))
1210 netxen_nic_down(adapter, netdev);
1211
1212 cancel_work_sync(&adapter->watchdog_task);
1213 cancel_work_sync(&adapter->tx_timeout_task);
1214
1215 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
1216 netxen_nic_detach(adapter);
1217
1218 pci_save_state(pdev);
1219
1220 if (netxen_nic_wol_supported(adapter)) {
1221 pci_enable_wake(pdev, PCI_D3cold, 1);
1222 pci_enable_wake(pdev, PCI_D3hot, 1);
1223 }
1224
1225 pci_disable_device(pdev);
1226 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1227
1228 return 0;
1229 }
1230
1231 static int
1232 netxen_nic_resume(struct pci_dev *pdev)
1233 {
1234 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1235 struct net_device *netdev = adapter->netdev;
1236 int err;
1237
1238 pci_set_power_state(pdev, PCI_D0);
1239 pci_restore_state(pdev);
1240
1241 err = pci_enable_device(pdev);
1242 if (err)
1243 return err;
1244
1245 adapter->curr_window = 255;
1246
1247 err = netxen_start_firmware(adapter, 0);
1248 if (err) {
1249 dev_err(&pdev->dev, "failed to start firmware\n");
1250 return err;
1251 }
1252
1253 if (netif_running(netdev)) {
1254 err = netxen_nic_attach(adapter);
1255 if (err)
1256 return err;
1257
1258 err = netxen_nic_up(adapter, netdev);
1259 if (err)
1260 return err;
1261
1262 netif_device_attach(netdev);
1263 }
1264
1265 return 0;
1266 }
1267 #endif
1268
1269 static int netxen_nic_open(struct net_device *netdev)
1270 {
1271 struct netxen_adapter *adapter = netdev_priv(netdev);
1272 int err = 0;
1273
1274 if (adapter->driver_mismatch)
1275 return -EIO;
1276
1277 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
1278 err = netxen_nic_attach(adapter);
1279 if (err)
1280 return err;
1281 }
1282
1283 err = netxen_nic_up(adapter, netdev);
1284 if (err)
1285 goto err_out;
1286
1287 netif_start_queue(netdev);
1288
1289 return 0;
1290
1291 err_out:
1292 netxen_nic_detach(adapter);
1293 return err;
1294 }
1295
1296 /*
1297 * netxen_nic_close - Disables a network interface entry point
1298 */
1299 static int netxen_nic_close(struct net_device *netdev)
1300 {
1301 struct netxen_adapter *adapter = netdev_priv(netdev);
1302
1303 netxen_nic_down(adapter, netdev);
1304 return 0;
1305 }
1306
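/*
 * Fill the offload fields of the first tx descriptor: choose an LSO opcode
 * when GSO is requested, otherwise a TCP/UDP checksum-offload opcode based
 * on the L4 protocol, and record the header offsets. Returns true when the
 * caller must copy the packet headers into the descriptor ring.
 */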
1307 static bool netxen_tso_check(struct net_device *netdev,
1308 struct cmd_desc_type0 *desc, struct sk_buff *skb)
1309 {
1310 bool tso = false;
1311 u8 opcode = TX_ETHER_PKT;
1312 __be16 protocol = skb->protocol;
1313 u16 flags = 0;
1314
1315 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1316 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1317 protocol = vh->h_vlan_encapsulated_proto;
1318 flags = FLAGS_VLAN_TAGGED;
1319 }
1320
1321 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1322 skb_shinfo(skb)->gso_size > 0) {
1323
1324 desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1325 desc->total_hdr_length =
1326 skb_transport_offset(skb) + tcp_hdrlen(skb);
1327
1328 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1329 TX_TCP_LSO6 : TX_TCP_LSO;
1330 tso = true;
1331
1332 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1333 u8 l4proto;
1334
1335 if (protocol == cpu_to_be16(ETH_P_IP)) {
1336 l4proto = ip_hdr(skb)->protocol;
1337
1338 if (l4proto == IPPROTO_TCP)
1339 opcode = TX_TCP_PKT;
1340 else if (l4proto == IPPROTO_UDP)
1341 opcode = TX_UDP_PKT;
1342 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1343 l4proto = ipv6_hdr(skb)->nexthdr;
1344
1345 if (l4proto == IPPROTO_TCP)
1346 opcode = TX_TCPV6_PKT;
1347 else if (l4proto == IPPROTO_UDP)
1348 opcode = TX_UDPV6_PKT;
1349 }
1350 }
1351 desc->tcp_hdr_offset = skb_transport_offset(skb);
1352 desc->ip_hdr_offset = skb_network_offset(skb);
1353 netxen_set_tx_flags_opcode(desc, flags, opcode);
1354 return tso;
1355 }
1356
1357 static void
1358 netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
1359 struct netxen_cmd_buffer *pbuf, int last)
1360 {
1361 int k;
1362 struct netxen_skb_frag *buffrag;
1363
1364 buffrag = &pbuf->frag_array[0];
1365 pci_unmap_single(pdev, buffrag->dma,
1366 buffrag->length, PCI_DMA_TODEVICE);
1367
1368 for (k = 1; k < last; k++) {
1369 buffrag = &pbuf->frag_array[k];
1370 pci_unmap_page(pdev, buffrag->dma,
1371 buffrag->length, PCI_DMA_TODEVICE);
1372 }
1373 }
1374
1375 static inline void
1376 netxen_clear_cmddesc(u64 *desc)
1377 {
1378 int i;
1379 for (i = 0; i < 8; i++)
1380 desc[i] = 0ULL;
1381 }
1382
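/*
 * Transmit path: map the linear data and each page fragment for DMA,
 * packing up to four buffer addresses per command descriptor. For LSO the
 * MAC/IP/TCP headers are additionally copied into one or two extra
 * descriptors before the producer index is published to the firmware.
 */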
1383 static int
1384 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1385 {
1386 struct netxen_adapter *adapter = netdev_priv(netdev);
1387 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
1388 unsigned int first_seg_len = skb->len - skb->data_len;
1389 struct netxen_cmd_buffer *pbuf;
1390 struct netxen_skb_frag *buffrag;
1391 struct cmd_desc_type0 *hwdesc;
1392 struct pci_dev *pdev = adapter->pdev;
1393 dma_addr_t temp_dma;
1394 int i, k;
1395
1396 u32 producer;
1397 int frag_count, no_of_desc;
1398 u32 num_txd = tx_ring->num_desc;
1399 bool is_tso = false;
1400
1401 frag_count = skb_shinfo(skb)->nr_frags + 1;
1402
1403 /* 4 fragments per cmd descriptor */
1404 no_of_desc = (frag_count + 3) >> 2;
1405
1406 if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
1407 netif_stop_queue(netdev);
1408 return NETDEV_TX_BUSY;
1409 }
1410
1411 producer = tx_ring->producer;
1412
1413 hwdesc = &tx_ring->desc_head[producer];
1414 netxen_clear_cmddesc((u64 *)hwdesc);
1415 pbuf = &tx_ring->cmd_buf_arr[producer];
1416
1417 is_tso = netxen_tso_check(netdev, hwdesc, skb);
1418
1419 pbuf->skb = skb;
1420 pbuf->frag_count = frag_count;
1421 buffrag = &pbuf->frag_array[0];
1422 temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
1423 PCI_DMA_TODEVICE);
1424 if (pci_dma_mapping_error(pdev, temp_dma))
1425 goto drop_packet;
1426
1427 buffrag->dma = temp_dma;
1428 buffrag->length = first_seg_len;
1429 netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
1430 netxen_set_tx_port(hwdesc, adapter->portnum);
1431
1432 hwdesc->buffer_length[0] = cpu_to_le16(first_seg_len);
1433 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1434
1435 for (i = 1, k = 1; i < frag_count; i++, k++) {
1436 struct skb_frag_struct *frag;
1437 int len, temp_len;
1438 unsigned long offset;
1439
1440 /* move to next desc. if there is a need */
1441 if ((i & 0x3) == 0) {
1442 k = 0;
1443 producer = get_next_index(producer, num_txd);
1444 hwdesc = &tx_ring->desc_head[producer];
1445 netxen_clear_cmddesc((u64 *)hwdesc);
1446 pbuf = &tx_ring->cmd_buf_arr[producer];
1447 pbuf->skb = NULL;
1448 }
1449 frag = &skb_shinfo(skb)->frags[i - 1];
1450 len = frag->size;
1451 offset = frag->page_offset;
1452
1453 temp_len = len;
1454 temp_dma = pci_map_page(pdev, frag->page, offset,
1455 len, PCI_DMA_TODEVICE);
1456 if (pci_dma_mapping_error(pdev, temp_dma)) {
1457 netxen_clean_tx_dma_mapping(pdev, pbuf, i);
1458 goto drop_packet;
1459 }
1460
1461 buffrag++;
1462 buffrag->dma = temp_dma;
1463 buffrag->length = temp_len;
1464
1465 hwdesc->buffer_length[k] = cpu_to_le16(temp_len);
1466 switch (k) {
1467 case 0:
1468 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1469 break;
1470 case 1:
1471 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
1472 break;
1473 case 2:
1474 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
1475 break;
1476 case 3:
1477 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
1478 break;
1479 }
1480 frag++;
1481 }
1482 producer = get_next_index(producer, num_txd);
1483
1484 /* For LSO, we need to copy the MAC/IP/TCP headers into
1485 * the descriptor ring
1486 */
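/*
 * Note: the headers are copied at offset 2 into a spare command descriptor
 * (presumably past its 2-byte opcode/flags word); up to
 * sizeof(struct cmd_desc_type0) - 2 bytes fit in the first one and any
 * remainder spills into a second descriptor.
 */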
1487 if (is_tso) {
1488 int hdr_len, first_hdr_len, more_hdr;
1489 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1490 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
1491 first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
1492 more_hdr = 1;
1493 } else {
1494 first_hdr_len = hdr_len;
1495 more_hdr = 0;
1496 }
1497 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
1498 hwdesc = &tx_ring->desc_head[producer];
1499 pbuf = &tx_ring->cmd_buf_arr[producer];
1500 pbuf->skb = NULL;
1501
1502 /* copy the first 64 bytes */
1503 memcpy(((void *)hwdesc) + 2,
1504 (void *)(skb->data), first_hdr_len);
1505 producer = get_next_index(producer, num_txd);
1506
1507 if (more_hdr) {
1508 hwdesc = &tx_ring->desc_head[producer];
1509 pbuf = &tx_ring->cmd_buf_arr[producer];
1510 pbuf->skb = NULL;
1511 /* copy the next 64 bytes - should be enough except
1512 * for pathological case
1513 */
1514 skb_copy_from_linear_data_offset(skb, first_hdr_len,
1515 hwdesc,
1516 (hdr_len -
1517 first_hdr_len));
1518 producer = get_next_index(producer, num_txd);
1519 }
1520 }
1521
1522 tx_ring->producer = producer;
1523 adapter->stats.txbytes += skb->len;
1524
1525 netxen_nic_update_cmd_producer(adapter, tx_ring);
1526
1527 adapter->stats.xmitcalled++;
1528
1529 return NETDEV_TX_OK;
1530
1531 drop_packet:
1532 adapter->stats.txdropped++;
1533 dev_kfree_skb_any(skb);
1534 return NETDEV_TX_OK;
1535 }
1536
1537 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1538 {
1539 struct net_device *netdev = adapter->netdev;
1540 uint32_t temp, temp_state, temp_val;
1541 int rv = 0;
1542
1543 temp = NXRD32(adapter, CRB_TEMP_STATE);
1544
1545 temp_state = nx_get_temp_state(temp);
1546 temp_val = nx_get_temp_val(temp);
1547
1548 if (temp_state == NX_TEMP_PANIC) {
1549 printk(KERN_ALERT
1550 "%s: Device temperature %d degrees C exceeds"
1551 " maximum allowed. Hardware has been shut down.\n",
1552 netdev->name, temp_val);
1553 rv = 1;
1554 } else if (temp_state == NX_TEMP_WARN) {
1555 if (adapter->temp == NX_TEMP_NORMAL) {
1556 printk(KERN_ALERT
1557 "%s: Device temperature %d degrees C "
1558 "exceeds operating range."
1559 " Immediate action needed.\n",
1560 netdev->name, temp_val);
1561 }
1562 } else {
1563 if (adapter->temp == NX_TEMP_WARN) {
1564 printk(KERN_INFO
1565 "%s: Device temperature is now %d degrees C"
1566 " in normal range.\n", netdev->name,
1567 temp_val);
1568 }
1569 }
1570 adapter->temp = temp_state;
1571 return rv;
1572 }
1573
1574 void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
1575 {
1576 struct net_device *netdev = adapter->netdev;
1577
1578 if (adapter->ahw.linkup && !linkup) {
1579 printk(KERN_INFO "%s: %s NIC Link is down\n",
1580 netxen_nic_driver_name, netdev->name);
1581 adapter->ahw.linkup = 0;
1582 if (netif_running(netdev)) {
1583 netif_carrier_off(netdev);
1584 netif_stop_queue(netdev);
1585 }
1586 adapter->link_changed = !adapter->has_link_events;
1587 } else if (!adapter->ahw.linkup && linkup) {
1588 printk(KERN_INFO "%s: %s NIC Link is up\n",
1589 netxen_nic_driver_name, netdev->name);
1590 adapter->ahw.linkup = 1;
1591 if (netif_running(netdev)) {
1592 netif_carrier_on(netdev);
1593 netif_wake_queue(netdev);
1594 }
1595 adapter->link_changed = !adapter->has_link_events;
1596 }
1597 }
1598
1599 static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1600 {
1601 u32 val, port, linkup;
1602
1603 port = adapter->physical_port;
1604
1605 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1606 val = NXRD32(adapter, CRB_XG_STATE_P3);
1607 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1608 linkup = (val == XG_LINK_UP_P3);
1609 } else {
1610 val = NXRD32(adapter, CRB_XG_STATE);
1611 if (adapter->ahw.port_type == NETXEN_NIC_GBE)
1612 linkup = (val >> port) & 1;
1613 else {
1614 val = (val >> port*8) & 0xff;
1615 linkup = (val == XG_LINK_UP);
1616 }
1617 }
1618
1619 netxen_advert_link_change(adapter, linkup);
1620 }
1621
1622 static void netxen_nic_thermal_shutdown(struct netxen_adapter *adapter)
1623 {
1624 struct net_device *netdev = adapter->netdev;
1625
1626 netif_device_detach(netdev);
1627 netxen_nic_down(adapter, netdev);
1628 netxen_nic_detach(adapter);
1629 }
1630
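/*
 * Watchdog timer callback (timer/softirq context): check the temperature
 * and, unless the firmware delivers link events, poll the link state. Work
 * that needs process context (thermal shutdown, link parameter updates) is
 * deferred to watchdog_task; the timer re-arms every 2 seconds while the
 * interface is running.
 */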
1631 static void netxen_watchdog(unsigned long v)
1632 {
1633 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
1634
1635 if (netxen_nic_check_temp(adapter))
1636 goto do_sched;
1637
1638 if (!adapter->has_link_events) {
1639 netxen_nic_handle_phy_intr(adapter);
1640
1641 if (adapter->link_changed)
1642 goto do_sched;
1643 }
1644
1645 if (netif_running(adapter->netdev))
1646 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1647
1648 return;
1649
1650 do_sched:
1651 schedule_work(&adapter->watchdog_task);
1652 }
1653
1654 void netxen_watchdog_task(struct work_struct *work)
1655 {
1656 struct netxen_adapter *adapter =
1657 container_of(work, struct netxen_adapter, watchdog_task);
1658
1659 if (adapter->temp == NX_TEMP_PANIC) {
1660 netxen_nic_thermal_shutdown(adapter);
1661 return;
1662 }
1663
1664 if (adapter->link_changed)
1665 netxen_nic_set_link_parameters(adapter);
1666
1667 if (netif_running(adapter->netdev))
1668 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1669 }
1670
1671 static void netxen_tx_timeout(struct net_device *netdev)
1672 {
1673 struct netxen_adapter *adapter = netdev_priv(netdev);
1674 schedule_work(&adapter->tx_timeout_task);
1675 }
1676
1677 static void netxen_tx_timeout_task(struct work_struct *work)
1678 {
1679 struct netxen_adapter *adapter =
1680 container_of(work, struct netxen_adapter, tx_timeout_task);
1681
1682 if (!netif_running(adapter->netdev))
1683 return;
1684
1685 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
1686 netxen_nic_driver_name, adapter->netdev->name);
1687
1688 netxen_napi_disable(adapter);
1689
1690 adapter->netdev->trans_start = jiffies;
1691
1692 netxen_napi_enable(adapter);
1693 netif_wake_queue(adapter->netdev);
1694 }
1695
1696 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1697 {
1698 struct netxen_adapter *adapter = netdev_priv(netdev);
1699 struct net_device_stats *stats = &adapter->net_stats;
1700
1701 memset(stats, 0, sizeof(*stats));
1702
1703 stats->rx_packets = adapter->stats.no_rcv;
1704 stats->tx_packets = adapter->stats.xmitfinished;
1705 stats->rx_bytes = adapter->stats.rxbytes;
1706 stats->tx_bytes = adapter->stats.txbytes;
1707 stats->rx_dropped = adapter->stats.rxdropped;
1708 stats->tx_dropped = adapter->stats.txdropped;
1709
1710 return stats;
1711 }
1712
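/*
 * Legacy INTx handler. The line may be shared, so first confirm the
 * interrupt is ours via the legacy vector bit (plus the interrupt state
 * register on newer revisions, or the CRB vector on older ones), then ack
 * it and leave the rest of the work to NAPI.
 */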
1713 static irqreturn_t netxen_intr(int irq, void *data)
1714 {
1715 struct nx_host_sds_ring *sds_ring = data;
1716 struct netxen_adapter *adapter = sds_ring->adapter;
1717 u32 status = 0;
1718
1719 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1720
1721 if (!(status & adapter->legacy_intr.int_vec_bit))
1722 return IRQ_NONE;
1723
1724 if (adapter->ahw.revision_id >= NX_P3_B1) {
1725 /* check interrupt state machine, to be sure */
1726 status = adapter->pci_read_immediate(adapter,
1727 ISR_INT_STATE_REG);
1728 if (!ISR_LEGACY_INT_TRIGGERED(status))
1729 return IRQ_NONE;
1730
1731 } else {
1732 unsigned long our_int = 0;
1733
1734 our_int = NXRD32(adapter, CRB_INT_VECTOR);
1735
1736 /* not our interrupt */
1737 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
1738 return IRQ_NONE;
1739
1740 /* claim interrupt */
1741 NXWR32(adapter, CRB_INT_VECTOR, (our_int & 0xffffffff));
1742 }
1743
1744 /* clear interrupt */
1745 if (adapter->fw_major < 4)
1746 netxen_nic_disable_int(sds_ring);
1747
1748 adapter->pci_write_immediate(adapter,
1749 adapter->legacy_intr.tgt_status_reg,
1750 0xffffffff);
1751 /* read twice to ensure write is flushed */
1752 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1753 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1754
1755 napi_schedule(&sds_ring->napi);
1756
1757 return IRQ_HANDLED;
1758 }
1759
1760 static irqreturn_t netxen_msi_intr(int irq, void *data)
1761 {
1762 struct nx_host_sds_ring *sds_ring = data;
1763 struct netxen_adapter *adapter = sds_ring->adapter;
1764
1765 /* clear interrupt */
1766 adapter->pci_write_immediate(adapter,
1767 adapter->msi_tgt_status, 0xffffffff);
1768
1769 napi_schedule(&sds_ring->napi);
1770 return IRQ_HANDLED;
1771 }
1772
1773 static irqreturn_t netxen_msix_intr(int irq, void *data)
1774 {
1775 struct nx_host_sds_ring *sds_ring = data;
1776
1777 napi_schedule(&sds_ring->napi);
1778 return IRQ_HANDLED;
1779 }
1780
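/*
 * NAPI poll: reclaim tx completions and process up to budget rx packets.
 * Interrupts for this ring are re-enabled only when the budget was not
 * exhausted and the tx ring was fully cleaned.
 */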
1781 static int netxen_nic_poll(struct napi_struct *napi, int budget)
1782 {
1783 struct nx_host_sds_ring *sds_ring =
1784 container_of(napi, struct nx_host_sds_ring, napi);
1785
1786 struct netxen_adapter *adapter = sds_ring->adapter;
1787
1788 int tx_complete;
1789 int work_done;
1790
1791 tx_complete = netxen_process_cmd_ring(adapter);
1792
1793 work_done = netxen_process_rcv_ring(sds_ring, budget);
1794
1795 if ((work_done < budget) && tx_complete) {
1796 napi_complete(&sds_ring->napi);
1797 if (netif_running(adapter->netdev))
1798 netxen_nic_enable_int(sds_ring);
1799 }
1800
1801 return work_done;
1802 }
1803
1804 #ifdef CONFIG_NET_POLL_CONTROLLER
1805 static void netxen_nic_poll_controller(struct net_device *netdev)
1806 {
1807 struct netxen_adapter *adapter = netdev_priv(netdev);
1808 disable_irq(adapter->irq);
1809 netxen_intr(adapter->irq, adapter);
1810 enable_irq(adapter->irq);
1811 }
1812 #endif
1813
1814 static struct pci_driver netxen_driver = {
1815 .name = netxen_nic_driver_name,
1816 .id_table = netxen_pci_tbl,
1817 .probe = netxen_nic_probe,
1818 .remove = __devexit_p(netxen_nic_remove),
1819 #ifdef CONFIG_PM
1820 .suspend = netxen_nic_suspend,
1821 .resume = netxen_nic_resume
1822 #endif
1823 };
1824
1825 /* Driver Registration on NetXen card */
1826
1827 static int __init netxen_init_module(void)
1828 {
1829 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
1830
1831 return pci_register_driver(&netxen_driver);
1832 }
1833
1834 module_init(netxen_init_module);
1835
1836 static void __exit netxen_exit_module(void)
1837 {
1838 pci_unregister_driver(&netxen_driver);
1839 }
1840
1841 module_exit(netxen_exit_module);