netxen: Add default and limit macros for ring sizes.
[deliverable/linux.git] drivers/net/netxen/netxen_nic_main.c
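A note on the commit subject: the default and limit macros for ring sizes are defined in netxen_nic.h and consumed below in netxen_check_options(), which picks per-port-type Rx ring defaults and the Tx/jumbo/LRO ring limits. A minimal, hypothetical sketch of how such macros might look is given here; the names match those used in this file, but the values are illustrative assumptions, not the authoritative definitions from the header.

/*
 * Hedged sketch only -- the values below are assumptions for
 * illustration. The real definitions live in netxen_nic.h.
 */
#define MAX_CMD_DESCRIPTORS		4096	/* Tx (command) ring depth */
#define DEFAULT_RCV_DESCRIPTORS_1G	2048	/* default Rx ring, 1G ports */
#define DEFAULT_RCV_DESCRIPTORS_10G	4096	/* default Rx ring, 10G ports */
#define MAX_JUMBO_RCV_DESCRIPTORS_1G	512	/* jumbo Rx ring limit, 1G ports */
#define MAX_JUMBO_RCV_DESCRIPTORS_10G	1024	/* jumbo Rx ring limit, 10G ports */
#define MAX_LRO_RCV_DESCRIPTORS		8	/* LRO ring, P2 (NX2031) only */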
1 /*
2 * Copyright (C) 2003 - 2009 NetXen, Inc.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
22 *
23 * Contact Information:
24 * info@netxen.com
25 * NetXen Inc,
26 * 18922 Forge Drive
27 * Cupertino, CA 95014-0701
28 *
29 */
30
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include "netxen_nic_hw.h"
34
35 #include "netxen_nic.h"
36 #include "netxen_nic_phan_reg.h"
37
38 #include <linux/dma-mapping.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <linux/ipv6.h>
42 #include <linux/inetdevice.h>
43
44 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
45 MODULE_LICENSE("GPL");
46 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
47
48 char netxen_nic_driver_name[] = "netxen_nic";
49 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
50 NETXEN_NIC_LINUX_VERSIONID;
51
52 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
53
54 /* Default to restricted 1G auto-neg mode */
55 static int wol_port_mode = 5;
56
57 static int use_msi = 1;
58
59 static int use_msi_x = 1;
60
61 /* Local functions to NetXen NIC driver */
62 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
63 const struct pci_device_id *ent);
64 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
65 static int netxen_nic_open(struct net_device *netdev);
66 static int netxen_nic_close(struct net_device *netdev);
67 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
68 static void netxen_tx_timeout(struct net_device *netdev);
69 static void netxen_tx_timeout_task(struct work_struct *work);
70 static void netxen_watchdog(unsigned long);
71 static int netxen_nic_poll(struct napi_struct *napi, int budget);
72 #ifdef CONFIG_NET_POLL_CONTROLLER
73 static void netxen_nic_poll_controller(struct net_device *netdev);
74 #endif
75 static irqreturn_t netxen_intr(int irq, void *data);
76 static irqreturn_t netxen_msi_intr(int irq, void *data);
77 static irqreturn_t netxen_msix_intr(int irq, void *data);
78
79 /* PCI Device ID Table */
80 #define ENTRY(device) \
81 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
82 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
83
84 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
85 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
86 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
87 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
88 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
89 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
91 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
92 ENTRY(PCI_DEVICE_ID_NX3031),
93 {0,}
94 };
95
96 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
97
98 static struct workqueue_struct *netxen_workq;
99 #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
100 #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
101
102 static void netxen_watchdog(unsigned long);
103
104 static uint32_t crb_cmd_producer[4] = {
105 CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
106 CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
107 };
108
109 void
110 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
111 struct nx_host_tx_ring *tx_ring)
112 {
113 NXWR32(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
114
115 if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
116 netif_stop_queue(adapter->netdev);
117 smp_mb();
118 }
119 }
120
121 static uint32_t crb_cmd_consumer[4] = {
122 CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
123 CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
124 };
125
126 static inline void
127 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
128 struct nx_host_tx_ring *tx_ring)
129 {
130 NXWR32(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
131 }
132
133 static uint32_t msi_tgt_status[8] = {
134 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
135 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
136 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
137 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
138 };
139
140 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
141
142 static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
143 {
144 struct netxen_adapter *adapter = sds_ring->adapter;
145
146 NXWR32(adapter, sds_ring->crb_intr_mask, 0);
147 }
148
149 static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
150 {
151 struct netxen_adapter *adapter = sds_ring->adapter;
152
153 NXWR32(adapter, sds_ring->crb_intr_mask, 0x1);
154
155 if (!NETXEN_IS_MSI_FAMILY(adapter))
156 adapter->pci_write_immediate(adapter,
157 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
158 }
159
160 static int
161 netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
162 {
163 int size = sizeof(struct nx_host_sds_ring) * count;
164
165 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
166
167 return (recv_ctx->sds_rings == NULL);
168 }
169
170 static void
171 netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
172 {
173 if (recv_ctx->sds_rings != NULL)
174 kfree(recv_ctx->sds_rings);
175 }
176
177 static int
178 netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
179 {
180 int ring;
181 struct nx_host_sds_ring *sds_ring;
182 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
183
184 if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
185 return 1;
186
187 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
188 sds_ring = &recv_ctx->sds_rings[ring];
189 netif_napi_add(netdev, &sds_ring->napi,
190 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
191 }
192
193 return 0;
194 }
195
196 static void
197 netxen_napi_enable(struct netxen_adapter *adapter)
198 {
199 int ring;
200 struct nx_host_sds_ring *sds_ring;
201 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
202
203 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
204 sds_ring = &recv_ctx->sds_rings[ring];
205 napi_enable(&sds_ring->napi);
206 netxen_nic_enable_int(sds_ring);
207 }
208 }
209
210 static void
211 netxen_napi_disable(struct netxen_adapter *adapter)
212 {
213 int ring;
214 struct nx_host_sds_ring *sds_ring;
215 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
216
217 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
218 sds_ring = &recv_ctx->sds_rings[ring];
219 netxen_nic_disable_int(sds_ring);
220 napi_synchronize(&sds_ring->napi);
221 napi_disable(&sds_ring->napi);
222 }
223 }
224
225 static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
226 {
227 struct pci_dev *pdev = adapter->pdev;
228 uint64_t mask, cmask;
229
230 adapter->pci_using_dac = 0;
231
232 mask = DMA_BIT_MASK(32);
233 /*
234 * Consistent DMA mask is set to 32 bit because it cannot be set to
235 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
236 * come off this pool.
237 */
238 cmask = DMA_BIT_MASK(32);
239
240 #ifndef CONFIG_IA64
241 if (revision_id >= NX_P3_B0)
242 mask = DMA_BIT_MASK(39);
243 else if (revision_id == NX_P2_C1)
244 mask = DMA_BIT_MASK(35);
245 #endif
246 if (pci_set_dma_mask(pdev, mask) == 0 &&
247 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
248 adapter->pci_using_dac = 1;
249 return 0;
250 }
251
252 return -EIO;
253 }
254
255 /* Update addressable range if firmware supports it */
256 static int
257 nx_update_dma_mask(struct netxen_adapter *adapter)
258 {
259 int change, shift, err;
260 uint64_t mask, old_mask;
261 struct pci_dev *pdev = adapter->pdev;
262
263 change = 0;
264
265 shift = NXRD32(adapter, CRB_DMA_SHIFT);
266 if (shift >= 32)
267 return 0;
268
269 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
270 change = 1;
271 else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
272 change = 1;
273
274 if (change) {
275 old_mask = pdev->dma_mask;
276 mask = (1ULL<<(32+shift)) - 1;
277
278 err = pci_set_dma_mask(pdev, mask);
279 if (err)
280 return pci_set_dma_mask(pdev, old_mask);
281 }
282
283 return 0;
284 }
285
286 static void
287 netxen_check_options(struct netxen_adapter *adapter)
288 {
289 if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
290 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
291 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
292 } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
293 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
294 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
295 }
296
297 adapter->msix_supported = 0;
298 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
299 adapter->msix_supported = !!use_msi_x;
300 adapter->rss_supported = !!use_msi_x;
301 } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
302 switch (adapter->ahw.board_type) {
303 case NETXEN_BRDTYPE_P2_SB31_10G:
304 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
305 adapter->msix_supported = !!use_msi_x;
306 adapter->rss_supported = !!use_msi_x;
307 break;
308 default:
309 break;
310 }
311 }
312
313 adapter->num_txd = MAX_CMD_DESCRIPTORS;
314
315 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
316 adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
317 adapter->max_rds_rings = 3;
318 } else {
319 adapter->num_lro_rxd = 0;
320 adapter->max_rds_rings = 2;
321 }
322 }
323
324 static int
325 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
326 {
327 u32 val, timeout;
328
329 if (first_boot == 0x55555555) {
330 /* This is the first boot after power up */
331 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
332
333 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
334 return 0;
335
336 /* PCI bus master workaround */
337 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
338 if (!(first_boot & 0x4)) {
339 first_boot |= 0x4;
340 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
341 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
342 }
343
344 /* This is the first boot after power up */
345 first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
346 if (first_boot != 0x80000f) {
347 /* clear the register for future unloads/loads */
348 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
349 return -EIO;
350 }
351
352 /* Start P2 boot loader */
353 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
354 NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
355 timeout = 0;
356 do {
357 msleep(1);
358 val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
359
360 if (++timeout > 5000)
361 return -EIO;
362
363 } while (val == NETXEN_BDINFO_MAGIC);
364 }
365 return 0;
366 }
367
368 static void netxen_set_port_mode(struct netxen_adapter *adapter)
369 {
370 u32 val, data;
371
372 val = adapter->ahw.board_type;
373 if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
374 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
375 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
376 data = NETXEN_PORT_MODE_802_3_AP;
377 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
378 } else if (port_mode == NETXEN_PORT_MODE_XG) {
379 data = NETXEN_PORT_MODE_XG;
380 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
381 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
382 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
383 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
384 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
385 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
386 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
387 } else {
388 data = NETXEN_PORT_MODE_AUTO_NEG;
389 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
390 }
391
392 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
393 (wol_port_mode != NETXEN_PORT_MODE_XG) &&
394 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
395 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
396 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
397 }
398 NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
399 }
400 }
401
402 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
403 {
404 u32 control;
405 int pos;
406
407 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
408 if (pos) {
409 pci_read_config_dword(pdev, pos, &control);
410 if (enable)
411 control |= PCI_MSIX_FLAGS_ENABLE;
412 else
413 control = 0;
414 pci_write_config_dword(pdev, pos, control);
415 }
416 }
417
418 static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
419 {
420 int i;
421
422 for (i = 0; i < count; i++)
423 adapter->msix_entries[i].entry = i;
424 }
425
426 static int
427 netxen_read_mac_addr(struct netxen_adapter *adapter)
428 {
429 int i;
430 unsigned char *p;
431 __le64 mac_addr;
432 struct net_device *netdev = adapter->netdev;
433 struct pci_dev *pdev = adapter->pdev;
434
435 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
436 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
437 return -EIO;
438 } else {
439 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
440 return -EIO;
441 }
442
443 p = (unsigned char *)&mac_addr;
444 for (i = 0; i < 6; i++)
445 netdev->dev_addr[i] = *(p + 5 - i);
446
447 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
448
449 /* set station address */
450
451 if (!is_valid_ether_addr(netdev->perm_addr))
452 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
453
454 return 0;
455 }
456
457 int netxen_nic_set_mac(struct net_device *netdev, void *p)
458 {
459 struct netxen_adapter *adapter = netdev_priv(netdev);
460 struct sockaddr *addr = p;
461
462 if (!is_valid_ether_addr(addr->sa_data))
463 return -EINVAL;
464
465 if (netif_running(netdev)) {
466 netif_device_detach(netdev);
467 netxen_napi_disable(adapter);
468 }
469
470 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
471 adapter->macaddr_set(adapter, addr->sa_data);
472
473 if (netif_running(netdev)) {
474 netif_device_attach(netdev);
475 netxen_napi_enable(adapter);
476 }
477 return 0;
478 }
479
480 static void netxen_set_multicast_list(struct net_device *dev)
481 {
482 struct netxen_adapter *adapter = netdev_priv(dev);
483
484 adapter->set_multi(dev);
485 }
486
487 static const struct net_device_ops netxen_netdev_ops = {
488 .ndo_open = netxen_nic_open,
489 .ndo_stop = netxen_nic_close,
490 .ndo_start_xmit = netxen_nic_xmit_frame,
491 .ndo_get_stats = netxen_nic_get_stats,
492 .ndo_validate_addr = eth_validate_addr,
493 .ndo_set_multicast_list = netxen_set_multicast_list,
494 .ndo_set_mac_address = netxen_nic_set_mac,
495 .ndo_change_mtu = netxen_nic_change_mtu,
496 .ndo_tx_timeout = netxen_tx_timeout,
497 #ifdef CONFIG_NET_POLL_CONTROLLER
498 .ndo_poll_controller = netxen_nic_poll_controller,
499 #endif
500 };
501
502 static void
503 netxen_setup_intr(struct netxen_adapter *adapter)
504 {
505 struct netxen_legacy_intr_set *legacy_intrp;
506 struct pci_dev *pdev = adapter->pdev;
507 int err, num_msix;
508
509 if (adapter->rss_supported) {
510 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
511 MSIX_ENTRIES_PER_ADAPTER : 2;
512 } else
513 num_msix = 1;
514
515 adapter->max_sds_rings = 1;
516
517 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
518
519 if (adapter->ahw.revision_id >= NX_P3_B0)
520 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
521 else
522 legacy_intrp = &legacy_intr[0];
523 adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
524 adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
525 adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
526 adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;
527
528 netxen_set_msix_bit(pdev, 0);
529
530 if (adapter->msix_supported) {
531
532 netxen_init_msix_entries(adapter, num_msix);
533 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
534 if (err == 0) {
535 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
536 netxen_set_msix_bit(pdev, 1);
537
538 if (adapter->rss_supported)
539 adapter->max_sds_rings = num_msix;
540
541 dev_info(&pdev->dev, "using msi-x interrupts\n");
542 return;
543 }
544
545 if (err > 0)
546 pci_disable_msix(pdev);
547
548 /* fall through for msi */
549 }
550
551 if (use_msi && !pci_enable_msi(pdev)) {
552 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
553 adapter->msi_tgt_status =
554 msi_tgt_status[adapter->ahw.pci_func];
555 dev_info(&pdev->dev, "using msi interrupts\n");
556 adapter->msix_entries[0].vector = pdev->irq;
557 return;
558 }
559
560 dev_info(&pdev->dev, "using legacy interrupts\n");
561 adapter->msix_entries[0].vector = pdev->irq;
562 }
563
564 static void
565 netxen_teardown_intr(struct netxen_adapter *adapter)
566 {
567 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
568 pci_disable_msix(adapter->pdev);
569 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
570 pci_disable_msi(adapter->pdev);
571 }
572
573 static void
574 netxen_cleanup_pci_map(struct netxen_adapter *adapter)
575 {
576 if (adapter->ahw.db_base != NULL)
577 iounmap(adapter->ahw.db_base);
578 if (adapter->ahw.pci_base0 != NULL)
579 iounmap(adapter->ahw.pci_base0);
580 if (adapter->ahw.pci_base1 != NULL)
581 iounmap(adapter->ahw.pci_base1);
582 if (adapter->ahw.pci_base2 != NULL)
583 iounmap(adapter->ahw.pci_base2);
584 }
585
586 static int
587 netxen_setup_pci_map(struct netxen_adapter *adapter)
588 {
589 void __iomem *mem_ptr0 = NULL;
590 void __iomem *mem_ptr1 = NULL;
591 void __iomem *mem_ptr2 = NULL;
592 void __iomem *db_ptr = NULL;
593
594 unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;
595
596 struct pci_dev *pdev = adapter->pdev;
597 int pci_func = adapter->ahw.pci_func;
598
599 int err = 0;
600
601 /*
602 * Set the CRB window to invalid. If any register in window 0 is
603 * accessed it should set the window to 0 and then reset it to 1.
604 */
605 adapter->curr_window = 255;
606 adapter->ahw.qdr_sn_window = -1;
607 adapter->ahw.ddr_mn_window = -1;
608
609 /* remap phys address */
610 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
611 mem_len = pci_resource_len(pdev, 0);
612 pci_len0 = 0;
613
614 adapter->hw_write_wx = netxen_nic_hw_write_wx_128M;
615 adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
616 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
617 adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
618 adapter->pci_set_window = netxen_nic_pci_set_window_128M;
619 adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
620 adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
621
622 /* 128 Meg of memory */
623 if (mem_len == NETXEN_PCI_128MB_SIZE) {
624 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
625 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
626 SECOND_PAGE_GROUP_SIZE);
627 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
628 THIRD_PAGE_GROUP_SIZE);
629 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
630 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
631 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
632 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
633 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
634 adapter->hw_write_wx = netxen_nic_hw_write_wx_2M;
635 adapter->hw_read_wx = netxen_nic_hw_read_wx_2M;
636 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
637 adapter->pci_write_immediate =
638 netxen_nic_pci_write_immediate_2M;
639 adapter->pci_set_window = netxen_nic_pci_set_window_2M;
640 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
641 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
642
643 mem_ptr0 = pci_ioremap_bar(pdev, 0);
644 if (mem_ptr0 == NULL) {
645 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
646 return -EIO;
647 }
648 pci_len0 = mem_len;
649
650 adapter->ahw.ddr_mn_window = 0;
651 adapter->ahw.qdr_sn_window = 0;
652
653 adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
654 (pci_func * 0x20);
655 adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW;
656 if (pci_func < 4)
657 adapter->ahw.ms_win_crb += (pci_func * 0x20);
658 else
659 adapter->ahw.ms_win_crb +=
660 0xA0 + ((pci_func - 4) * 0x10);
661 } else {
662 return -EIO;
663 }
664
665 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
666
667 adapter->ahw.pci_base0 = mem_ptr0;
668 adapter->ahw.pci_len0 = pci_len0;
669 adapter->ahw.pci_base1 = mem_ptr1;
670 adapter->ahw.pci_base2 = mem_ptr2;
671
672 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
673 goto skip_doorbell;
674
675 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
676 db_len = pci_resource_len(pdev, 4);
677
678 if (db_len == 0) {
679 printk(KERN_ERR "%s: doorbell is disabled\n",
680 netxen_nic_driver_name);
681 err = -EIO;
682 goto err_out;
683 }
684
685 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
686 if (!db_ptr) {
687 printk(KERN_ERR "%s: Failed to allocate doorbell map.",
688 netxen_nic_driver_name);
689 err = -EIO;
690 goto err_out;
691 }
692
693 skip_doorbell:
694 adapter->ahw.db_base = db_ptr;
695 adapter->ahw.db_len = db_len;
696 return 0;
697
698 err_out:
699 netxen_cleanup_pci_map(adapter);
700 return err;
701 }
702
703 static int
704 netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
705 {
706 int val, err, first_boot;
707 struct pci_dev *pdev = adapter->pdev;
708
709 int first_driver = 0;
710
711 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
712 first_driver = (adapter->portnum == 0);
713 else
714 first_driver = (adapter->ahw.pci_func == 0);
715
716 if (!first_driver)
717 goto wait_init;
718
719 first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
720
721 err = netxen_check_hw_init(adapter, first_boot);
722 if (err) {
723 dev_err(&pdev->dev, "error in init HW init sequence\n");
724 return err;
725 }
726
727 if (request_fw)
728 netxen_request_firmware(adapter);
729
730 err = netxen_need_fw_reset(adapter);
731 if (err <= 0)
732 return err;
733
734 if (first_boot != 0x55555555) {
735 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
736 netxen_pinit_from_rom(adapter, 0);
737 msleep(1);
738 }
739
740 NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
741 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
742 netxen_set_port_mode(adapter);
743
744 netxen_load_firmware(adapter);
745
746 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
747
748 /* Initialize multicast addr pool owners */
749 val = 0x7654;
750 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
751 val |= 0x0f000000;
752 NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
753
754 }
755
756 err = netxen_init_dummy_dma(adapter);
757 if (err)
758 return err;
759
760 /*
761 * Tell the hardware our version number.
762 */
763 val = (_NETXEN_NIC_LINUX_MAJOR << 16)
764 | ((_NETXEN_NIC_LINUX_MINOR << 8))
765 | (_NETXEN_NIC_LINUX_SUBVERSION);
766 NXWR32(adapter, CRB_DRIVER_VERSION, val);
767
768 wait_init:
769 /* Handshake with the card before we register the devices. */
770 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
771 if (err) {
772 netxen_free_dummy_dma(adapter);
773 return err;
774 }
775
776 nx_update_dma_mask(adapter);
777
778 netxen_nic_get_firmware_info(adapter);
779
780 return 0;
781 }
782
783 static int
784 netxen_nic_request_irq(struct netxen_adapter *adapter)
785 {
786 irq_handler_t handler;
787 struct nx_host_sds_ring *sds_ring;
788 int err, ring;
789
790 unsigned long flags = IRQF_SAMPLE_RANDOM;
791 struct net_device *netdev = adapter->netdev;
792 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
793
794 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
795 handler = netxen_msix_intr;
796 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
797 handler = netxen_msi_intr;
798 else {
799 flags |= IRQF_SHARED;
800 handler = netxen_intr;
801 }
802 adapter->irq = netdev->irq;
803
804 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
805 sds_ring = &recv_ctx->sds_rings[ring];
806 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
807 err = request_irq(sds_ring->irq, handler,
808 flags, sds_ring->name, sds_ring);
809 if (err)
810 return err;
811 }
812
813 return 0;
814 }
815
816 static void
817 netxen_nic_free_irq(struct netxen_adapter *adapter)
818 {
819 int ring;
820 struct nx_host_sds_ring *sds_ring;
821
822 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
823
824 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
825 sds_ring = &recv_ctx->sds_rings[ring];
826 free_irq(sds_ring->irq, sds_ring);
827 }
828 }
829
830 static void
831 netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
832 {
833 adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
834 adapter->coal.normal.data.rx_time_us =
835 NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
836 adapter->coal.normal.data.rx_packets =
837 NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
838 adapter->coal.normal.data.tx_time_us =
839 NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
840 adapter->coal.normal.data.tx_packets =
841 NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
842 }
843
844 static int
845 netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
846 {
847 int err;
848
849 err = adapter->init_port(adapter, adapter->physical_port);
850 if (err) {
851 printk(KERN_ERR "%s: Failed to initialize port %d\n",
852 netxen_nic_driver_name, adapter->portnum);
853 return err;
854 }
855 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
856 adapter->macaddr_set(adapter, netdev->dev_addr);
857
858 adapter->set_multi(netdev);
859 adapter->set_mtu(adapter, netdev->mtu);
860
861 adapter->ahw.linkup = 0;
862
863 if (adapter->max_sds_rings > 1)
864 netxen_config_rss(adapter, 1);
865
866 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
867 netxen_config_intr_coalesce(adapter);
868
869 netxen_napi_enable(adapter);
870
871 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
872 netxen_linkevent_request(adapter, 1);
873 else
874 netxen_nic_set_link_parameters(adapter);
875
876 mod_timer(&adapter->watchdog_timer, jiffies);
877
878 return 0;
879 }
880
881 static void
882 netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
883 {
884 spin_lock(&adapter->tx_clean_lock);
885 netif_carrier_off(netdev);
886 netif_tx_disable(netdev);
887
888 if (adapter->stop_port)
889 adapter->stop_port(adapter);
890
891 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
892 netxen_p3_free_mac_list(adapter);
893
894 netxen_napi_disable(adapter);
895
896 netxen_release_tx_buffers(adapter);
897 spin_unlock(&adapter->tx_clean_lock);
898
899 del_timer_sync(&adapter->watchdog_timer);
900 FLUSH_SCHEDULED_WORK();
901 }
902
903
904 static int
905 netxen_nic_attach(struct netxen_adapter *adapter)
906 {
907 struct net_device *netdev = adapter->netdev;
908 struct pci_dev *pdev = adapter->pdev;
909 int err, ring;
910 struct nx_host_rds_ring *rds_ring;
911 struct nx_host_tx_ring *tx_ring;
912
913 err = netxen_init_firmware(adapter);
914 if (err != 0) {
915 printk(KERN_ERR "Failed to init firmware\n");
916 return -EIO;
917 }
918
919 err = netxen_alloc_sw_resources(adapter);
920 if (err) {
921 printk(KERN_ERR "%s: Error in setting sw resources\n",
922 netdev->name);
923 return err;
924 }
925
926 netxen_nic_clear_stats(adapter);
927
928 err = netxen_alloc_hw_resources(adapter);
929 if (err) {
930 printk(KERN_ERR "%s: Error in setting hw resources\n",
931 netdev->name);
932 goto err_out_free_sw;
933 }
934
935 if (adapter->fw_major < 4) {
936 tx_ring = adapter->tx_ring;
937 tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
938 tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
939
940 tx_ring->producer = 0;
941 tx_ring->sw_consumer = 0;
942
943 netxen_nic_update_cmd_producer(adapter, tx_ring);
944 netxen_nic_update_cmd_consumer(adapter, tx_ring);
945 }
946
947 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
948 rds_ring = &adapter->recv_ctx.rds_rings[ring];
949 netxen_post_rx_buffers(adapter, ring, rds_ring);
950 }
951
952 err = netxen_nic_request_irq(adapter);
953 if (err) {
954 dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
955 netdev->name);
956 goto err_out_free_rxbuf;
957 }
958
959 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
960 netxen_nic_init_coalesce_defaults(adapter);
961
962 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
963 return 0;
964
965 err_out_free_rxbuf:
966 netxen_release_rx_buffers(adapter);
967 netxen_free_hw_resources(adapter);
968 err_out_free_sw:
969 netxen_free_sw_resources(adapter);
970 return err;
971 }
972
973 static void
974 netxen_nic_detach(struct netxen_adapter *adapter)
975 {
976 netxen_free_hw_resources(adapter);
977 netxen_release_rx_buffers(adapter);
978 netxen_nic_free_irq(adapter);
979 netxen_free_sw_resources(adapter);
980
981 adapter->is_up = 0;
982 }
983
984 static int __devinit
985 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
986 {
987 struct net_device *netdev = NULL;
988 struct netxen_adapter *adapter = NULL;
989 int i = 0, err;
990 int pci_func_id = PCI_FUNC(pdev->devfn);
991 uint8_t revision_id;
992
993 if (pdev->class != 0x020000) {
994 printk(KERN_DEBUG "NetXen function %d, class %x will not "
995 "be enabled.\n",pci_func_id, pdev->class);
996 return -ENODEV;
997 }
998
999 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
1000 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x"
1001 "will not be enabled.\n",
1002 NX_P3_A0, NX_P3_B1);
1003 return -ENODEV;
1004 }
1005
1006 if ((err = pci_enable_device(pdev)))
1007 return err;
1008
1009 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1010 err = -ENODEV;
1011 goto err_out_disable_pdev;
1012 }
1013
1014 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
1015 goto err_out_disable_pdev;
1016
1017 pci_set_master(pdev);
1018
1019 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
1020 if (!netdev) {
1021 printk(KERN_ERR "%s: Failed to allocate memory for the "
1022 "device block. Check system memory resource"
1023 " usage.\n", netxen_nic_driver_name);
err = -ENOMEM;
1024 goto err_out_free_res;
1025 }
1026
1027 SET_NETDEV_DEV(netdev, &pdev->dev);
1028
1029 adapter = netdev_priv(netdev);
1030 adapter->netdev = netdev;
1031 adapter->pdev = pdev;
1032 adapter->ahw.pci_func = pci_func_id;
1033
1034 revision_id = pdev->revision;
1035 adapter->ahw.revision_id = revision_id;
1036
1037 err = nx_set_dma_mask(adapter, revision_id);
1038 if (err)
1039 goto err_out_free_netdev;
1040
1041 rwlock_init(&adapter->adapter_lock);
1042 spin_lock_init(&adapter->tx_clean_lock);
1043 INIT_LIST_HEAD(&adapter->mac_list);
1044
1045 err = netxen_setup_pci_map(adapter);
1046 if (err)
1047 goto err_out_free_netdev;
1048
1049 /* This will be reset for mezz cards */
1050 adapter->portnum = pci_func_id;
1051 adapter->rx_csum = 1;
1052 adapter->mc_enabled = 0;
1053 if (NX_IS_REVISION_P3(revision_id))
1054 adapter->max_mc_count = 38;
1055 else
1056 adapter->max_mc_count = 16;
1057
1058 netdev->netdev_ops = &netxen_netdev_ops;
1059 netdev->watchdog_timeo = 2*HZ;
1060
1061 netxen_nic_change_mtu(netdev, netdev->mtu);
1062
1063 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
1064
1065 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1066 netdev->features |= (NETIF_F_GRO);
1067 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1068
1069 if (NX_IS_REVISION_P3(revision_id)) {
1070 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1071 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1072 }
1073
1074 if (adapter->pci_using_dac) {
1075 netdev->features |= NETIF_F_HIGHDMA;
1076 netdev->vlan_features |= NETIF_F_HIGHDMA;
1077 }
1078
1079 if (netxen_nic_get_board_info(adapter) != 0) {
1080 printk("%s: Error getting board config info.\n",
1081 netxen_nic_driver_name);
1082 err = -EIO;
1083 goto err_out_iounmap;
1084 }
1085
1086 netxen_initialize_adapter_ops(adapter);
1087
1088 /* Mezz cards have PCI function 0,2,3 enabled */
1089 switch (adapter->ahw.board_type) {
1090 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
1091 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
1092 if (pci_func_id >= 2)
1093 adapter->portnum = pci_func_id - 2;
1094 break;
1095 default:
1096 break;
1097 }
1098
1099 err = netxen_start_firmware(adapter, 1);
1100 if (err)
1101 goto err_out_iounmap;
1102 /*
1103 * See if the firmware gave us a virtual-physical port mapping.
1104 */
1105 adapter->physical_port = adapter->portnum;
1106 if (adapter->fw_major < 4) {
1107 i = NXRD32(adapter, CRB_V2P(adapter->portnum));
1108 if (i != 0x55555555)
1109 adapter->physical_port = i;
1110 }
1111
1112 netxen_check_options(adapter);
1113
1114 netxen_setup_intr(adapter);
1115
1116 netdev->irq = adapter->msix_entries[0].vector;
1117
1118 if (netxen_napi_add(adapter, netdev))
1119 goto err_out_disable_msi;
1120
1121 init_timer(&adapter->watchdog_timer);
1122 adapter->watchdog_timer.function = &netxen_watchdog;
1123 adapter->watchdog_timer.data = (unsigned long)adapter;
1124 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
1125 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
1126
1127 err = netxen_read_mac_addr(adapter);
1128 if (err)
1129 dev_warn(&pdev->dev, "failed to read mac addr\n");
1130
1131 netif_carrier_off(netdev);
1132 netif_stop_queue(netdev);
1133
1134 if ((err = register_netdev(netdev))) {
1135 printk(KERN_ERR "%s: register_netdev failed port #%d"
1136 " aborting\n", netxen_nic_driver_name,
1137 adapter->portnum);
1138 err = -EIO;
1139 goto err_out_disable_msi;
1140 }
1141
1142 pci_set_drvdata(pdev, adapter);
1143
1144 switch (adapter->ahw.port_type) {
1145 case NETXEN_NIC_GBE:
1146 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1147 adapter->netdev->name);
1148 break;
1149 case NETXEN_NIC_XGBE:
1150 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1151 adapter->netdev->name);
1152 break;
1153 }
1154
1155 return 0;
1156
1157 err_out_disable_msi:
1158 netxen_teardown_intr(adapter);
1159
1160 netxen_free_dummy_dma(adapter);
1161
1162 err_out_iounmap:
1163 netxen_cleanup_pci_map(adapter);
1164
1165 err_out_free_netdev:
1166 free_netdev(netdev);
1167
1168 err_out_free_res:
1169 pci_release_regions(pdev);
1170
1171 err_out_disable_pdev:
1172 pci_set_drvdata(pdev, NULL);
1173 pci_disable_device(pdev);
1174 return err;
1175 }
1176
1177 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1178 {
1179 struct netxen_adapter *adapter;
1180 struct net_device *netdev;
1181
1182 adapter = pci_get_drvdata(pdev);
1183 if (adapter == NULL)
1184 return;
1185
1186 netdev = adapter->netdev;
1187
1188 unregister_netdev(netdev);
1189
1190 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
1191 netxen_nic_detach(adapter);
1192 }
1193
1194 if (adapter->portnum == 0)
1195 netxen_free_dummy_dma(adapter);
1196
1197 netxen_teardown_intr(adapter);
1198 netxen_free_sds_rings(&adapter->recv_ctx);
1199
1200 netxen_cleanup_pci_map(adapter);
1201
1202 netxen_release_firmware(adapter);
1203
1204 pci_release_regions(pdev);
1205 pci_disable_device(pdev);
1206 pci_set_drvdata(pdev, NULL);
1207
1208 free_netdev(netdev);
1209 }
1210
1211 #ifdef CONFIG_PM
1212 static int
1213 netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1214 {
1215
1216 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1217 struct net_device *netdev = adapter->netdev;
1218
1219 netif_device_detach(netdev);
1220
1221 if (netif_running(netdev))
1222 netxen_nic_down(adapter, netdev);
1223
1224 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
1225 netxen_nic_detach(adapter);
1226
1227 pci_save_state(pdev);
1228
1229 if (netxen_nic_wol_supported(adapter)) {
1230 pci_enable_wake(pdev, PCI_D3cold, 1);
1231 pci_enable_wake(pdev, PCI_D3hot, 1);
1232 }
1233
1234 pci_disable_device(pdev);
1235 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1236
1237 return 0;
1238 }
1239
1240 static int
1241 netxen_nic_resume(struct pci_dev *pdev)
1242 {
1243 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1244 struct net_device *netdev = adapter->netdev;
1245 int err;
1246
1247 pci_set_power_state(pdev, PCI_D0);
1248 pci_restore_state(pdev);
1249
1250 err = pci_enable_device(pdev);
1251 if (err)
1252 return err;
1253
1254 adapter->curr_window = 255;
1255
1256 err = netxen_start_firmware(adapter, 0);
1257 if (err) {
1258 dev_err(&pdev->dev, "failed to start firmware\n");
1259 return err;
1260 }
1261
1262 if (netif_running(netdev)) {
1263 err = netxen_nic_attach(adapter);
1264 if (err)
1265 return err;
1266
1267 err = netxen_nic_up(adapter, netdev);
1268 if (err)
1269 return err;
1270
1271 netif_device_attach(netdev);
1272 }
1273
1274 return 0;
1275 }
1276 #endif
1277
1278 static int netxen_nic_open(struct net_device *netdev)
1279 {
1280 struct netxen_adapter *adapter = netdev_priv(netdev);
1281 int err = 0;
1282
1283 if (adapter->driver_mismatch)
1284 return -EIO;
1285
1286 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
1287 err = netxen_nic_attach(adapter);
1288 if (err)
1289 return err;
1290 }
1291
1292 err = netxen_nic_up(adapter, netdev);
1293 if (err)
1294 goto err_out;
1295
1296 netif_start_queue(netdev);
1297
1298 return 0;
1299
1300 err_out:
1301 netxen_nic_detach(adapter);
1302 return err;
1303 }
1304
1305 /*
1306 * netxen_nic_close - Disables a network interface entry point
1307 */
1308 static int netxen_nic_close(struct net_device *netdev)
1309 {
1310 struct netxen_adapter *adapter = netdev_priv(netdev);
1311
1312 netxen_nic_down(adapter, netdev);
1313 return 0;
1314 }
1315
1316 static void
1317 netxen_tso_check(struct net_device *netdev,
1318 struct nx_host_tx_ring *tx_ring,
1319 struct cmd_desc_type0 *first_desc,
1320 struct sk_buff *skb)
1321 {
1322 u8 opcode = TX_ETHER_PKT;
1323 __be16 protocol = skb->protocol;
1324 u16 flags = 0;
1325 u32 producer;
1326 int copied, offset, copy_len, hdr_len = 0, tso = 0;
1327 struct cmd_desc_type0 *hwdesc;
1328
1329 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1330 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1331 protocol = vh->h_vlan_encapsulated_proto;
1332 flags = FLAGS_VLAN_TAGGED;
1333 }
1334
1335 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1336 skb_shinfo(skb)->gso_size > 0) {
1337
1338 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1339
1340 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1341 first_desc->total_hdr_length = hdr_len;
1342
1343 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1344 TX_TCP_LSO6 : TX_TCP_LSO;
1345 tso = 1;
1346
1347 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1348 u8 l4proto;
1349
1350 if (protocol == cpu_to_be16(ETH_P_IP)) {
1351 l4proto = ip_hdr(skb)->protocol;
1352
1353 if (l4proto == IPPROTO_TCP)
1354 opcode = TX_TCP_PKT;
1355 else if (l4proto == IPPROTO_UDP)
1356 opcode = TX_UDP_PKT;
1357 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1358 l4proto = ipv6_hdr(skb)->nexthdr;
1359
1360 if (l4proto == IPPROTO_TCP)
1361 opcode = TX_TCPV6_PKT;
1362 else if (l4proto == IPPROTO_UDP)
1363 opcode = TX_UDPV6_PKT;
1364 }
1365 }
1366 first_desc->tcp_hdr_offset = skb_transport_offset(skb);
1367 first_desc->ip_hdr_offset = skb_network_offset(skb);
1368 netxen_set_tx_flags_opcode(first_desc, flags, opcode);
1369
1370 if (!tso)
1371 return;
1372
1373 /* For LSO, we need to copy the MAC/IP/TCP headers into
1374 * the descriptor ring
1375 */
1376 producer = tx_ring->producer;
1377 copied = 0;
1378 offset = 2;
1379
1380 while (copied < hdr_len) {
1381
1382 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1383 (hdr_len - copied));
1384
1385 hwdesc = &tx_ring->desc_head[producer];
1386 tx_ring->cmd_buf_arr[producer].skb = NULL;
1387
1388 skb_copy_from_linear_data_offset(skb, copied,
1389 (char *)hwdesc + offset, copy_len);
1390
1391 copied += copy_len;
1392 offset = 0;
1393
1394 producer = get_next_index(producer, tx_ring->num_desc);
1395 }
1396
1397 tx_ring->producer = producer;
1398 barrier();
1399 }
1400
1401 static void
1402 netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
1403 struct netxen_cmd_buffer *pbuf, int last)
1404 {
1405 int k;
1406 struct netxen_skb_frag *buffrag;
1407
1408 buffrag = &pbuf->frag_array[0];
1409 pci_unmap_single(pdev, buffrag->dma,
1410 buffrag->length, PCI_DMA_TODEVICE);
1411
1412 for (k = 1; k < last; k++) {
1413 buffrag = &pbuf->frag_array[k];
1414 pci_unmap_page(pdev, buffrag->dma,
1415 buffrag->length, PCI_DMA_TODEVICE);
1416 }
1417 }
1418
1419 static inline void
1420 netxen_clear_cmddesc(u64 *desc)
1421 {
1422 desc[0] = 0ULL;
1423 desc[2] = 0ULL;
1424 }
1425
1426 static int
1427 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1428 {
1429 struct netxen_adapter *adapter = netdev_priv(netdev);
1430 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
1431 struct skb_frag_struct *frag;
1432 struct netxen_cmd_buffer *pbuf;
1433 struct netxen_skb_frag *buffrag;
1434 struct cmd_desc_type0 *hwdesc, *first_desc;
1435 struct pci_dev *pdev;
1436 dma_addr_t temp_dma;
1437 int i, k;
1438 unsigned long offset;
1439
1440 u32 producer;
1441 int len, frag_count, no_of_desc;
1442 u32 num_txd = tx_ring->num_desc;
1443
1444 frag_count = skb_shinfo(skb)->nr_frags + 1;
1445
1446 /* 4 fragments per cmd descriptor */
1447 no_of_desc = (frag_count + 3) >> 2;
1448
1449 if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
1450 netif_stop_queue(netdev);
1451 return NETDEV_TX_BUSY;
1452 }
1453
1454 producer = tx_ring->producer;
1455
1456 pdev = adapter->pdev;
1457 len = skb->len - skb->data_len;
1458
1459 temp_dma = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE);
1460 if (pci_dma_mapping_error(pdev, temp_dma))
1461 goto drop_packet;
1462
1463 pbuf = &tx_ring->cmd_buf_arr[producer];
1464 pbuf->skb = skb;
1465 pbuf->frag_count = frag_count;
1466
1467 buffrag = &pbuf->frag_array[0];
1468 buffrag->dma = temp_dma;
1469 buffrag->length = len;
1470
1471 first_desc = hwdesc = &tx_ring->desc_head[producer];
1472 netxen_clear_cmddesc((u64 *)hwdesc);
1473 netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
1474 netxen_set_tx_port(hwdesc, adapter->portnum);
1475
1476 hwdesc->buffer_length[0] = cpu_to_le16(len);
1477 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1478
1479 for (i = 1, k = 1; i < frag_count; i++, k++) {
1480
1481 /* move to next desc. if there is a need */
1482 if ((i & 0x3) == 0) {
1483 k = 0;
1484 producer = get_next_index(producer, num_txd);
1485 hwdesc = &tx_ring->desc_head[producer];
1486 netxen_clear_cmddesc((u64 *)hwdesc);
1487 pbuf = &tx_ring->cmd_buf_arr[producer];
1488 pbuf->skb = NULL;
1489 }
1490 buffrag = &pbuf->frag_array[i];
1491 frag = &skb_shinfo(skb)->frags[i - 1];
1492 len = frag->size;
1493 offset = frag->page_offset;
1494
1495 temp_dma = pci_map_page(pdev, frag->page, offset,
1496 len, PCI_DMA_TODEVICE);
1497 if (pci_dma_mapping_error(pdev, temp_dma)) {
1498 netxen_clean_tx_dma_mapping(pdev, pbuf, i);
1499 goto drop_packet;
1500 }
1501
1502 buffrag->dma = temp_dma;
1503 buffrag->length = len;
1504
1505 hwdesc->buffer_length[k] = cpu_to_le16(len);
1506 switch (k) {
1507 case 0:
1508 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1509 break;
1510 case 1:
1511 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
1512 break;
1513 case 2:
1514 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
1515 break;
1516 case 3:
1517 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
1518 break;
1519 }
1520 }
1521 tx_ring->producer = get_next_index(producer, num_txd);
1522
1523 netxen_tso_check(netdev, tx_ring, first_desc, skb);
1524
1525 netxen_nic_update_cmd_producer(adapter, tx_ring);
1526
1527 adapter->stats.txbytes += skb->len;
1528 adapter->stats.xmitcalled++;
1529
1530 return NETDEV_TX_OK;
1531
1532 drop_packet:
1533 adapter->stats.txdropped++;
1534 dev_kfree_skb_any(skb);
1535 return NETDEV_TX_OK;
1536 }
1537
1538 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1539 {
1540 struct net_device *netdev = adapter->netdev;
1541 uint32_t temp, temp_state, temp_val;
1542 int rv = 0;
1543
1544 temp = NXRD32(adapter, CRB_TEMP_STATE);
1545
1546 temp_state = nx_get_temp_state(temp);
1547 temp_val = nx_get_temp_val(temp);
1548
1549 if (temp_state == NX_TEMP_PANIC) {
1550 printk(KERN_ALERT
1551 "%s: Device temperature %d degrees C exceeds"
1552 " maximum allowed. Hardware has been shut down.\n",
1553 netdev->name, temp_val);
1554
1555 netif_device_detach(netdev);
1556 netxen_nic_down(adapter, netdev);
1557 netxen_nic_detach(adapter);
1558
1559 rv = 1;
1560 } else if (temp_state == NX_TEMP_WARN) {
1561 if (adapter->temp == NX_TEMP_NORMAL) {
1562 printk(KERN_ALERT
1563 "%s: Device temperature %d degrees C "
1564 "exceeds operating range."
1565 " Immediate action needed.\n",
1566 netdev->name, temp_val);
1567 }
1568 } else {
1569 if (adapter->temp == NX_TEMP_WARN) {
1570 printk(KERN_INFO
1571 "%s: Device temperature is now %d degrees C"
1572 " in normal range.\n", netdev->name,
1573 temp_val);
1574 }
1575 }
1576 adapter->temp = temp_state;
1577 return rv;
1578 }
1579
1580 void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
1581 {
1582 struct net_device *netdev = adapter->netdev;
1583
1584 if (adapter->ahw.linkup && !linkup) {
1585 printk(KERN_INFO "%s: %s NIC Link is down\n",
1586 netxen_nic_driver_name, netdev->name);
1587 adapter->ahw.linkup = 0;
1588 if (netif_running(netdev)) {
1589 netif_carrier_off(netdev);
1590 netif_stop_queue(netdev);
1591 }
1592
1593 if (!adapter->has_link_events)
1594 netxen_nic_set_link_parameters(adapter);
1595
1596 } else if (!adapter->ahw.linkup && linkup) {
1597 printk(KERN_INFO "%s: %s NIC Link is up\n",
1598 netxen_nic_driver_name, netdev->name);
1599 adapter->ahw.linkup = 1;
1600 if (netif_running(netdev)) {
1601 netif_carrier_on(netdev);
1602 netif_wake_queue(netdev);
1603 }
1604
1605 if (!adapter->has_link_events)
1606 netxen_nic_set_link_parameters(adapter);
1607 }
1608 }
1609
1610 static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1611 {
1612 u32 val, port, linkup;
1613
1614 port = adapter->physical_port;
1615
1616 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1617 val = NXRD32(adapter, CRB_XG_STATE_P3);
1618 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1619 linkup = (val == XG_LINK_UP_P3);
1620 } else {
1621 val = NXRD32(adapter, CRB_XG_STATE);
1622 if (adapter->ahw.port_type == NETXEN_NIC_GBE)
1623 linkup = (val >> port) & 1;
1624 else {
1625 val = (val >> port*8) & 0xff;
1626 linkup = (val == XG_LINK_UP);
1627 }
1628 }
1629
1630 netxen_advert_link_change(adapter, linkup);
1631 }
1632
1633 static void netxen_watchdog(unsigned long v)
1634 {
1635 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
1636
1637 SCHEDULE_WORK(&adapter->watchdog_task);
1638 }
1639
1640 void netxen_watchdog_task(struct work_struct *work)
1641 {
1642 struct netxen_adapter *adapter =
1643 container_of(work, struct netxen_adapter, watchdog_task);
1644
1645 if (netxen_nic_check_temp(adapter))
1646 return;
1647
1648 if (!adapter->has_link_events)
1649 netxen_nic_handle_phy_intr(adapter);
1650
1651 if (netif_running(adapter->netdev))
1652 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1653 }
1654
1655 static void netxen_tx_timeout(struct net_device *netdev)
1656 {
1657 struct netxen_adapter *adapter = (struct netxen_adapter *)
1658 netdev_priv(netdev);
1659 SCHEDULE_WORK(&adapter->tx_timeout_task);
1660 }
1661
1662 static void netxen_tx_timeout_task(struct work_struct *work)
1663 {
1664 struct netxen_adapter *adapter =
1665 container_of(work, struct netxen_adapter, tx_timeout_task);
1666
1667 if (!netif_running(adapter->netdev))
1668 return;
1669
1670 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
1671 netxen_nic_driver_name, adapter->netdev->name);
1672
1673 netxen_napi_disable(adapter);
1674
1675 adapter->netdev->trans_start = jiffies;
1676
1677 netxen_napi_enable(adapter);
1678 netif_wake_queue(adapter->netdev);
1679 }
1680
1681 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1682 {
1683 struct netxen_adapter *adapter = netdev_priv(netdev);
1684 struct net_device_stats *stats = &adapter->net_stats;
1685
1686 memset(stats, 0, sizeof(*stats));
1687
1688 stats->rx_packets = adapter->stats.no_rcv;
1689 stats->tx_packets = adapter->stats.xmitfinished;
1690 stats->rx_bytes = adapter->stats.rxbytes;
1691 stats->tx_bytes = adapter->stats.txbytes;
1692 stats->rx_dropped = adapter->stats.rxdropped;
1693 stats->tx_dropped = adapter->stats.txdropped;
1694
1695 return stats;
1696 }
1697
1698 static irqreturn_t netxen_intr(int irq, void *data)
1699 {
1700 struct nx_host_sds_ring *sds_ring = data;
1701 struct netxen_adapter *adapter = sds_ring->adapter;
1702 u32 status = 0;
1703
1704 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1705
1706 if (!(status & adapter->legacy_intr.int_vec_bit))
1707 return IRQ_NONE;
1708
1709 if (adapter->ahw.revision_id >= NX_P3_B1) {
1710 /* check interrupt state machine, to be sure */
1711 status = adapter->pci_read_immediate(adapter,
1712 ISR_INT_STATE_REG);
1713 if (!ISR_LEGACY_INT_TRIGGERED(status))
1714 return IRQ_NONE;
1715
1716 } else {
1717 unsigned long our_int = 0;
1718
1719 our_int = NXRD32(adapter, CRB_INT_VECTOR);
1720
1721 /* not our interrupt */
1722 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
1723 return IRQ_NONE;
1724
1725 /* claim interrupt */
1726 NXWR32(adapter, CRB_INT_VECTOR, (our_int & 0xffffffff));
1727 }
1728
1729 /* clear interrupt */
1730 if (adapter->fw_major < 4)
1731 netxen_nic_disable_int(sds_ring);
1732
1733 adapter->pci_write_immediate(adapter,
1734 adapter->legacy_intr.tgt_status_reg,
1735 0xffffffff);
1736 /* read twice to ensure write is flushed */
1737 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1738 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1739
1740 napi_schedule(&sds_ring->napi);
1741
1742 return IRQ_HANDLED;
1743 }
1744
1745 static irqreturn_t netxen_msi_intr(int irq, void *data)
1746 {
1747 struct nx_host_sds_ring *sds_ring = data;
1748 struct netxen_adapter *adapter = sds_ring->adapter;
1749
1750 /* clear interrupt */
1751 adapter->pci_write_immediate(adapter,
1752 adapter->msi_tgt_status, 0xffffffff);
1753
1754 napi_schedule(&sds_ring->napi);
1755 return IRQ_HANDLED;
1756 }
1757
1758 static irqreturn_t netxen_msix_intr(int irq, void *data)
1759 {
1760 struct nx_host_sds_ring *sds_ring = data;
1761
1762 napi_schedule(&sds_ring->napi);
1763 return IRQ_HANDLED;
1764 }
1765
1766 static int netxen_nic_poll(struct napi_struct *napi, int budget)
1767 {
1768 struct nx_host_sds_ring *sds_ring =
1769 container_of(napi, struct nx_host_sds_ring, napi);
1770
1771 struct netxen_adapter *adapter = sds_ring->adapter;
1772
1773 int tx_complete;
1774 int work_done;
1775
1776 tx_complete = netxen_process_cmd_ring(adapter);
1777
1778 work_done = netxen_process_rcv_ring(sds_ring, budget);
1779
1780 if ((work_done < budget) && tx_complete) {
1781 napi_complete(&sds_ring->napi);
1782 if (netif_running(adapter->netdev))
1783 netxen_nic_enable_int(sds_ring);
1784 }
1785
1786 return work_done;
1787 }
1788
1789 #ifdef CONFIG_NET_POLL_CONTROLLER
1790 static void netxen_nic_poll_controller(struct net_device *netdev)
1791 {
1792 struct netxen_adapter *adapter = netdev_priv(netdev);
1793 disable_irq(adapter->irq);
1794 netxen_intr(adapter->irq, adapter);
1795 enable_irq(adapter->irq);
1796 }
1797 #endif
1798
1799 #define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
1800
1801 static int
1802 netxen_destip_supported(struct netxen_adapter *adapter)
1803 {
1804 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
1805 return 0;
1806
1807 if (adapter->ahw.cut_through)
1808 return 0;
1809
1810 return 1;
1811 }
1812
1813 static int netxen_netdev_event(struct notifier_block *this,
1814 unsigned long event, void *ptr)
1815 {
1816 struct netxen_adapter *adapter;
1817 struct net_device *dev = (struct net_device *)ptr;
1818 struct in_device *indev;
1819
1820 recheck:
1821 if (dev == NULL)
1822 goto done;
1823
1824 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1825 dev = vlan_dev_real_dev(dev);
1826 goto recheck;
1827 }
1828
1829 if (!is_netxen_netdev(dev))
1830 goto done;
1831
1832 adapter = netdev_priv(dev);
1833
1834 if (!adapter || !netxen_destip_supported(adapter))
1835 goto done;
1836
1837 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
1838 goto done;
1839
1840 indev = in_dev_get(dev);
1841 if (!indev)
1842 goto done;
1843
1844 for_ifa(indev) {
1845 switch (event) {
1846 case NETDEV_UP:
1847 netxen_config_ipaddr(adapter,
1848 ifa->ifa_address, NX_IP_UP);
1849 break;
1850 case NETDEV_DOWN:
1851 netxen_config_ipaddr(adapter,
1852 ifa->ifa_address, NX_IP_DOWN);
1853 break;
1854 default:
1855 break;
1856 }
1857 } endfor_ifa(indev);
1858
1859 in_dev_put(indev);
1860 done:
1861 return NOTIFY_DONE;
1862 }
1863
1864 static int
1865 netxen_inetaddr_event(struct notifier_block *this,
1866 unsigned long event, void *ptr)
1867 {
1868 struct netxen_adapter *adapter;
1869 struct net_device *dev;
1870
1871 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1872
1873 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
1874
1875 recheck:
1876 if (dev == NULL || !netif_running(dev))
1877 goto done;
1878
1879 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1880 dev = vlan_dev_real_dev(dev);
1881 goto recheck;
1882 }
1883
1884 if (!is_netxen_netdev(dev))
1885 goto done;
1886
1887 adapter = netdev_priv(dev);
1888
1889 if (!adapter || !netxen_destip_supported(adapter))
1890 goto done;
1891
1892 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
1893 goto done;
1894
1895 switch (event) {
1896 case NETDEV_UP:
1897 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
1898 break;
1899 case NETDEV_DOWN:
1900 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
1901 break;
1902 default:
1903 break;
1904 }
1905
1906 done:
1907 return NOTIFY_DONE;
1908 }
1909
1910 static struct notifier_block netxen_netdev_cb = {
1911 .notifier_call = netxen_netdev_event,
1912 };
1913
1914 static struct notifier_block netxen_inetaddr_cb = {
1915 .notifier_call = netxen_inetaddr_event,
1916 };
1917
1918 static struct pci_driver netxen_driver = {
1919 .name = netxen_nic_driver_name,
1920 .id_table = netxen_pci_tbl,
1921 .probe = netxen_nic_probe,
1922 .remove = __devexit_p(netxen_nic_remove),
1923 #ifdef CONFIG_PM
1924 .suspend = netxen_nic_suspend,
1925 .resume = netxen_nic_resume
1926 #endif
1927 };
1928
1929 static int __init netxen_init_module(void)
1930 {
1931 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
1932
1933 if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
1934 return -ENOMEM;
1935
1936 register_netdevice_notifier(&netxen_netdev_cb);
1937 register_inetaddr_notifier(&netxen_inetaddr_cb);
1938
1939 return pci_register_driver(&netxen_driver);
1940 }
1941
1942 module_init(netxen_init_module);
1943
1944 static void __exit netxen_exit_module(void)
1945 {
1946 pci_unregister_driver(&netxen_driver);
1947
1948 unregister_inetaddr_notifier(&netxen_inetaddr_cb);
1949 unregister_netdevice_notifier(&netxen_netdev_cb);
1950 destroy_workqueue(netxen_workq);
1951 }
1952
1953 module_exit(netxen_exit_module);