drivers/net/netxen/netxen_nic_main.c
1 /*
2 * Copyright (C) 2003 - 2009 NetXen, Inc.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
22 *
23 * Contact Information:
24 * info@netxen.com
25 * NetXen Inc,
26 * 18922 Forge Drive
27 * Cupertino, CA 95014-0701
28 *
29 */
30
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include "netxen_nic_hw.h"
34
35 #include "netxen_nic.h"
36 #include "netxen_nic_phan_reg.h"
37
38 #include <linux/dma-mapping.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <linux/ipv6.h>
42 #include <linux/inetdevice.h>
43
44 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
45 MODULE_LICENSE("GPL");
46 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
47
48 char netxen_nic_driver_name[] = "netxen_nic";
49 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
50 NETXEN_NIC_LINUX_VERSIONID;
51
52 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
53
54 /* Default to restricted 1G auto-neg mode */
55 static int wol_port_mode = 5;
56
57 static int use_msi = 1;
58
59 static int use_msi_x = 1;
60
61 /* Functions local to the NetXen NIC driver */
62 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
63 const struct pci_device_id *ent);
64 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
65 static int netxen_nic_open(struct net_device *netdev);
66 static int netxen_nic_close(struct net_device *netdev);
67 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
68 static void netxen_tx_timeout(struct net_device *netdev);
69 static void netxen_reset_task(struct work_struct *work);
70 static void netxen_watchdog(unsigned long);
71 static int netxen_nic_poll(struct napi_struct *napi, int budget);
72 #ifdef CONFIG_NET_POLL_CONTROLLER
73 static void netxen_nic_poll_controller(struct net_device *netdev);
74 #endif
75 static irqreturn_t netxen_intr(int irq, void *data);
76 static irqreturn_t netxen_msi_intr(int irq, void *data);
77 static irqreturn_t netxen_msix_intr(int irq, void *data);
78
79 /* PCI Device ID Table */
80 #define ENTRY(device) \
81 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
82 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
83
84 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
85 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
86 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
87 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
88 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
89 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
91 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
92 ENTRY(PCI_DEVICE_ID_NX3031),
93 {0,}
94 };
95
96 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
97
98 static struct workqueue_struct *netxen_workq;
99 #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
100 #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
101
104 static uint32_t crb_cmd_producer[4] = {
105 CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
106 CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
107 };
108
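/*
 * Ring the tx doorbell: publish the new producer index to the ring's
 * CRB register.  If free descriptors drop to TX_STOP_THRESH or below,
 * the netdev queue is stopped; it is expected to be woken again from
 * the tx-completion path once descriptors are reclaimed.
 */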
109 void
110 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
111 struct nx_host_tx_ring *tx_ring)
112 {
113 NXWR32(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
114
115 if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
116 netif_stop_queue(adapter->netdev);
117 smp_mb();
118 }
119 }
120
121 static uint32_t crb_cmd_consumer[4] = {
122 CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
123 CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
124 };
125
126 static inline void
127 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
128 struct nx_host_tx_ring *tx_ring)
129 {
130 NXWR32(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
131 }
132
133 static uint32_t msi_tgt_status[8] = {
134 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
135 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
136 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
137 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
138 };
139
140 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
141
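/*
 * Per-ring interrupt masking: writing 0 to the ring's CRB interrupt
 * mask register disables its interrupt, writing 1 enables it.  When
 * running with legacy INTx (non-MSI) interrupts, the global target
 * mask register must be unmasked as well.
 */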
142 static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
143 {
144 struct netxen_adapter *adapter = sds_ring->adapter;
145
146 NXWR32(adapter, sds_ring->crb_intr_mask, 0);
147 }
148
149 static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
150 {
151 struct netxen_adapter *adapter = sds_ring->adapter;
152
153 NXWR32(adapter, sds_ring->crb_intr_mask, 0x1);
154
155 if (!NETXEN_IS_MSI_FAMILY(adapter))
156 adapter->pci_write_immediate(adapter,
157 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
158 }
159
160 static int
161 netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
162 {
163 int size = sizeof(struct nx_host_sds_ring) * count;
164
165 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
166
167 return (recv_ctx->sds_rings == NULL);
168 }
169
170 static void
171 netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
172 {
173 if (recv_ctx->sds_rings != NULL)
174 kfree(recv_ctx->sds_rings);
175 }
176
177 static int
178 netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
179 {
180 int ring;
181 struct nx_host_sds_ring *sds_ring;
182 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
183
184 if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
185 return -ENOMEM;
186
187 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
188 sds_ring = &recv_ctx->sds_rings[ring];
189 netif_napi_add(netdev, &sds_ring->napi,
190 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
191 }
192
193 return 0;
194 }
195
196 static void
197 netxen_napi_enable(struct netxen_adapter *adapter)
198 {
199 int ring;
200 struct nx_host_sds_ring *sds_ring;
201 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
202
203 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
204 sds_ring = &recv_ctx->sds_rings[ring];
205 napi_enable(&sds_ring->napi);
206 netxen_nic_enable_int(sds_ring);
207 }
208 }
209
210 static void
211 netxen_napi_disable(struct netxen_adapter *adapter)
212 {
213 int ring;
214 struct nx_host_sds_ring *sds_ring;
215 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
216
217 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
218 sds_ring = &recv_ctx->sds_rings[ring];
219 netxen_nic_disable_int(sds_ring);
220 napi_synchronize(&sds_ring->napi);
221 napi_disable(&sds_ring->napi);
222 }
223 }
224
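/*
 * Choose the widest DMA mask the chip revision supports: 35 bits for
 * P2 C1, 39 bits for P3 B0 and later, otherwise 32 bits (wider masks
 * are not used on IA64).  The consistent (coherent) mask stays at
 * 32 bits since only the rings are allocated from that pool.
 */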
225 static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
226 {
227 struct pci_dev *pdev = adapter->pdev;
228 uint64_t mask, cmask;
229
230 adapter->pci_using_dac = 0;
231
232 mask = DMA_BIT_MASK(32);
233 /*
234 * Consistent DMA mask is set to 32 bit because it cannot be set to
235 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
236 * come off this pool.
237 */
238 cmask = DMA_BIT_MASK(32);
239
240 #ifndef CONFIG_IA64
241 if (revision_id >= NX_P3_B0)
242 mask = DMA_BIT_MASK(39);
243 else if (revision_id == NX_P2_C1)
244 mask = DMA_BIT_MASK(35);
245 #endif
246 if (pci_set_dma_mask(pdev, mask) == 0 &&
247 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
248 adapter->pci_using_dac = 1;
249 return 0;
250 }
251
252 return -EIO;
253 }
254
255 /* Update addressable range if firmware supports it */
256 static int
257 nx_update_dma_mask(struct netxen_adapter *adapter)
258 {
259 int change, shift, err;
260 uint64_t mask, old_mask;
261 struct pci_dev *pdev = adapter->pdev;
262
263 change = 0;
264
265 shift = NXRD32(adapter, CRB_DMA_SHIFT);
266 if (shift >= 32)
267 return 0;
268
269 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
270 change = 1;
271 else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
272 change = 1;
273
274 if (change) {
275 old_mask = pdev->dma_mask;
276 mask = (1ULL<<(32+shift)) - 1;
277
278 err = pci_set_dma_mask(pdev, mask);
279 if (err)
280 return pci_set_dma_mask(pdev, old_mask);
281 }
282
283 return 0;
284 }
285
286 static void
287 netxen_check_options(struct netxen_adapter *adapter)
288 {
289 if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
290 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
291 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
292 } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
293 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
294 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
295 }
296
297 adapter->msix_supported = 0;
298 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
299 adapter->msix_supported = !!use_msi_x;
300 adapter->rss_supported = !!use_msi_x;
301 } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
302 switch (adapter->ahw.board_type) {
303 case NETXEN_BRDTYPE_P2_SB31_10G:
304 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
305 adapter->msix_supported = !!use_msi_x;
306 adapter->rss_supported = !!use_msi_x;
307 break;
308 default:
309 break;
310 }
311 }
312
313 adapter->num_txd = MAX_CMD_DESCRIPTORS;
314
315 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
316 adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
317 adapter->max_rds_rings = 3;
318 } else {
319 adapter->num_lro_rxd = 0;
320 adapter->max_rds_rings = 2;
321 }
322 }
323
324 static int
325 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
326 {
327 u32 val, timeout;
328
329 if (first_boot == 0x55555555) {
330 /* This is the first boot after power up */
331 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
332
333 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
334 return 0;
335
336 /* PCI bus master workaround */
337 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
338 if (!(first_boot & 0x4)) {
339 first_boot |= 0x4;
340 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
341 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
342 }
343
344 /* This is the first boot after power up */
345 first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
346 if (first_boot != 0x80000f) {
347 /* clear the register for future unloads/loads */
348 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
349 return -EIO;
350 }
351
352 /* Start P2 boot loader */
353 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
354 NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
355 timeout = 0;
356 do {
357 msleep(1);
358 val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
359
360 if (++timeout > 5000)
361 return -EIO;
362
363 } while (val == NETXEN_BDINFO_MAGIC);
364 }
365 return 0;
366 }
367
368 static void netxen_set_port_mode(struct netxen_adapter *adapter)
369 {
370 u32 val, data;
371
372 val = adapter->ahw.board_type;
373 if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
374 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
375 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
376 data = NETXEN_PORT_MODE_802_3_AP;
377 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
378 } else if (port_mode == NETXEN_PORT_MODE_XG) {
379 data = NETXEN_PORT_MODE_XG;
380 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
381 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
382 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
383 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
384 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
385 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
386 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
387 } else {
388 data = NETXEN_PORT_MODE_AUTO_NEG;
389 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
390 }
391
392 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
393 (wol_port_mode != NETXEN_PORT_MODE_XG) &&
394 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
395 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
396 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
397 }
398 NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
399 }
400 }
401
402 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
403 {
404 u32 control;
405 int pos;
406
407 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
408 if (pos) {
409 pci_read_config_dword(pdev, pos, &control);
410 if (enable)
411 control |= PCI_MSIX_FLAGS_ENABLE;
412 else
413 control = 0;
414 pci_write_config_dword(pdev, pos, control);
415 }
416 }
417
418 static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
419 {
420 int i;
421
422 for (i = 0; i < count; i++)
423 adapter->msix_entries[i].entry = i;
424 }
425
426 static int
427 netxen_read_mac_addr(struct netxen_adapter *adapter)
428 {
429 int i;
430 unsigned char *p;
431 __le64 mac_addr;
432 struct net_device *netdev = adapter->netdev;
433 struct pci_dev *pdev = adapter->pdev;
434
435 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
436 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
437 return -EIO;
438 } else {
439 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
440 return -EIO;
441 }
442
443 p = (unsigned char *)&mac_addr;
444 for (i = 0; i < 6; i++)
445 netdev->dev_addr[i] = *(p + 5 - i);
446
447 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
448
449 /* set station address */
450
451 if (!is_valid_ether_addr(netdev->perm_addr))
452 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
453
454 return 0;
455 }
456
457 int netxen_nic_set_mac(struct net_device *netdev, void *p)
458 {
459 struct netxen_adapter *adapter = netdev_priv(netdev);
460 struct sockaddr *addr = p;
461
462 if (!is_valid_ether_addr(addr->sa_data))
463 return -EINVAL;
464
465 if (netif_running(netdev)) {
466 netif_device_detach(netdev);
467 netxen_napi_disable(adapter);
468 }
469
470 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
471 adapter->macaddr_set(adapter, addr->sa_data);
472
473 if (netif_running(netdev)) {
474 netif_device_attach(netdev);
475 netxen_napi_enable(adapter);
476 }
477 return 0;
478 }
479
480 static void netxen_set_multicast_list(struct net_device *dev)
481 {
482 struct netxen_adapter *adapter = netdev_priv(dev);
483
484 adapter->set_multi(dev);
485 }
486
487 static const struct net_device_ops netxen_netdev_ops = {
488 .ndo_open = netxen_nic_open,
489 .ndo_stop = netxen_nic_close,
490 .ndo_start_xmit = netxen_nic_xmit_frame,
491 .ndo_get_stats = netxen_nic_get_stats,
492 .ndo_validate_addr = eth_validate_addr,
493 .ndo_set_multicast_list = netxen_set_multicast_list,
494 .ndo_set_mac_address = netxen_nic_set_mac,
495 .ndo_change_mtu = netxen_nic_change_mtu,
496 .ndo_tx_timeout = netxen_tx_timeout,
497 #ifdef CONFIG_NET_POLL_CONTROLLER
498 .ndo_poll_controller = netxen_nic_poll_controller,
499 #endif
500 };
501
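/*
 * Interrupt setup policy: try MSI-X first (multiple vectors, and one
 * SDS ring per vector, only when RSS is supported), fall back to MSI,
 * and finally to legacy INTx using the per-function legacy interrupt
 * register set.
 */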
502 static void
503 netxen_setup_intr(struct netxen_adapter *adapter)
504 {
505 struct netxen_legacy_intr_set *legacy_intrp;
506 struct pci_dev *pdev = adapter->pdev;
507 int err, num_msix;
508
509 if (adapter->rss_supported) {
510 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
511 MSIX_ENTRIES_PER_ADAPTER : 2;
512 } else
513 num_msix = 1;
514
515 adapter->max_sds_rings = 1;
516
517 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
518
519 if (adapter->ahw.revision_id >= NX_P3_B0)
520 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
521 else
522 legacy_intrp = &legacy_intr[0];
523 adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
524 adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
525 adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
526 adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;
527
528 netxen_set_msix_bit(pdev, 0);
529
530 if (adapter->msix_supported) {
531
532 netxen_init_msix_entries(adapter, num_msix);
533 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
534 if (err == 0) {
535 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
536 netxen_set_msix_bit(pdev, 1);
537
538 if (adapter->rss_supported)
539 adapter->max_sds_rings = num_msix;
540
541 dev_info(&pdev->dev, "using msi-x interrupts\n");
542 return;
543 }
544
545 if (err > 0)
546 pci_disable_msix(pdev);
547
548 /* fall through for msi */
549 }
550
551 if (use_msi && !pci_enable_msi(pdev)) {
552 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
553 adapter->msi_tgt_status =
554 msi_tgt_status[adapter->ahw.pci_func];
555 dev_info(&pdev->dev, "using msi interrupts\n");
556 adapter->msix_entries[0].vector = pdev->irq;
557 return;
558 }
559
560 dev_info(&pdev->dev, "using legacy interrupts\n");
561 adapter->msix_entries[0].vector = pdev->irq;
562 }
563
564 static void
565 netxen_teardown_intr(struct netxen_adapter *adapter)
566 {
567 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
568 pci_disable_msix(adapter->pdev);
569 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
570 pci_disable_msi(adapter->pdev);
571 }
572
573 static void
574 netxen_cleanup_pci_map(struct netxen_adapter *adapter)
575 {
576 if (adapter->ahw.db_base != NULL)
577 iounmap(adapter->ahw.db_base);
578 if (adapter->ahw.pci_base0 != NULL)
579 iounmap(adapter->ahw.pci_base0);
580 if (adapter->ahw.pci_base1 != NULL)
581 iounmap(adapter->ahw.pci_base1);
582 if (adapter->ahw.pci_base2 != NULL)
583 iounmap(adapter->ahw.pci_base2);
584 }
585
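/*
 * Map the PCI BARs.  The chip exposes a 128MB, 32MB or 2MB register
 * window; the matching set of register/memory access helpers (128M vs
 * 2M variants) is selected accordingly.  Non-P3 (P2) parts also map
 * the doorbell region from BAR 4.
 */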
586 static int
587 netxen_setup_pci_map(struct netxen_adapter *adapter)
588 {
589 void __iomem *mem_ptr0 = NULL;
590 void __iomem *mem_ptr1 = NULL;
591 void __iomem *mem_ptr2 = NULL;
592 void __iomem *db_ptr = NULL;
593
594 unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;
595
596 struct pci_dev *pdev = adapter->pdev;
597 int pci_func = adapter->ahw.pci_func;
598
599 int err = 0;
600
601 /*
602 * Set the CRB window to invalid. If any register in window 0 is
603 * accessed it should set the window to 0 and then reset it to 1.
604 */
605 adapter->curr_window = 255;
606 adapter->ahw.qdr_sn_window = -1;
607 adapter->ahw.ddr_mn_window = -1;
608
609 /* remap phys address */
610 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
611 mem_len = pci_resource_len(pdev, 0);
612 pci_len0 = 0;
613
614 adapter->hw_write_wx = netxen_nic_hw_write_wx_128M;
615 adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
616 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
617 adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
618 adapter->pci_set_window = netxen_nic_pci_set_window_128M;
619 adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
620 adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
621
622 /* 128 Meg of memory */
623 if (mem_len == NETXEN_PCI_128MB_SIZE) {
624 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
625 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
626 SECOND_PAGE_GROUP_SIZE);
627 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
628 THIRD_PAGE_GROUP_SIZE);
629 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
630 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
631 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
632 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
633 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
634 adapter->hw_write_wx = netxen_nic_hw_write_wx_2M;
635 adapter->hw_read_wx = netxen_nic_hw_read_wx_2M;
636 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
637 adapter->pci_write_immediate =
638 netxen_nic_pci_write_immediate_2M;
639 adapter->pci_set_window = netxen_nic_pci_set_window_2M;
640 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
641 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
642
643 mem_ptr0 = pci_ioremap_bar(pdev, 0);
644 if (mem_ptr0 == NULL) {
645 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
646 return -EIO;
647 }
648 pci_len0 = mem_len;
649
650 adapter->ahw.ddr_mn_window = 0;
651 adapter->ahw.qdr_sn_window = 0;
652
653 adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
654 (pci_func * 0x20);
655 adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW;
656 if (pci_func < 4)
657 adapter->ahw.ms_win_crb += (pci_func * 0x20);
658 else
659 adapter->ahw.ms_win_crb +=
660 0xA0 + ((pci_func - 4) * 0x10);
661 } else {
662 return -EIO;
663 }
664
665 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
666
667 adapter->ahw.pci_base0 = mem_ptr0;
668 adapter->ahw.pci_len0 = pci_len0;
669 adapter->ahw.pci_base1 = mem_ptr1;
670 adapter->ahw.pci_base2 = mem_ptr2;
671
672 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
673 goto skip_doorbell;
674
675 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
676 db_len = pci_resource_len(pdev, 4);
677
678 if (db_len == 0) {
679 printk(KERN_ERR "%s: doorbell is disabled\n",
680 netxen_nic_driver_name);
681 err = -EIO;
682 goto err_out;
683 }
684
685 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
686 if (!db_ptr) {
687 printk(KERN_ERR "%s: Failed to map doorbell region.\n",
688 netxen_nic_driver_name);
689 err = -EIO;
690 goto err_out;
691 }
692
693 skip_doorbell:
694 adapter->ahw.db_base = db_ptr;
695 adapter->ahw.db_len = db_len;
696 return 0;
697
698 err_out:
699 netxen_cleanup_pci_map(adapter);
700 return err;
701 }
702
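/*
 * Firmware bring-up.  Only the "first" driver instance (port 0 on P2,
 * PCI function 0 on P3) validates the boot state, loads the firmware
 * and programs the driver version; all other instances skip straight
 * to the firmware handshake in netxen_phantom_init().
 */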
703 static int
704 netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
705 {
706 int val, err, first_boot;
707 struct pci_dev *pdev = adapter->pdev;
708
709 int first_driver = 0;
710
711 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
712 first_driver = (adapter->portnum == 0);
713 else
714 first_driver = (adapter->ahw.pci_func == 0);
715
716 if (!first_driver)
717 goto wait_init;
718
719 first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
720
721 err = netxen_check_hw_init(adapter, first_boot);
722 if (err) {
723 dev_err(&pdev->dev, "error in HW init sequence\n");
724 return err;
725 }
726
727 if (request_fw)
728 netxen_request_firmware(adapter);
729
730 err = netxen_need_fw_reset(adapter);
731 if (err <= 0)
732 return err;
733
734 if (first_boot != 0x55555555) {
735 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
736 netxen_pinit_from_rom(adapter, 0);
737 msleep(1);
738 }
739
740 NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
741 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
742 netxen_set_port_mode(adapter);
743
744 netxen_load_firmware(adapter);
745
746 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
747
748 /* Initialize multicast addr pool owners */
749 val = 0x7654;
750 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
751 val |= 0x0f000000;
752 NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
753
754 }
755
756 err = netxen_init_dummy_dma(adapter);
757 if (err)
758 return err;
759
760 /*
761 * Tell the hardware our version number.
762 */
763 val = (_NETXEN_NIC_LINUX_MAJOR << 16)
764 | ((_NETXEN_NIC_LINUX_MINOR << 8))
765 | (_NETXEN_NIC_LINUX_SUBVERSION);
766 NXWR32(adapter, CRB_DRIVER_VERSION, val);
767
768 wait_init:
769 /* Handshake with the card before we register the devices. */
770 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
771 if (err) {
772 netxen_free_dummy_dma(adapter);
773 return err;
774 }
775
776 nx_update_dma_mask(adapter);
777
778 netxen_nic_get_firmware_info(adapter);
779
780 return 0;
781 }
782
783 static int
784 netxen_nic_request_irq(struct netxen_adapter *adapter)
785 {
786 irq_handler_t handler;
787 struct nx_host_sds_ring *sds_ring;
788 int err, ring;
789
790 unsigned long flags = IRQF_SAMPLE_RANDOM;
791 struct net_device *netdev = adapter->netdev;
792 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
793
794 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
795 handler = netxen_msix_intr;
796 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
797 handler = netxen_msi_intr;
798 else {
799 flags |= IRQF_SHARED;
800 handler = netxen_intr;
801 }
802 adapter->irq = netdev->irq;
803
804 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
805 sds_ring = &recv_ctx->sds_rings[ring];
806 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
807 err = request_irq(sds_ring->irq, handler,
808 flags, sds_ring->name, sds_ring);
809 if (err)
810 return err;
811 }
812
813 return 0;
814 }
815
816 static void
817 netxen_nic_free_irq(struct netxen_adapter *adapter)
818 {
819 int ring;
820 struct nx_host_sds_ring *sds_ring;
821
822 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
823
824 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
825 sds_ring = &recv_ctx->sds_rings[ring];
826 free_irq(sds_ring->irq, sds_ring);
827 }
828 }
829
830 static void
831 netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
832 {
833 adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
834 adapter->coal.normal.data.rx_time_us =
835 NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
836 adapter->coal.normal.data.rx_packets =
837 NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
838 adapter->coal.normal.data.tx_time_us =
839 NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
840 adapter->coal.normal.data.tx_packets =
841 NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
842 }
843
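/*
 * Bring the port up: initialize the hardware port, program the MAC
 * address (P2), multicast list and MTU, configure RSS and interrupt
 * coalescing where applicable, enable NAPI and interrupts, request
 * firmware link events (or fall back to reading link parameters), and
 * start the watchdog timer.
 */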
844 static int
845 netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
846 {
847 int err;
848
849 err = adapter->init_port(adapter, adapter->physical_port);
850 if (err) {
851 printk(KERN_ERR "%s: Failed to initialize port %d\n",
852 netxen_nic_driver_name, adapter->portnum);
853 return err;
854 }
855 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
856 adapter->macaddr_set(adapter, netdev->dev_addr);
857
858 adapter->set_multi(netdev);
859 adapter->set_mtu(adapter, netdev->mtu);
860
861 adapter->ahw.linkup = 0;
862
863 if (adapter->max_sds_rings > 1)
864 netxen_config_rss(adapter, 1);
865
866 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
867 netxen_config_intr_coalesce(adapter);
868
869 netxen_napi_enable(adapter);
870
871 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
872 netxen_linkevent_request(adapter, 1);
873 else
874 netxen_nic_set_link_parameters(adapter);
875
876 mod_timer(&adapter->watchdog_timer, jiffies);
877
878 return 0;
879 }
880
881 static void
882 netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
883 {
884 spin_lock(&adapter->tx_clean_lock);
885 netif_carrier_off(netdev);
886 netif_tx_disable(netdev);
887
888 if (adapter->stop_port)
889 adapter->stop_port(adapter);
890
891 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
892 netxen_p3_free_mac_list(adapter);
893
894 netxen_napi_disable(adapter);
895
896 netxen_release_tx_buffers(adapter);
897 spin_unlock(&adapter->tx_clean_lock);
898
899 del_timer_sync(&adapter->watchdog_timer);
900 FLUSH_SCHEDULED_WORK();
901 }
902
903
904 static int
905 netxen_nic_attach(struct netxen_adapter *adapter)
906 {
907 struct net_device *netdev = adapter->netdev;
908 struct pci_dev *pdev = adapter->pdev;
909 int err, ring;
910 struct nx_host_rds_ring *rds_ring;
911 struct nx_host_tx_ring *tx_ring;
912
913 err = netxen_init_firmware(adapter);
914 if (err != 0) {
915 printk(KERN_ERR "Failed to init firmware\n");
916 return -EIO;
917 }
918
919 err = netxen_alloc_sw_resources(adapter);
920 if (err) {
921 printk(KERN_ERR "%s: Error in setting sw resources\n",
922 netdev->name);
923 return err;
924 }
925
926 netxen_nic_clear_stats(adapter);
927
928 err = netxen_alloc_hw_resources(adapter);
929 if (err) {
930 printk(KERN_ERR "%s: Error in setting hw resources\n",
931 netdev->name);
932 goto err_out_free_sw;
933 }
934
935 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
936 tx_ring = adapter->tx_ring;
937 tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
938 tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
939
940 tx_ring->producer = 0;
941 tx_ring->sw_consumer = 0;
942
943 netxen_nic_update_cmd_producer(adapter, tx_ring);
944 netxen_nic_update_cmd_consumer(adapter, tx_ring);
945 }
946
947 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
948 rds_ring = &adapter->recv_ctx.rds_rings[ring];
949 netxen_post_rx_buffers(adapter, ring, rds_ring);
950 }
951
952 err = netxen_nic_request_irq(adapter);
953 if (err) {
954 dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
955 netdev->name);
956 goto err_out_free_rxbuf;
957 }
958
959 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
960 netxen_nic_init_coalesce_defaults(adapter);
961
962 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
963 return 0;
964
965 err_out_free_rxbuf:
966 netxen_release_rx_buffers(adapter);
967 netxen_free_hw_resources(adapter);
968 err_out_free_sw:
969 netxen_free_sw_resources(adapter);
970 return err;
971 }
972
973 static void
974 netxen_nic_detach(struct netxen_adapter *adapter)
975 {
976 netxen_free_hw_resources(adapter);
977 netxen_release_rx_buffers(adapter);
978 netxen_nic_free_irq(adapter);
979 netxen_free_sw_resources(adapter);
980
981 adapter->is_up = 0;
982 }
983
984 static int
985 netxen_setup_netdev(struct netxen_adapter *adapter,
986 struct net_device *netdev)
987 {
988 int err = 0;
989 struct pci_dev *pdev = adapter->pdev;
990
991 adapter->rx_csum = 1;
992 adapter->mc_enabled = 0;
993 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
994 adapter->max_mc_count = 38;
995 else
996 adapter->max_mc_count = 16;
997
998 netdev->netdev_ops = &netxen_netdev_ops;
999 netdev->watchdog_timeo = 2*HZ;
1000
1001 netxen_nic_change_mtu(netdev, netdev->mtu);
1002
1003 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
1004
1005 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1006 netdev->features |= (NETIF_F_GRO);
1007 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1008
1009 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1010 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1011 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1012 }
1013
1014 if (adapter->pci_using_dac) {
1015 netdev->features |= NETIF_F_HIGHDMA;
1016 netdev->vlan_features |= NETIF_F_HIGHDMA;
1017 }
1018
1019 netdev->irq = adapter->msix_entries[0].vector;
1020
1021 err = netxen_napi_add(adapter, netdev);
1022 if (err)
1023 return err;
1024
1025 init_timer(&adapter->watchdog_timer);
1026 adapter->watchdog_timer.function = &netxen_watchdog;
1027 adapter->watchdog_timer.data = (unsigned long)adapter;
1028 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
1029 INIT_WORK(&adapter->tx_timeout_task, netxen_reset_task);
1030
1031 if (netxen_read_mac_addr(adapter))
1032 dev_warn(&pdev->dev, "failed to read mac addr\n");
1033
1034 netif_carrier_off(netdev);
1035 netif_stop_queue(netdev);
1036
1037 err = register_netdev(netdev);
1038 if (err) {
1039 dev_err(&pdev->dev, "failed to register net device\n");
1040 return err;
1041 }
1042
1043 return 0;
1044 }
1045
1046 static int __devinit
1047 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1048 {
1049 struct net_device *netdev = NULL;
1050 struct netxen_adapter *adapter = NULL;
1051 int i = 0, err;
1052 int pci_func_id = PCI_FUNC(pdev->devfn);
1053 uint8_t revision_id;
1054
1055 if (pdev->class != 0x020000) {
1056 printk(KERN_DEBUG "NetXen function %d, class %x will not "
1057 "be enabled.\n", pci_func_id, pdev->class);
1058 return -ENODEV;
1059 }
1060
1061 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
1062 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x "
1063 "will not be enabled.\n",
1064 NX_P3_A0, NX_P3_B1);
1065 return -ENODEV;
1066 }
1067
1068 if ((err = pci_enable_device(pdev)))
1069 return err;
1070
1071 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1072 err = -ENODEV;
1073 goto err_out_disable_pdev;
1074 }
1075
1076 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
1077 goto err_out_disable_pdev;
1078
1079 pci_set_master(pdev);
1080
1081 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
1082 if (!netdev) {
1083 dev_err(&pdev->dev, "failed to allocate net_device\n");
1084 err = -ENOMEM;
1085 goto err_out_free_res;
1086 }
1087
1088 SET_NETDEV_DEV(netdev, &pdev->dev);
1089
1090 adapter = netdev_priv(netdev);
1091 adapter->netdev = netdev;
1092 adapter->pdev = pdev;
1093 adapter->ahw.pci_func = pci_func_id;
1094
1095 revision_id = pdev->revision;
1096 adapter->ahw.revision_id = revision_id;
1097
1098 err = nx_set_dma_mask(adapter, revision_id);
1099 if (err)
1100 goto err_out_free_netdev;
1101
1102 rwlock_init(&adapter->adapter_lock);
1103 spin_lock_init(&adapter->tx_clean_lock);
1104 INIT_LIST_HEAD(&adapter->mac_list);
1105
1106 err = netxen_setup_pci_map(adapter);
1107 if (err)
1108 goto err_out_free_netdev;
1109
1110 /* This will be reset for mezz cards */
1111 adapter->portnum = pci_func_id;
1112
1113 err = netxen_nic_get_board_info(adapter);
1114 if (err) {
1115 dev_err(&pdev->dev, "Error getting board config info.\n");
1116 goto err_out_iounmap;
1117 }
1118
1119 netxen_initialize_adapter_ops(adapter);
1120
1121 /* Mezz cards have PCI function 0,2,3 enabled */
1122 switch (adapter->ahw.board_type) {
1123 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
1124 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
1125 if (pci_func_id >= 2)
1126 adapter->portnum = pci_func_id - 2;
1127 break;
1128 default:
1129 break;
1130 }
1131
1132 err = netxen_start_firmware(adapter, 1);
1133 if (err)
1134 goto err_out_iounmap;
1135
1136 /*
1137 * See if the firmware gave us a virtual-physical port mapping.
1138 */
1139 adapter->physical_port = adapter->portnum;
1140 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1141 i = NXRD32(adapter, CRB_V2P(adapter->portnum));
1142 if (i != 0x55555555)
1143 adapter->physical_port = i;
1144 }
1145
1146 netxen_check_options(adapter);
1147
1148 netxen_setup_intr(adapter);
1149
1150 err = netxen_setup_netdev(adapter, netdev);
1151 if (err)
1152 goto err_out_disable_msi;
1153
1154 pci_set_drvdata(pdev, adapter);
1155
1156 switch (adapter->ahw.port_type) {
1157 case NETXEN_NIC_GBE:
1158 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1159 adapter->netdev->name);
1160 break;
1161 case NETXEN_NIC_XGBE:
1162 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1163 adapter->netdev->name);
1164 break;
1165 }
1166
1167 return 0;
1168
1169 err_out_disable_msi:
1170 netxen_teardown_intr(adapter);
1171
1172 netxen_free_dummy_dma(adapter);
1173
1174 err_out_iounmap:
1175 netxen_cleanup_pci_map(adapter);
1176
1177 err_out_free_netdev:
1178 free_netdev(netdev);
1179
1180 err_out_free_res:
1181 pci_release_regions(pdev);
1182
1183 err_out_disable_pdev:
1184 pci_set_drvdata(pdev, NULL);
1185 pci_disable_device(pdev);
1186 return err;
1187 }
1188
1189 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1190 {
1191 struct netxen_adapter *adapter;
1192 struct net_device *netdev;
1193
1194 adapter = pci_get_drvdata(pdev);
1195 if (adapter == NULL)
1196 return;
1197
1198 netdev = adapter->netdev;
1199
1200 unregister_netdev(netdev);
1201
1202 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
1203 netxen_nic_detach(adapter);
1204 }
1205
1206 if (adapter->portnum == 0)
1207 netxen_free_dummy_dma(adapter);
1208
1209 netxen_teardown_intr(adapter);
1210 netxen_free_sds_rings(&adapter->recv_ctx);
1211
1212 netxen_cleanup_pci_map(adapter);
1213
1214 netxen_release_firmware(adapter);
1215
1216 pci_release_regions(pdev);
1217 pci_disable_device(pdev);
1218 pci_set_drvdata(pdev, NULL);
1219
1220 free_netdev(netdev);
1221 }
1222
1223 #ifdef CONFIG_PM
1224 static int
1225 netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1226 {
1227
1228 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1229 struct net_device *netdev = adapter->netdev;
1230
1231 netif_device_detach(netdev);
1232
1233 if (netif_running(netdev))
1234 netxen_nic_down(adapter, netdev);
1235
1236 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
1237 netxen_nic_detach(adapter);
1238
1239 pci_save_state(pdev);
1240
1241 if (netxen_nic_wol_supported(adapter)) {
1242 pci_enable_wake(pdev, PCI_D3cold, 1);
1243 pci_enable_wake(pdev, PCI_D3hot, 1);
1244 }
1245
1246 pci_disable_device(pdev);
1247 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1248
1249 return 0;
1250 }
1251
1252 static int
1253 netxen_nic_resume(struct pci_dev *pdev)
1254 {
1255 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1256 struct net_device *netdev = adapter->netdev;
1257 int err;
1258
1259 pci_set_power_state(pdev, PCI_D0);
1260 pci_restore_state(pdev);
1261
1262 err = pci_enable_device(pdev);
1263 if (err)
1264 return err;
1265
1266 adapter->curr_window = 255;
1267
1268 err = netxen_start_firmware(adapter, 0);
1269 if (err) {
1270 dev_err(&pdev->dev, "failed to start firmware\n");
1271 return err;
1272 }
1273
1274 if (netif_running(netdev)) {
1275 err = netxen_nic_attach(adapter);
1276 if (err)
1277 return err;
1278
1279 err = netxen_nic_up(adapter, netdev);
1280 if (err)
1281 return err;
1282
1283 netif_device_attach(netdev);
1284 }
1285
1286 return 0;
1287 }
1288 #endif
1289
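/*
 * netxen_nic_open - entry point that enables a network interface.
 * Attaches hardware/software resources on the first open (or after a
 * detach), brings the port up and starts the tx queue.
 */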
1290 static int netxen_nic_open(struct net_device *netdev)
1291 {
1292 struct netxen_adapter *adapter = netdev_priv(netdev);
1293 int err = 0;
1294
1295 if (adapter->driver_mismatch)
1296 return -EIO;
1297
1298 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
1299 err = netxen_nic_attach(adapter);
1300 if (err)
1301 return err;
1302 }
1303
1304 err = netxen_nic_up(adapter, netdev);
1305 if (err)
1306 goto err_out;
1307
1308 netif_start_queue(netdev);
1309
1310 return 0;
1311
1312 err_out:
1313 netxen_nic_detach(adapter);
1314 return err;
1315 }
1316
1317 /*
1318 * netxen_nic_close - entry point that disables a network interface
1319 */
1320 static int netxen_nic_close(struct net_device *netdev)
1321 {
1322 struct netxen_adapter *adapter = netdev_priv(netdev);
1323
1324 netxen_nic_down(adapter, netdev);
1325 return 0;
1326 }
1327
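/*
 * Fill in the offload fields of the first tx descriptor: VLAN flag,
 * checksum or LSO opcode, MSS and header lengths.  For LSO the
 * MAC/IP/TCP headers are additionally copied into the command ring,
 * as described in the comment below.
 */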
1328 static void
1329 netxen_tso_check(struct net_device *netdev,
1330 struct nx_host_tx_ring *tx_ring,
1331 struct cmd_desc_type0 *first_desc,
1332 struct sk_buff *skb)
1333 {
1334 u8 opcode = TX_ETHER_PKT;
1335 __be16 protocol = skb->protocol;
1336 u16 flags = 0;
1337 u32 producer;
1338 int copied, offset, copy_len, hdr_len = 0, tso = 0;
1339 struct cmd_desc_type0 *hwdesc;
1340
1341 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1342 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1343 protocol = vh->h_vlan_encapsulated_proto;
1344 flags = FLAGS_VLAN_TAGGED;
1345 }
1346
1347 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1348 skb_shinfo(skb)->gso_size > 0) {
1349
1350 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1351
1352 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1353 first_desc->total_hdr_length = hdr_len;
1354
1355 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1356 TX_TCP_LSO6 : TX_TCP_LSO;
1357 tso = 1;
1358
1359 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1360 u8 l4proto;
1361
1362 if (protocol == cpu_to_be16(ETH_P_IP)) {
1363 l4proto = ip_hdr(skb)->protocol;
1364
1365 if (l4proto == IPPROTO_TCP)
1366 opcode = TX_TCP_PKT;
1367 else if (l4proto == IPPROTO_UDP)
1368 opcode = TX_UDP_PKT;
1369 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1370 l4proto = ipv6_hdr(skb)->nexthdr;
1371
1372 if (l4proto == IPPROTO_TCP)
1373 opcode = TX_TCPV6_PKT;
1374 else if (l4proto == IPPROTO_UDP)
1375 opcode = TX_UDPV6_PKT;
1376 }
1377 }
1378 first_desc->tcp_hdr_offset = skb_transport_offset(skb);
1379 first_desc->ip_hdr_offset = skb_network_offset(skb);
1380 netxen_set_tx_flags_opcode(first_desc, flags, opcode);
1381
1382 if (!tso)
1383 return;
1384
1385 /* For LSO, we need to copy the MAC/IP/TCP headers into
1386 * the descriptor ring
1387 */
1388 producer = tx_ring->producer;
1389 copied = 0;
1390 offset = 2;
1391
1392 while (copied < hdr_len) {
1393
1394 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1395 (hdr_len - copied));
1396
1397 hwdesc = &tx_ring->desc_head[producer];
1398 tx_ring->cmd_buf_arr[producer].skb = NULL;
1399
1400 skb_copy_from_linear_data_offset(skb, copied,
1401 (char *)hwdesc + offset, copy_len);
1402
1403 copied += copy_len;
1404 offset = 0;
1405
1406 producer = get_next_index(producer, tx_ring->num_desc);
1407 }
1408
1409 tx_ring->producer = producer;
1410 barrier();
1411 }
1412
1413 static void
1414 netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
1415 struct netxen_cmd_buffer *pbuf, int last)
1416 {
1417 int k;
1418 struct netxen_skb_frag *buffrag;
1419
1420 buffrag = &pbuf->frag_array[0];
1421 pci_unmap_single(pdev, buffrag->dma,
1422 buffrag->length, PCI_DMA_TODEVICE);
1423
1424 for (k = 1; k < last; k++) {
1425 buffrag = &pbuf->frag_array[k];
1426 pci_unmap_page(pdev, buffrag->dma,
1427 buffrag->length, PCI_DMA_TODEVICE);
1428 }
1429 }
1430
1431 static inline void
1432 netxen_clear_cmddesc(u64 *desc)
1433 {
1434 desc[0] = 0ULL;
1435 desc[2] = 0ULL;
1436 }
1437
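/*
 * Main transmit path: DMA-map the linear part of the skb and each page
 * fragment, spread the buffer addresses over command descriptors (four
 * buffers per descriptor), set up offloads via netxen_tso_check(), and
 * ring the doorbell with the new producer index.  Packets are dropped
 * if a DMA mapping fails.
 */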
1438 static int
1439 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1440 {
1441 struct netxen_adapter *adapter = netdev_priv(netdev);
1442 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
1443 struct skb_frag_struct *frag;
1444 struct netxen_cmd_buffer *pbuf;
1445 struct netxen_skb_frag *buffrag;
1446 struct cmd_desc_type0 *hwdesc, *first_desc;
1447 struct pci_dev *pdev;
1448 dma_addr_t temp_dma;
1449 int i, k;
1450 unsigned long offset;
1451
1452 u32 producer;
1453 int len, frag_count, no_of_desc;
1454 u32 num_txd = tx_ring->num_desc;
1455
1456 frag_count = skb_shinfo(skb)->nr_frags + 1;
1457
1458 /* 4 fragments per cmd desc */
1459 no_of_desc = (frag_count + 3) >> 2;
1460
1461 if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
1462 netif_stop_queue(netdev);
1463 return NETDEV_TX_BUSY;
1464 }
1465
1466 producer = tx_ring->producer;
1467
1468 pdev = adapter->pdev;
1469 len = skb->len - skb->data_len;
1470
1471 temp_dma = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE);
1472 if (pci_dma_mapping_error(pdev, temp_dma))
1473 goto drop_packet;
1474
1475 pbuf = &tx_ring->cmd_buf_arr[producer];
1476 pbuf->skb = skb;
1477 pbuf->frag_count = frag_count;
1478
1479 buffrag = &pbuf->frag_array[0];
1480 buffrag->dma = temp_dma;
1481 buffrag->length = len;
1482
1483 first_desc = hwdesc = &tx_ring->desc_head[producer];
1484 netxen_clear_cmddesc((u64 *)hwdesc);
1485 netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
1486 netxen_set_tx_port(hwdesc, adapter->portnum);
1487
1488 hwdesc->buffer_length[0] = cpu_to_le16(len);
1489 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1490
1491 for (i = 1, k = 1; i < frag_count; i++, k++) {
1492
1493 /* move to next desc. if there is a need */
1494 if ((i & 0x3) == 0) {
1495 k = 0;
1496 producer = get_next_index(producer, num_txd);
1497 hwdesc = &tx_ring->desc_head[producer];
1498 netxen_clear_cmddesc((u64 *)hwdesc);
1499 pbuf = &tx_ring->cmd_buf_arr[producer];
1500 pbuf->skb = NULL;
1501 }
1502 buffrag = &pbuf->frag_array[i];
1503 frag = &skb_shinfo(skb)->frags[i - 1];
1504 len = frag->size;
1505 offset = frag->page_offset;
1506
1507 temp_dma = pci_map_page(pdev, frag->page, offset,
1508 len, PCI_DMA_TODEVICE);
1509 if (pci_dma_mapping_error(pdev, temp_dma)) {
1510 netxen_clean_tx_dma_mapping(pdev, pbuf, i);
1511 goto drop_packet;
1512 }
1513
1514 buffrag->dma = temp_dma;
1515 buffrag->length = len;
1516
1517 hwdesc->buffer_length[k] = cpu_to_le16(len);
1518 switch (k) {
1519 case 0:
1520 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1521 break;
1522 case 1:
1523 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
1524 break;
1525 case 2:
1526 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
1527 break;
1528 case 3:
1529 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
1530 break;
1531 }
1532 }
1533 tx_ring->producer = get_next_index(producer, num_txd);
1534
1535 netxen_tso_check(netdev, tx_ring, first_desc, skb);
1536
1537 netxen_nic_update_cmd_producer(adapter, tx_ring);
1538
1539 adapter->stats.txbytes += skb->len;
1540 adapter->stats.xmitcalled++;
1541
1542 return NETDEV_TX_OK;
1543
1544 drop_packet:
1545 adapter->stats.txdropped++;
1546 dev_kfree_skb_any(skb);
1547 return NETDEV_TX_OK;
1548 }
1549
1550 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1551 {
1552 struct net_device *netdev = adapter->netdev;
1553 uint32_t temp, temp_state, temp_val;
1554 int rv = 0;
1555
1556 temp = NXRD32(adapter, CRB_TEMP_STATE);
1557
1558 temp_state = nx_get_temp_state(temp);
1559 temp_val = nx_get_temp_val(temp);
1560
1561 if (temp_state == NX_TEMP_PANIC) {
1562 printk(KERN_ALERT
1563 "%s: Device temperature %d degrees C exceeds"
1564 " maximum allowed. Hardware has been shut down.\n",
1565 netdev->name, temp_val);
1566
1567 netif_device_detach(netdev);
1568 netxen_nic_down(adapter, netdev);
1569 netxen_nic_detach(adapter);
1570
1571 rv = 1;
1572 } else if (temp_state == NX_TEMP_WARN) {
1573 if (adapter->temp == NX_TEMP_NORMAL) {
1574 printk(KERN_ALERT
1575 "%s: Device temperature %d degrees C "
1576 "exceeds operating range."
1577 " Immediate action needed.\n",
1578 netdev->name, temp_val);
1579 }
1580 } else {
1581 if (adapter->temp == NX_TEMP_WARN) {
1582 printk(KERN_INFO
1583 "%s: Device temperature is now %d degrees C"
1584 " in normal range.\n", netdev->name,
1585 temp_val);
1586 }
1587 }
1588 adapter->temp = temp_state;
1589 return rv;
1590 }
1591
1592 void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
1593 {
1594 struct net_device *netdev = adapter->netdev;
1595
1596 if (adapter->ahw.linkup && !linkup) {
1597 printk(KERN_INFO "%s: %s NIC Link is down\n",
1598 netxen_nic_driver_name, netdev->name);
1599 adapter->ahw.linkup = 0;
1600 if (netif_running(netdev)) {
1601 netif_carrier_off(netdev);
1602 netif_stop_queue(netdev);
1603 }
1604
1605 if (!adapter->has_link_events)
1606 netxen_nic_set_link_parameters(adapter);
1607
1608 } else if (!adapter->ahw.linkup && linkup) {
1609 printk(KERN_INFO "%s: %s NIC Link is up\n",
1610 netxen_nic_driver_name, netdev->name);
1611 adapter->ahw.linkup = 1;
1612 if (netif_running(netdev)) {
1613 netif_carrier_on(netdev);
1614 netif_wake_queue(netdev);
1615 }
1616
1617 if (!adapter->has_link_events)
1618 netxen_nic_set_link_parameters(adapter);
1619 }
1620 }
1621
1622 static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1623 {
1624 u32 val, port, linkup;
1625
1626 port = adapter->physical_port;
1627
1628 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1629 val = NXRD32(adapter, CRB_XG_STATE_P3);
1630 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1631 linkup = (val == XG_LINK_UP_P3);
1632 } else {
1633 val = NXRD32(adapter, CRB_XG_STATE);
1634 if (adapter->ahw.port_type == NETXEN_NIC_GBE)
1635 linkup = (val >> port) & 1;
1636 else {
1637 val = (val >> port*8) & 0xff;
1638 linkup = (val == XG_LINK_UP);
1639 }
1640 }
1641
1642 netxen_advert_link_change(adapter, linkup);
1643 }
1644
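/*
 * The watchdog timer callback only schedules watchdog_task on the
 * driver workqueue.  The work function checks the on-board temperature,
 * polls link state when firmware link events are unavailable, and
 * re-arms the timer while the interface is running.
 */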
1645 static void netxen_watchdog(unsigned long v)
1646 {
1647 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
1648
1649 SCHEDULE_WORK(&adapter->watchdog_task);
1650 }
1651
1652 void netxen_watchdog_task(struct work_struct *work)
1653 {
1654 struct netxen_adapter *adapter =
1655 container_of(work, struct netxen_adapter, watchdog_task);
1656
1657 if (netxen_nic_check_temp(adapter))
1658 return;
1659
1660 if (!adapter->has_link_events)
1661 netxen_nic_handle_phy_intr(adapter);
1662
1663 if (netif_running(adapter->netdev))
1664 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1665 }
1666
1667 static void netxen_tx_timeout(struct net_device *netdev)
1668 {
1669 struct netxen_adapter *adapter = (struct netxen_adapter *)
1670 netdev_priv(netdev);
1671
1672 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1673
1674 SCHEDULE_WORK(&adapter->tx_timeout_task);
1675 }
1676
1677 static void netxen_reset_task(struct work_struct *work)
1678 {
1679 struct netxen_adapter *adapter =
1680 container_of(work, struct netxen_adapter, tx_timeout_task);
1681
1682 if (!netif_running(adapter->netdev))
1683 return;
1684
1685 netxen_napi_disable(adapter);
1686
1687 adapter->netdev->trans_start = jiffies;
1688
1689 netxen_napi_enable(adapter);
1690 netif_wake_queue(adapter->netdev);
1691 }
1692
1693 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1694 {
1695 struct netxen_adapter *adapter = netdev_priv(netdev);
1696 struct net_device_stats *stats = &adapter->net_stats;
1697
1698 memset(stats, 0, sizeof(*stats));
1699
1700 stats->rx_packets = adapter->stats.no_rcv;
1701 stats->tx_packets = adapter->stats.xmitfinished;
1702 stats->rx_bytes = adapter->stats.rxbytes;
1703 stats->tx_bytes = adapter->stats.txbytes;
1704 stats->rx_dropped = adapter->stats.rxdropped;
1705 stats->tx_dropped = adapter->stats.txdropped;
1706
1707 return stats;
1708 }
1709
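/*
 * Legacy INTx handler: verify the interrupt belongs to this function,
 * acknowledge it through the target status register, and schedule NAPI.
 * The MSI and MSI-X handlers below are simpler because the vector
 * itself already identifies the source.
 */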
1710 static irqreturn_t netxen_intr(int irq, void *data)
1711 {
1712 struct nx_host_sds_ring *sds_ring = data;
1713 struct netxen_adapter *adapter = sds_ring->adapter;
1714 u32 status = 0;
1715
1716 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1717
1718 if (!(status & adapter->legacy_intr.int_vec_bit))
1719 return IRQ_NONE;
1720
1721 if (adapter->ahw.revision_id >= NX_P3_B1) {
1722 /* check interrupt state machine, to be sure */
1723 status = adapter->pci_read_immediate(adapter,
1724 ISR_INT_STATE_REG);
1725 if (!ISR_LEGACY_INT_TRIGGERED(status))
1726 return IRQ_NONE;
1727
1728 } else {
1729 unsigned long our_int = 0;
1730
1731 our_int = NXRD32(adapter, CRB_INT_VECTOR);
1732
1733 /* not our interrupt */
1734 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
1735 return IRQ_NONE;
1736
1737 /* claim interrupt */
1738 NXWR32(adapter, CRB_INT_VECTOR, (our_int & 0xffffffff));
1739 }
1740
1741 /* clear interrupt */
1742 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
1743 netxen_nic_disable_int(sds_ring);
1744
1745 adapter->pci_write_immediate(adapter,
1746 adapter->legacy_intr.tgt_status_reg,
1747 0xffffffff);
1748 /* read twice to ensure write is flushed */
1749 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1750 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1751
1752 napi_schedule(&sds_ring->napi);
1753
1754 return IRQ_HANDLED;
1755 }
1756
1757 static irqreturn_t netxen_msi_intr(int irq, void *data)
1758 {
1759 struct nx_host_sds_ring *sds_ring = data;
1760 struct netxen_adapter *adapter = sds_ring->adapter;
1761
1762 /* clear interrupt */
1763 adapter->pci_write_immediate(adapter,
1764 adapter->msi_tgt_status, 0xffffffff);
1765
1766 napi_schedule(&sds_ring->napi);
1767 return IRQ_HANDLED;
1768 }
1769
1770 static irqreturn_t netxen_msix_intr(int irq, void *data)
1771 {
1772 struct nx_host_sds_ring *sds_ring = data;
1773
1774 napi_schedule(&sds_ring->napi);
1775 return IRQ_HANDLED;
1776 }
1777
1778 static int netxen_nic_poll(struct napi_struct *napi, int budget)
1779 {
1780 struct nx_host_sds_ring *sds_ring =
1781 container_of(napi, struct nx_host_sds_ring, napi);
1782
1783 struct netxen_adapter *adapter = sds_ring->adapter;
1784
1785 int tx_complete;
1786 int work_done;
1787
1788 tx_complete = netxen_process_cmd_ring(adapter);
1789
1790 work_done = netxen_process_rcv_ring(sds_ring, budget);
1791
1792 if ((work_done < budget) && tx_complete) {
1793 napi_complete(&sds_ring->napi);
1794 if (netif_running(adapter->netdev))
1795 netxen_nic_enable_int(sds_ring);
1796 }
1797
1798 return work_done;
1799 }
1800
1801 #ifdef CONFIG_NET_POLL_CONTROLLER
1802 static void netxen_nic_poll_controller(struct net_device *netdev)
1803 {
1804 struct netxen_adapter *adapter = netdev_priv(netdev);
1805 disable_irq(adapter->irq);
1806 netxen_intr(adapter->irq, adapter);
1807 enable_irq(adapter->irq);
1808 }
1809 #endif
1810
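/*
 * The netdev and inetaddr notifiers below keep the firmware informed of
 * the interface's IPv4 addresses via netxen_config_ipaddr().  This is
 * only supported on P3 adapters that are not in cut-through mode (see
 * netxen_destip_supported()), and VLAN devices are first resolved to
 * their underlying real device.
 */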
1811 #define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
1812
1813 static int
1814 netxen_destip_supported(struct netxen_adapter *adapter)
1815 {
1816 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
1817 return 0;
1818
1819 if (adapter->ahw.cut_through)
1820 return 0;
1821
1822 return 1;
1823 }
1824
1825 static int netxen_netdev_event(struct notifier_block *this,
1826 unsigned long event, void *ptr)
1827 {
1828 struct netxen_adapter *adapter;
1829 struct net_device *dev = (struct net_device *)ptr;
1830 struct in_device *indev;
1831
1832 recheck:
1833 if (dev == NULL)
1834 goto done;
1835
1836 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1837 dev = vlan_dev_real_dev(dev);
1838 goto recheck;
1839 }
1840
1841 if (!is_netxen_netdev(dev))
1842 goto done;
1843
1844 adapter = netdev_priv(dev);
1845
1846 if (!adapter || !netxen_destip_supported(adapter))
1847 goto done;
1848
1849 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
1850 goto done;
1851
1852 indev = in_dev_get(dev);
1853 if (!indev)
1854 goto done;
1855
1856 for_ifa(indev) {
1857 switch (event) {
1858 case NETDEV_UP:
1859 netxen_config_ipaddr(adapter,
1860 ifa->ifa_address, NX_IP_UP);
1861 break;
1862 case NETDEV_DOWN:
1863 netxen_config_ipaddr(adapter,
1864 ifa->ifa_address, NX_IP_DOWN);
1865 break;
1866 default:
1867 break;
1868 }
1869 } endfor_ifa(indev);
1870
1871 in_dev_put(indev);
1872 done:
1873 return NOTIFY_DONE;
1874 }
1875
1876 static int
1877 netxen_inetaddr_event(struct notifier_block *this,
1878 unsigned long event, void *ptr)
1879 {
1880 struct netxen_adapter *adapter;
1881 struct net_device *dev;
1882
1883 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1884
1885 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
1886
1887 recheck:
1888 if (dev == NULL || !netif_running(dev))
1889 goto done;
1890
1891 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1892 dev = vlan_dev_real_dev(dev);
1893 goto recheck;
1894 }
1895
1896 if (!is_netxen_netdev(dev))
1897 goto done;
1898
1899 adapter = netdev_priv(dev);
1900
1901 if (!adapter || !netxen_destip_supported(adapter))
1902 goto done;
1903
1904 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
1905 goto done;
1906
1907 switch (event) {
1908 case NETDEV_UP:
1909 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
1910 break;
1911 case NETDEV_DOWN:
1912 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
1913 break;
1914 default:
1915 break;
1916 }
1917
1918 done:
1919 return NOTIFY_DONE;
1920 }
1921
1922 static struct notifier_block netxen_netdev_cb = {
1923 .notifier_call = netxen_netdev_event,
1924 };
1925
1926 static struct notifier_block netxen_inetaddr_cb = {
1927 .notifier_call = netxen_inetaddr_event,
1928 };
1929
1930 static struct pci_driver netxen_driver = {
1931 .name = netxen_nic_driver_name,
1932 .id_table = netxen_pci_tbl,
1933 .probe = netxen_nic_probe,
1934 .remove = __devexit_p(netxen_nic_remove),
1935 #ifdef CONFIG_PM
1936 .suspend = netxen_nic_suspend,
1937 .resume = netxen_nic_resume
1938 #endif
1939 };
1940
1941 static int __init netxen_init_module(void)
1942 {
1943 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
1944
1945 if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
1946 return -ENOMEM;
1947
1948 register_netdevice_notifier(&netxen_netdev_cb);
1949 register_inetaddr_notifier(&netxen_inetaddr_cb);
1950
1951 return pci_register_driver(&netxen_driver);
1952 }
1953
1954 module_init(netxen_init_module);
1955
1956 static void __exit netxen_exit_module(void)
1957 {
1958 pci_unregister_driver(&netxen_driver);
1959
1960 unregister_inetaddr_notifier(&netxen_inetaddr_cb);
1961 unregister_netdevice_notifier(&netxen_netdev_cb);
1962 destroy_workqueue(netxen_workq);
1963 }
1964
1965 module_exit(netxen_exit_module);