netxen: avoid invalid iounmap
drivers/net/netxen/netxen_nic_main.c
1 /*
2 * Copyright (C) 2003 - 2006 NetXen, Inc.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
22 *
23 * Contact Information:
24 * info@netxen.com
25 * NetXen,
26 * 3965 Freedom Circle, Fourth floor,
27 * Santa Clara, CA 95054
28 *
29 *
30 * Main source file for NetXen NIC Driver on Linux
31 *
32 */
33
34 #include <linux/vmalloc.h>
35 #include <linux/highmem.h>
36 #include "netxen_nic_hw.h"
37
38 #include "netxen_nic.h"
39 #include "netxen_nic_phan_reg.h"
40
41 #include <linux/dma-mapping.h>
42 #include <linux/if_vlan.h>
43 #include <net/ip.h>
44
45 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
46 MODULE_LICENSE("GPL");
47 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
48
49 char netxen_nic_driver_name[] = "netxen_nic";
50 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
51 NETXEN_NIC_LINUX_VERSIONID;
52
53 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
54
55 /* Default to restricted 1G auto-neg mode */
56 static int wol_port_mode = 5;
57
58 static int use_msi = 1;
59
60 static int use_msi_x = 1;
61
62 /* Local functions to NetXen NIC driver */
63 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent);
65 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
66 static int netxen_nic_open(struct net_device *netdev);
67 static int netxen_nic_close(struct net_device *netdev);
68 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
69 static void netxen_tx_timeout(struct net_device *netdev);
70 static void netxen_tx_timeout_task(struct work_struct *work);
71 static void netxen_watchdog(unsigned long);
72 static int netxen_nic_poll(struct napi_struct *napi, int budget);
73 #ifdef CONFIG_NET_POLL_CONTROLLER
74 static void netxen_nic_poll_controller(struct net_device *netdev);
75 #endif
76 static irqreturn_t netxen_intr(int irq, void *data);
77 static irqreturn_t netxen_msi_intr(int irq, void *data);
78
79 /* PCI Device ID Table */
80 #define ENTRY(device) \
81 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
82 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
83
84 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
85 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
86 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
87 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
88 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
89 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
91 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
92 ENTRY(PCI_DEVICE_ID_NX3031),
93 {0,}
94 };
95
96 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
97
98 /*
99 * In netxen_nic_down(), we must wait for any pending callback requests into
100 * netxen_watchdog_task() to complete; otherwise the watchdog_timer could be
101 * reenabled right after it is deleted in netxen_nic_down().
102 * FLUSH_SCHEDULED_WORK() does this synchronization.
103 *
104 * Normally, schedule_work()/flush_scheduled_work() could have worked, but
105 * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
106 * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
107 * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
108 * linkwatch_event() to be executed which also attempts to acquire the rtnl
109 * lock thus causing a deadlock.
110 */
111
112 static struct workqueue_struct *netxen_workq;
113 #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
114 #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
115
116 static void netxen_watchdog(unsigned long);
117
118 static uint32_t crb_cmd_producer[4] = {
119 CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
120 CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
121 };
122
123 void
124 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
125 uint32_t crb_producer)
126 {
127 adapter->pci_write_normalize(adapter,
128 adapter->crb_addr_cmd_producer, crb_producer);
129 }
130
131 static uint32_t crb_cmd_consumer[4] = {
132 CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
133 CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
134 };
135
136 static inline void
137 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
138 u32 crb_consumer)
139 {
140 adapter->pci_write_normalize(adapter,
141 adapter->crb_addr_cmd_consumer, crb_consumer);
142 }
143
144 static uint32_t msi_tgt_status[8] = {
145 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
146 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
147 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
148 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
149 };
150
151 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
152
153 static inline void netxen_nic_disable_int(struct netxen_adapter *adapter)
154 {
155 adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0);
156 }
157
158 static inline void netxen_nic_enable_int(struct netxen_adapter *adapter)
159 {
160 adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1);
161
162 if (!NETXEN_IS_MSI_FAMILY(adapter))
163 adapter->pci_write_immediate(adapter,
164 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
165 }
166
167 static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
168 {
169 struct pci_dev *pdev = adapter->pdev;
170 int err;
171 uint64_t mask;
172
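/*
 * Pick the widest DMA mask the chip revision supports: P3 B0 and later
 * (and P3 A2) can address 39 bits, P2 C1 35 bits, and anything else
 * falls back to 32 bits.  On IA64 the driver restricts itself to
 * 32-bit DMA.
 */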
173 #ifdef CONFIG_IA64
174 adapter->dma_mask = DMA_32BIT_MASK;
175 #else
176 if (revision_id >= NX_P3_B0) {
177 /* should go to DMA_64BIT_MASK */
178 adapter->dma_mask = DMA_39BIT_MASK;
179 mask = DMA_39BIT_MASK;
180 } else if (revision_id == NX_P3_A2) {
181 adapter->dma_mask = DMA_39BIT_MASK;
182 mask = DMA_39BIT_MASK;
183 } else if (revision_id == NX_P2_C1) {
184 adapter->dma_mask = DMA_35BIT_MASK;
185 mask = DMA_35BIT_MASK;
186 } else {
187 adapter->dma_mask = DMA_32BIT_MASK;
188 mask = DMA_32BIT_MASK;
189 goto set_32_bit_mask;
190 }
191
192 /*
193 * Consistent DMA mask is set to 32 bit because it cannot be set to
194 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
195 * come off this pool.
196 */
197 if (pci_set_dma_mask(pdev, mask) == 0 &&
198 pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) == 0) {
199 adapter->pci_using_dac = 1;
200 return 0;
201 }
202 #endif /* CONFIG_IA64 */
203
204 set_32_bit_mask:
205 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
206 if (!err)
207 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
208 if (err) {
209 DPRINTK(ERR, "No usable DMA configuration, aborting: %d\n", err);
210 return err;
211 }
212
213 adapter->pci_using_dac = 0;
214 return 0;
215 }
216
217 static void netxen_check_options(struct netxen_adapter *adapter)
218 {
219 switch (adapter->ahw.boardcfg.board_type) {
220 case NETXEN_BRDTYPE_P3_HMEZ:
221 case NETXEN_BRDTYPE_P3_XG_LOM:
222 case NETXEN_BRDTYPE_P3_10G_CX4:
223 case NETXEN_BRDTYPE_P3_10G_CX4_LP:
224 case NETXEN_BRDTYPE_P3_IMEZ:
225 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
226 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
227 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
228 case NETXEN_BRDTYPE_P3_10G_XFP:
229 case NETXEN_BRDTYPE_P3_10000_BASE_T:
230 adapter->msix_supported = !!use_msi_x;
231 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
232 break;
233
234 case NETXEN_BRDTYPE_P2_SB31_10G:
235 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
236 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
237 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
238 adapter->msix_supported = 0;
239 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
240 break;
241
242 case NETXEN_BRDTYPE_P3_REF_QG:
243 case NETXEN_BRDTYPE_P3_4_GB:
244 case NETXEN_BRDTYPE_P3_4_GB_MM:
245 adapter->msix_supported = !!use_msi_x;
246 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
247 break;
248
249 case NETXEN_BRDTYPE_P2_SB35_4G:
250 case NETXEN_BRDTYPE_P2_SB31_2G:
251 adapter->msix_supported = 0;
252 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
253 break;
254
255 case NETXEN_BRDTYPE_P3_10G_TP:
256 adapter->msix_supported = !!use_msi_x;
257 if (adapter->ahw.board_type == NETXEN_NIC_XGBE)
258 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
259 else
260 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
261 break;
262
263 default:
264 adapter->msix_supported = 0;
265 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
266
267 printk(KERN_WARNING "Unknown board type (0x%x)\n",
268 adapter->ahw.boardcfg.board_type);
269 break;
270 }
271
272 adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS_HOST;
273 adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
274 adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS;
275
276 adapter->max_possible_rss_rings = 1;
277 return;
278 }
279
280 static int
281 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
282 {
283 u32 val, timeout;
284
285 if (first_boot == 0x55555555) {
286 /* This is the first boot after power up */
287 adapter->pci_write_normalize(adapter,
288 NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
289
290 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
291 return 0;
292
293 /* PCI bus master workaround */
294 adapter->hw_read_wx(adapter,
295 NETXEN_PCIE_REG(0x4), &first_boot, 4);
296 if (!(first_boot & 0x4)) {
297 first_boot |= 0x4;
298 adapter->hw_write_wx(adapter,
299 NETXEN_PCIE_REG(0x4), &first_boot, 4);
300 adapter->hw_read_wx(adapter,
301 NETXEN_PCIE_REG(0x4), &first_boot, 4);
302 }
303
304 /* check that the global SW reset register has its power-on value */
305 adapter->hw_read_wx(adapter,
306 NETXEN_ROMUSB_GLB_SW_RESET, &first_boot, 4);
307 if (first_boot != 0x80000f) {
308 /* clear the register for future unloads/loads */
309 adapter->pci_write_normalize(adapter,
310 NETXEN_CAM_RAM(0x1fc), 0);
311 return -EIO;
312 }
313
314 /* Start P2 boot loader */
315 val = adapter->pci_read_normalize(adapter,
316 NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
317 adapter->pci_write_normalize(adapter,
318 NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
319 timeout = 0;
320 do {
321 msleep(1);
322 val = adapter->pci_read_normalize(adapter,
323 NETXEN_CAM_RAM(0x1fc));
324
325 if (++timeout > 5000)
326 return -EIO;
327
328 } while (val == NETXEN_BDINFO_MAGIC);
329 }
330 return 0;
331 }
332
333 static void netxen_set_port_mode(struct netxen_adapter *adapter)
334 {
335 u32 val, data;
336
337 val = adapter->ahw.boardcfg.board_type;
338 if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
339 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
340 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
341 data = NETXEN_PORT_MODE_802_3_AP;
342 adapter->hw_write_wx(adapter,
343 NETXEN_PORT_MODE_ADDR, &data, 4);
344 } else if (port_mode == NETXEN_PORT_MODE_XG) {
345 data = NETXEN_PORT_MODE_XG;
346 adapter->hw_write_wx(adapter,
347 NETXEN_PORT_MODE_ADDR, &data, 4);
348 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
349 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
350 adapter->hw_write_wx(adapter,
351 NETXEN_PORT_MODE_ADDR, &data, 4);
352 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
353 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
354 adapter->hw_write_wx(adapter,
355 NETXEN_PORT_MODE_ADDR, &data, 4);
356 } else {
357 data = NETXEN_PORT_MODE_AUTO_NEG;
358 adapter->hw_write_wx(adapter,
359 NETXEN_PORT_MODE_ADDR, &data, 4);
360 }
361
362 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
363 (wol_port_mode != NETXEN_PORT_MODE_XG) &&
364 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
365 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
366 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
367 }
368 adapter->hw_write_wx(adapter, NETXEN_WOL_PORT_MODE,
369 &wol_port_mode, 4);
370 }
371 }
372
373 #define PCI_CAP_ID_GEN 0x10
374
375 static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
376 {
377 u32 pdevfuncsave;
378 u32 c8c9value = 0;
379 u32 chicken = 0;
380 u32 control = 0;
381 int i, pos;
382 struct pci_dev *pdev;
383
384 pdev = adapter->pdev;
385
386 adapter->hw_read_wx(adapter,
387 NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4);
388 /* clear chicken3.25:24 */
389 chicken &= 0xFCFFFFFF;
390 /*
391 * if gen2, set c8c9 to F1000;
392 * if gen1 and B0, set it to F1020; otherwise leave it at 0 (do nothing)
393 */
394 pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
395 if (pos == 0xC0) {
396 pci_read_config_dword(pdev, pos + 0x10, &control);
397 if ((control & 0x000F0000) != 0x00020000) {
398 /* set chicken3.24 if gen1 */
399 chicken |= 0x01000000;
400 }
401 printk(KERN_INFO "%s Gen2 strapping detected\n",
402 netxen_nic_driver_name);
403 c8c9value = 0xF1000;
404 } else {
405 /* set chicken3.24 if gen1 */
406 chicken |= 0x01000000;
407 printk(KERN_INFO "%s Gen1 strapping detected\n",
408 netxen_nic_driver_name);
409 if (adapter->ahw.revision_id == NX_P3_B0)
410 c8c9value = 0xF1020;
411 else
412 c8c9value = 0;
413
414 }
415 adapter->hw_write_wx(adapter,
416 NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4);
417
418 if (!c8c9value)
419 return;
420
421 pdevfuncsave = pdev->devfn;
422 if (pdevfuncsave & 0x07)
423 return;
424
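/*
 * Only function 0 reaches this point (checked above).  Write the strap
 * value into the same config register (pos + 8) of all eight PCI
 * functions by temporarily stepping pdev->devfn, then restore it.
 */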
425 for (i = 0; i < 8; i++) {
426 pci_read_config_dword(pdev, pos + 8, &control);
427 pci_read_config_dword(pdev, pos + 8, &control);
428 pci_write_config_dword(pdev, pos + 8, c8c9value);
429 pdev->devfn++;
430 }
431 pdev->devfn = pdevfuncsave;
432 }
433
434 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
435 {
436 u32 control;
437 int pos;
438
439 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
440 if (pos) {
441 pci_read_config_dword(pdev, pos, &control);
442 if (enable)
443 control |= PCI_MSIX_FLAGS_ENABLE;
444 else
445 control = 0;
446 pci_write_config_dword(pdev, pos, control);
447 }
448 }
449
450 static void netxen_init_msix_entries(struct netxen_adapter *adapter)
451 {
452 int i;
453
454 for (i = 0; i < MSIX_ENTRIES_PER_ADAPTER; i++)
455 adapter->msix_entries[i].entry = i;
456 }
457
458 static int
459 netxen_read_mac_addr(struct netxen_adapter *adapter)
460 {
461 int i;
462 unsigned char *p;
463 __le64 mac_addr;
464 struct net_device *netdev = adapter->netdev;
465 struct pci_dev *pdev = adapter->pdev;
466
467 if (netxen_is_flash_supported(adapter) != 0)
468 return -EIO;
469
470 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
471 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
472 return -EIO;
473 } else {
474 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
475 return -EIO;
476 }
477
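/* the 64-bit value read above holds the MAC address in reverse byte order */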
478 p = (unsigned char *)&mac_addr;
479 for (i = 0; i < 6; i++)
480 netdev->dev_addr[i] = *(p + 5 - i);
481
482 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
483
484 /* set station address */
485
486 if (!is_valid_ether_addr(netdev->perm_addr))
487 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
488 else
489 adapter->macaddr_set(adapter, netdev->dev_addr);
490
491 return 0;
492 }
493
494 static void netxen_set_multicast_list(struct net_device *dev)
495 {
496 struct netxen_adapter *adapter = netdev_priv(dev);
497
498 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
499 netxen_p3_nic_set_multi(dev);
500 else
501 netxen_p2_nic_set_multi(dev);
502 }
503
504 static const struct net_device_ops netxen_netdev_ops = {
505 .ndo_open = netxen_nic_open,
506 .ndo_stop = netxen_nic_close,
507 .ndo_start_xmit = netxen_nic_xmit_frame,
508 .ndo_get_stats = netxen_nic_get_stats,
509 .ndo_validate_addr = eth_validate_addr,
510 .ndo_set_multicast_list = netxen_set_multicast_list,
511 .ndo_set_mac_address = netxen_nic_set_mac,
512 .ndo_change_mtu = netxen_nic_change_mtu,
513 .ndo_tx_timeout = netxen_tx_timeout,
514 #ifdef CONFIG_NET_POLL_CONTROLLER
515 .ndo_poll_controller = netxen_nic_poll_controller,
516 #endif
517 };
518
519 /*
520 * netxen_nic_probe()
521 *
522 * The PCI core will invoke this after matching the vendor and device IDs
523 * against the pci_tbl supported by this module.
524 *
525 * A quad port card has one operational PCI config space, (function 0),
526 * which is used to access all four ports.
527 *
528 * This routine will initialize the adapter, and setup the global parameters
529 * along with the port's specific structure.
530 */
531 static int __devinit
532 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
533 {
534 struct net_device *netdev = NULL;
535 struct netxen_adapter *adapter = NULL;
536 void __iomem *mem_ptr0 = NULL;
537 void __iomem *mem_ptr1 = NULL;
538 void __iomem *mem_ptr2 = NULL;
539 unsigned long first_page_group_end;
540 unsigned long first_page_group_start;
541
542
543 u8 __iomem *db_ptr = NULL;
544 unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0;
545 int i = 0, err;
546 int first_driver, first_boot;
547 u32 val;
548 int pci_func_id = PCI_FUNC(pdev->devfn);
549 struct netxen_legacy_intr_set *legacy_intrp;
550 uint8_t revision_id;
551
552 if (pci_func_id == 0)
553 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
554
555 if (pdev->class != 0x020000) {
556 printk(KERN_DEBUG "NetXen function %d, class %x will not "
557 "be enabled.\n",pci_func_id, pdev->class);
558 return -ENODEV;
559 }
560
561 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
562 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x"
563 "will not be enabled.\n",
564 NX_P3_A0, NX_P3_B1);
565 return -ENODEV;
566 }
567
568 if ((err = pci_enable_device(pdev)))
569 return err;
570
571 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
572 err = -ENODEV;
573 goto err_out_disable_pdev;
574 }
575
576 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
577 goto err_out_disable_pdev;
578
579 pci_set_master(pdev);
580
581 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
582 if (!netdev) {
583 printk(KERN_ERR "%s: Failed to allocate memory for the "
584 "device block. Check system memory resource"
585 " usage.\n", netxen_nic_driver_name);
err = -ENOMEM;
586 goto err_out_free_res;
587 }
588
589 SET_NETDEV_DEV(netdev, &pdev->dev);
590
591 adapter = netdev_priv(netdev);
592 adapter->netdev = netdev;
593 adapter->pdev = pdev;
594 adapter->ahw.pci_func = pci_func_id;
595
596 revision_id = pdev->revision;
597 adapter->ahw.revision_id = revision_id;
598
599 err = nx_set_dma_mask(adapter, revision_id);
600 if (err)
601 goto err_out_free_netdev;
602
603 rwlock_init(&adapter->adapter_lock);
604 adapter->ahw.qdr_sn_window = -1;
605 adapter->ahw.ddr_mn_window = -1;
606
607 /* remap phys address */
608 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
609 mem_len = pci_resource_len(pdev, 0);
610 pci_len0 = 0;
611
612 adapter->hw_write_wx = netxen_nic_hw_write_wx_128M;
613 adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
614 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
615 adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
616 adapter->pci_read_normalize = netxen_nic_pci_read_normalize_128M;
617 adapter->pci_write_normalize = netxen_nic_pci_write_normalize_128M;
618 adapter->pci_set_window = netxen_nic_pci_set_window_128M;
619 adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
620 adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
621
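/*
 * BAR 0 size determines the register access scheme: 128MB and 32MB
 * BARs keep the 128M access routines assigned above, while a 2MB BAR
 * (newer P3 parts) switches to the _2M routines below.
 */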
622 /* 128 Meg of memory */
623 if (mem_len == NETXEN_PCI_128MB_SIZE) {
624 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
625 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
626 SECOND_PAGE_GROUP_SIZE);
627 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
628 THIRD_PAGE_GROUP_SIZE);
629 first_page_group_start = FIRST_PAGE_GROUP_START;
630 first_page_group_end = FIRST_PAGE_GROUP_END;
631 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
632 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
633 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
634 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
635 first_page_group_start = 0;
636 first_page_group_end = 0;
637 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
638 adapter->hw_write_wx = netxen_nic_hw_write_wx_2M;
639 adapter->hw_read_wx = netxen_nic_hw_read_wx_2M;
640 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
641 adapter->pci_write_immediate =
642 netxen_nic_pci_write_immediate_2M;
643 adapter->pci_read_normalize = netxen_nic_pci_read_normalize_2M;
644 adapter->pci_write_normalize =
645 netxen_nic_pci_write_normalize_2M;
646 adapter->pci_set_window = netxen_nic_pci_set_window_2M;
647 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
648 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
649
650 mem_ptr0 = ioremap(mem_base, mem_len);
651 pci_len0 = mem_len;
652 first_page_group_start = 0;
653 first_page_group_end = 0;
654
655 adapter->ahw.ddr_mn_window = 0;
656 adapter->ahw.qdr_sn_window = 0;
657
658 adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
659 (pci_func_id * 0x20);
660 adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW;
661 if (pci_func_id < 4)
662 adapter->ahw.ms_win_crb += (pci_func_id * 0x20);
663 else
664 adapter->ahw.ms_win_crb +=
665 0xA0 + ((pci_func_id - 4) * 0x10);
666 } else {
667 err = -EIO;
668 goto err_out_free_netdev;
669 }
670
671 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
672
673 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
674 db_len = pci_resource_len(pdev, 4);
675
676 if (db_len == 0) {
677 printk(KERN_ERR "%s: doorbell is disabled\n",
678 netxen_nic_driver_name);
679 err = -EIO;
680 goto err_out_iounmap;
681 }
682 DPRINTK(INFO, "doorbell ioremap from %lx a size of %lx\n", db_base,
683 db_len);
684
685 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
686 if (!db_ptr) {
687 printk(KERN_ERR "%s: Failed to allocate doorbell map.",
688 netxen_nic_driver_name);
689 err = -EIO;
690 goto err_out_iounmap;
691 }
692 DPRINTK(INFO, "doorbell ioremaped at %p\n", db_ptr);
693
694 adapter->ahw.pci_base0 = mem_ptr0;
695 adapter->ahw.pci_len0 = pci_len0;
696 adapter->ahw.first_page_group_start = first_page_group_start;
697 adapter->ahw.first_page_group_end = first_page_group_end;
698 adapter->ahw.pci_base1 = mem_ptr1;
699 adapter->ahw.pci_base2 = mem_ptr2;
700 adapter->ahw.db_base = db_ptr;
701 adapter->ahw.db_len = db_len;
702
703 netif_napi_add(netdev, &adapter->napi,
704 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
705
706 if (revision_id >= NX_P3_B0)
707 legacy_intrp = &legacy_intr[pci_func_id];
708 else
709 legacy_intrp = &legacy_intr[0];
710
711 adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
712 adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
713 adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
714 adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;
715
716 /* this will be read from FW later */
717 adapter->intr_scheme = -1;
718 adapter->msi_mode = -1;
719
720 /* This will be reset for mezz cards */
721 adapter->portnum = pci_func_id;
722 adapter->status &= ~NETXEN_NETDEV_STATUS;
723 adapter->rx_csum = 1;
724 adapter->mc_enabled = 0;
725 if (NX_IS_REVISION_P3(revision_id))
726 adapter->max_mc_count = 38;
727 else
728 adapter->max_mc_count = 16;
729
730 netdev->netdev_ops = &netxen_netdev_ops;
731 netdev->watchdog_timeo = 2*HZ;
732
733 netxen_nic_change_mtu(netdev, netdev->mtu);
734
735 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
736
737 /* ScatterGather support */
738 netdev->features = NETIF_F_SG;
739 netdev->features |= NETIF_F_IP_CSUM;
740 netdev->features |= NETIF_F_TSO;
741 if (NX_IS_REVISION_P3(revision_id)) {
742 netdev->features |= NETIF_F_IPV6_CSUM;
743 netdev->features |= NETIF_F_TSO6;
744 }
745
746 if (adapter->pci_using_dac)
747 netdev->features |= NETIF_F_HIGHDMA;
748
749 /*
750 * Set the CRB window to invalid. If any register in window 0 is
751 * accessed it should set the window to 0 and then reset it to 1.
752 */
753 adapter->curr_window = 255;
754
755 if (netxen_nic_get_board_info(adapter) != 0) {
756 printk("%s: Error getting board config info.\n",
757 netxen_nic_driver_name);
758 err = -EIO;
759 goto err_out_iounmap;
760 }
761
762 netxen_initialize_adapter_ops(adapter);
763
764 /* Mezz cards have PCI function 0,2,3 enabled */
765 switch (adapter->ahw.boardcfg.board_type) {
766 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
767 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
768 if (pci_func_id >= 2)
769 adapter->portnum = pci_func_id - 2;
770 break;
771 default:
772 break;
773 }
774
775 /*
776 * This call will setup various max rx/tx counts.
777 * It must be done before any buffer/ring allocations.
778 */
779 netxen_check_options(adapter);
780
781 first_driver = 0;
782 if (NX_IS_REVISION_P3(revision_id)) {
783 if (adapter->ahw.pci_func == 0)
784 first_driver = 1;
785 } else {
786 if (adapter->portnum == 0)
787 first_driver = 1;
788 }
789
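/*
 * Only one function per board performs the global hardware init that
 * follows: PCI function 0 on P3, or the function owning port 0 on P2.
 */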
790 if (first_driver) {
791 first_boot = adapter->pci_read_normalize(adapter,
792 NETXEN_CAM_RAM(0x1fc));
793
794 err = netxen_check_hw_init(adapter, first_boot);
795 if (err) {
796 printk(KERN_ERR "%s: error in init HW init sequence\n",
797 netxen_nic_driver_name);
798 goto err_out_iounmap;
799 }
800
801 if (NX_IS_REVISION_P3(revision_id))
802 netxen_set_port_mode(adapter);
803
804 if (first_boot != 0x55555555) {
805 adapter->pci_write_normalize(adapter,
806 CRB_CMDPEG_STATE, 0);
807 netxen_pinit_from_rom(adapter, 0);
808 msleep(1);
809 }
810 netxen_load_firmware(adapter);
811
812 if (NX_IS_REVISION_P3(revision_id))
813 netxen_pcie_strap_init(adapter);
814
815 if (NX_IS_REVISION_P2(revision_id)) {
816
817 /* Initialize multicast addr pool owners */
818 val = 0x7654;
819 if (adapter->ahw.board_type == NETXEN_NIC_XGBE)
820 val |= 0x0f000000;
821 netxen_crb_writelit_adapter(adapter,
822 NETXEN_MAC_ADDR_CNTL_REG, val);
823
824 }
825
826 err = netxen_initialize_adapter_offload(adapter);
827 if (err)
828 goto err_out_iounmap;
829
830 /*
831 * Tell the hardware our version number.
832 */
833 i = (_NETXEN_NIC_LINUX_MAJOR << 16)
834 | ((_NETXEN_NIC_LINUX_MINOR << 8))
835 | (_NETXEN_NIC_LINUX_SUBVERSION);
836 adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, i);
837
838 /* Handshake with the card before we register the devices. */
839 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
840 if (err)
841 goto err_out_free_offload;
842
843 } /* first_driver */
844
845 netxen_nic_flash_print(adapter);
846
847 if (NX_IS_REVISION_P3(revision_id)) {
848 adapter->hw_read_wx(adapter,
849 NETXEN_MIU_MN_CONTROL, &val, 4);
850 adapter->ahw.cut_through = (val & 0x4) ? 1 : 0;
851 dev_info(&pdev->dev, "firmware running in %s mode\n",
852 adapter->ahw.cut_through ? "cut through" : "legacy");
853 }
854
855 /*
856 * See if the firmware gave us a virtual-physical port mapping.
857 */
858 adapter->physical_port = adapter->portnum;
859 i = adapter->pci_read_normalize(adapter, CRB_V2P(adapter->portnum));
860 if (i != 0x55555555)
861 adapter->physical_port = i;
862
863 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
864
865 netxen_set_msix_bit(pdev, 0);
866
867 if (NX_IS_REVISION_P3(revision_id)) {
868 if ((mem_len != NETXEN_PCI_128MB_SIZE) &&
869 mem_len != NETXEN_PCI_2MB_SIZE)
870 adapter->msix_supported = 0;
871 }
872
873 if (adapter->msix_supported) {
874
875 netxen_init_msix_entries(adapter);
876
877 if (pci_enable_msix(pdev, adapter->msix_entries,
878 MSIX_ENTRIES_PER_ADAPTER))
879 goto request_msi;
880
881 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
882 netxen_set_msix_bit(pdev, 1);
883 dev_info(&pdev->dev, "using msi-x interrupts\n");
884
885 } else {
886 request_msi:
887 if (use_msi && !pci_enable_msi(pdev)) {
888 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
889 dev_info(&pdev->dev, "using msi interrupts\n");
890 } else
891 dev_info(&pdev->dev, "using legacy interrupts\n");
892 }
893
894 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
895 netdev->irq = adapter->msix_entries[0].vector;
896 else
897 netdev->irq = pdev->irq;
898
899 err = netxen_receive_peg_ready(adapter);
900 if (err)
901 goto err_out_disable_msi;
902
903 init_timer(&adapter->watchdog_timer);
904 adapter->watchdog_timer.function = &netxen_watchdog;
905 adapter->watchdog_timer.data = (unsigned long)adapter;
906 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
907 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
908
909 err = netxen_read_mac_addr(adapter);
910 if (err)
911 dev_warn(&pdev->dev, "failed to read mac addr\n");
912
913 netif_carrier_off(netdev);
914 netif_stop_queue(netdev);
915
916 if ((err = register_netdev(netdev))) {
917 printk(KERN_ERR "%s: register_netdev failed port #%d"
918 " aborting\n", netxen_nic_driver_name,
919 adapter->portnum);
920 err = -EIO;
921 goto err_out_disable_msi;
922 }
923
924 pci_set_drvdata(pdev, adapter);
925
926 switch (adapter->ahw.board_type) {
927 case NETXEN_NIC_GBE:
928 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
929 adapter->netdev->name);
930 break;
931 case NETXEN_NIC_XGBE:
932 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
933 adapter->netdev->name);
934 break;
935 }
936
937 return 0;
938
939 err_out_disable_msi:
940 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
941 pci_disable_msix(pdev);
942 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
943 pci_disable_msi(pdev);
944
945 err_out_free_offload:
946 if (first_driver)
947 netxen_free_adapter_offload(adapter);
948
949 err_out_iounmap:
950 if (db_ptr)
951 iounmap(db_ptr);
952
953 if (mem_ptr0)
954 iounmap(mem_ptr0);
955 if (mem_ptr1)
956 iounmap(mem_ptr1);
957 if (mem_ptr2)
958 iounmap(mem_ptr2);
959
960 err_out_free_netdev:
961 free_netdev(netdev);
962
963 err_out_free_res:
964 pci_release_regions(pdev);
965
966 err_out_disable_pdev:
967 pci_set_drvdata(pdev, NULL);
968 pci_disable_device(pdev);
969 return err;
970 }
971
972 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
973 {
974 struct netxen_adapter *adapter;
975 struct net_device *netdev;
976
977 adapter = pci_get_drvdata(pdev);
978 if (adapter == NULL)
979 return;
980
981 netdev = adapter->netdev;
982
983 unregister_netdev(netdev);
984
985 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
986 netxen_free_hw_resources(adapter);
987 netxen_release_rx_buffers(adapter);
988 netxen_free_sw_resources(adapter);
989
990 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
991 netxen_p3_free_mac_list(adapter);
992 }
993
994 if (adapter->portnum == 0)
995 netxen_free_adapter_offload(adapter);
996
997 if (adapter->irq)
998 free_irq(adapter->irq, adapter);
999
1000 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
1001 pci_disable_msix(pdev);
1002 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
1003 pci_disable_msi(pdev);
1004
if (adapter->ahw.db_base != NULL)
iounmap(adapter->ahw.db_base);
if (adapter->ahw.pci_base0 != NULL)
iounmap(adapter->ahw.pci_base0);
1007 if (adapter->ahw.pci_base1 != NULL)
1008 iounmap(adapter->ahw.pci_base1);
1009 if (adapter->ahw.pci_base2 != NULL)
1010 iounmap(adapter->ahw.pci_base2);
1011
1012 pci_release_regions(pdev);
1013 pci_disable_device(pdev);
1014 pci_set_drvdata(pdev, NULL);
1015
1016 free_netdev(netdev);
1017 }
1018
1019 /*
1020 * Called when a network interface is made active
1021 * @returns 0 on success, negative value on failure
1022 */
1023 static int netxen_nic_open(struct net_device *netdev)
1024 {
1025 struct netxen_adapter *adapter = netdev_priv(netdev);
1026 int err = 0;
1027 int ctx, ring;
1028 irq_handler_t handler;
1029 unsigned long flags = IRQF_SAMPLE_RANDOM;
1030
1031 if (adapter->driver_mismatch)
1032 return -EIO;
1033
1034 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
1035 err = netxen_init_firmware(adapter);
1036 if (err != 0) {
1037 printk(KERN_ERR "Failed to init firmware\n");
1038 return -EIO;
1039 }
1040
1041 if (adapter->fw_major < 4)
1042 adapter->max_rds_rings = 3;
1043 else
1044 adapter->max_rds_rings = 2;
1045
1046 err = netxen_alloc_sw_resources(adapter);
1047 if (err) {
1048 printk(KERN_ERR "%s: Error in setting sw resources\n",
1049 netdev->name);
1050 return err;
1051 }
1052
1053 netxen_nic_clear_stats(adapter);
1054
1055 err = netxen_alloc_hw_resources(adapter);
1056 if (err) {
1057 printk(KERN_ERR "%s: Error in setting hw resources\n",
1058 netdev->name);
1059 goto err_out_free_sw;
1060 }
1061
1062 if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) ||
1063 (adapter->intr_scheme != INTR_SCHEME_PERPORT)) {
1064 printk(KERN_ERR "%s: Firmware interrupt scheme is "
1065 "incompatible with driver\n",
1066 netdev->name);
1067 adapter->driver_mismatch = 1;
1068 goto err_out_free_hw;
1069 }
1070
1071 if (adapter->fw_major < 4) {
1072 adapter->crb_addr_cmd_producer =
1073 crb_cmd_producer[adapter->portnum];
1074 adapter->crb_addr_cmd_consumer =
1075 crb_cmd_consumer[adapter->portnum];
1076
1077 netxen_nic_update_cmd_producer(adapter, 0);
1078 netxen_nic_update_cmd_consumer(adapter, 0);
1079 }
1080
1081 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
1082 for (ring = 0; ring < adapter->max_rds_rings; ring++)
1083 netxen_post_rx_buffers(adapter, ctx, ring);
1084 }
1085 if (NETXEN_IS_MSI_FAMILY(adapter))
1086 handler = netxen_msi_intr;
1087 else {
1088 flags |= IRQF_SHARED;
1089 handler = netxen_intr;
1090 }
1091 adapter->irq = netdev->irq;
1092 err = request_irq(adapter->irq, handler,
1093 flags, netdev->name, adapter);
1094 if (err) {
1095 printk(KERN_ERR "request_irq failed with: %d\n", err);
1096 goto err_out_free_rxbuf;
1097 }
1098
1099 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
1100 }
1101
1102 /* Done here again so that even if the phantom firmware
1103 * overwrote it, we set it back */
1104 err = adapter->init_port(adapter, adapter->physical_port);
1105 if (err) {
1106 printk(KERN_ERR "%s: Failed to initialize port %d\n",
1107 netxen_nic_driver_name, adapter->portnum);
1108 goto err_out_free_irq;
1109 }
1110 adapter->macaddr_set(adapter, netdev->dev_addr);
1111
1112 netxen_nic_set_link_parameters(adapter);
1113
1114 netxen_set_multicast_list(netdev);
1115 if (adapter->set_mtu)
1116 adapter->set_mtu(adapter, netdev->mtu);
1117
1118 adapter->ahw.linkup = 0;
1119 mod_timer(&adapter->watchdog_timer, jiffies);
1120
1121 napi_enable(&adapter->napi);
1122 netxen_nic_enable_int(adapter);
1123
1124 netif_start_queue(netdev);
1125
1126 return 0;
1127
1128 err_out_free_irq:
1129 free_irq(adapter->irq, adapter);
1130 err_out_free_rxbuf:
1131 netxen_release_rx_buffers(adapter);
1132 err_out_free_hw:
1133 netxen_free_hw_resources(adapter);
1134 err_out_free_sw:
1135 netxen_free_sw_resources(adapter);
1136 return err;
1137 }
1138
1139 /*
1140 * netxen_nic_close - Disables a network interface entry point
1141 */
1142 static int netxen_nic_close(struct net_device *netdev)
1143 {
1144 struct netxen_adapter *adapter = netdev_priv(netdev);
1145
1146 netif_carrier_off(netdev);
1147 netif_stop_queue(netdev);
1148 napi_disable(&adapter->napi);
1149
1150 if (adapter->stop_port)
1151 adapter->stop_port(adapter);
1152
1153 netxen_nic_disable_int(adapter);
1154
1155 netxen_release_tx_buffers(adapter);
1156
1157 FLUSH_SCHEDULED_WORK();
1158 del_timer_sync(&adapter->watchdog_timer);
1159
1160 return 0;
1161 }
1162
1163 static bool netxen_tso_check(struct net_device *netdev,
1164 struct cmd_desc_type0 *desc, struct sk_buff *skb)
1165 {
1166 bool tso = false;
1167 u8 opcode = TX_ETHER_PKT;
1168
1169 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1170 skb_shinfo(skb)->gso_size > 0) {
1171
1172 desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1173 desc->total_hdr_length =
1174 skb_transport_offset(skb) + tcp_hdrlen(skb);
1175
1176 opcode = (skb->protocol == htons(ETH_P_IPV6)) ?
1177 TX_TCP_LSO6 : TX_TCP_LSO;
1178 tso = true;
1179
1180 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1181 u8 l4proto;
1182
1183 if (skb->protocol == htons(ETH_P_IP)) {
1184 l4proto = ip_hdr(skb)->protocol;
1185
1186 if (l4proto == IPPROTO_TCP)
1187 opcode = TX_TCP_PKT;
1188 else if(l4proto == IPPROTO_UDP)
1189 opcode = TX_UDP_PKT;
1190 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1191 l4proto = ipv6_hdr(skb)->nexthdr;
1192
1193 if (l4proto == IPPROTO_TCP)
1194 opcode = TX_TCPV6_PKT;
1195 else if(l4proto == IPPROTO_UDP)
1196 opcode = TX_UDPV6_PKT;
1197 }
1198 }
1199 desc->tcp_hdr_offset = skb_transport_offset(skb);
1200 desc->ip_hdr_offset = skb_network_offset(skb);
1201 netxen_set_tx_flags_opcode(desc, 0, opcode);
1202 return tso;
1203 }
1204
1205 static void
1206 netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
1207 struct netxen_cmd_buffer *pbuf, int last)
1208 {
1209 int k;
1210 struct netxen_skb_frag *buffrag;
1211
1212 buffrag = &pbuf->frag_array[0];
1213 pci_unmap_single(pdev, buffrag->dma,
1214 buffrag->length, PCI_DMA_TODEVICE);
1215
1216 for (k = 1; k < last; k++) {
1217 buffrag = &pbuf->frag_array[k];
1218 pci_unmap_page(pdev, buffrag->dma,
1219 buffrag->length, PCI_DMA_TODEVICE);
1220 }
1221 }
1222
1223 static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1224 {
1225 struct netxen_adapter *adapter = netdev_priv(netdev);
1226 struct netxen_hardware_context *hw = &adapter->ahw;
1227 unsigned int first_seg_len = skb->len - skb->data_len;
1228 struct netxen_cmd_buffer *pbuf;
1229 struct netxen_skb_frag *buffrag;
1230 struct cmd_desc_type0 *hwdesc;
1231 struct pci_dev *pdev = adapter->pdev;
1232 dma_addr_t temp_dma;
1233 int i, k;
1234
1235 u32 producer, consumer;
1236 int frag_count, no_of_desc;
1237 u32 num_txd = adapter->max_tx_desc_count;
1238 bool is_tso = false;
1239
1240 frag_count = skb_shinfo(skb)->nr_frags + 1;
1241
1242 /* There are 4 fragments per descriptor */
1243 no_of_desc = (frag_count + 3) >> 2;
1244
1245 producer = adapter->cmd_producer;
1246 smp_mb();
1247 consumer = adapter->last_cmd_consumer;
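/*
 * Need no_of_desc descriptors plus two spare, presumably to leave room
 * for the extra descriptors the LSO path below can consume for copied
 * headers; if the ring can't hold them, stop the queue and bail out.
 */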
1248 if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) {
1249 netif_stop_queue(netdev);
1250 smp_mb();
1251 return NETDEV_TX_BUSY;
1252 }
1253
1254 /* Copy the descriptors into the hardware */
1255 hwdesc = &hw->cmd_desc_head[producer];
1256 memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
1257 /* Take skb->data itself */
1258 pbuf = &adapter->cmd_buf_arr[producer];
1259
1260 is_tso = netxen_tso_check(netdev, hwdesc, skb);
1261
1262 pbuf->skb = skb;
1263 pbuf->frag_count = frag_count;
1264 buffrag = &pbuf->frag_array[0];
1265 temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
1266 PCI_DMA_TODEVICE);
1267 if (pci_dma_mapping_error(pdev, temp_dma))
1268 goto drop_packet;
1269
1270 buffrag->dma = temp_dma;
1271 buffrag->length = first_seg_len;
1272 netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
1273 netxen_set_tx_port(hwdesc, adapter->portnum);
1274
1275 hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
1276 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1277
1278 for (i = 1, k = 1; i < frag_count; i++, k++) {
1279 struct skb_frag_struct *frag;
1280 int len, temp_len;
1281 unsigned long offset;
1282
1283 /* move to next desc. if there is a need */
1284 if ((i & 0x3) == 0) {
1285 k = 0;
1286 producer = get_next_index(producer, num_txd);
1287 hwdesc = &hw->cmd_desc_head[producer];
1288 memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
1289 pbuf = &adapter->cmd_buf_arr[producer];
1290 pbuf->skb = NULL;
1291 }
1292 frag = &skb_shinfo(skb)->frags[i - 1];
1293 len = frag->size;
1294 offset = frag->page_offset;
1295
1296 temp_len = len;
1297 temp_dma = pci_map_page(pdev, frag->page, offset,
1298 len, PCI_DMA_TODEVICE);
1299 if (pci_dma_mapping_error(pdev, temp_dma)) {
1300 netxen_clean_tx_dma_mapping(pdev, pbuf, i);
1301 goto drop_packet;
1302 }
1303
1304 buffrag++;
1305 buffrag->dma = temp_dma;
1306 buffrag->length = temp_len;
1307
1308 switch (k) {
1309 case 0:
1310 hwdesc->buffer1_length = cpu_to_le16(temp_len);
1311 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1312 break;
1313 case 1:
1314 hwdesc->buffer2_length = cpu_to_le16(temp_len);
1315 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
1316 break;
1317 case 2:
1318 hwdesc->buffer3_length = cpu_to_le16(temp_len);
1319 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
1320 break;
1321 case 3:
1322 hwdesc->buffer4_length = cpu_to_le16(temp_len);
1323 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
1324 break;
1325 }
1326 frag++;
1327 }
1328 producer = get_next_index(producer, num_txd);
1329
1330 /* For LSO, we need to copy the MAC/IP/TCP headers into
1331 * the descriptor ring
1332 */
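/*
 * A single command descriptor has room for sizeof(struct
 * cmd_desc_type0) - 2 bytes of header data, so longer headers spill
 * into a second descriptor.
 */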
1333 if (is_tso) {
1334 int hdr_len, first_hdr_len, more_hdr;
1335 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1336 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
1337 first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
1338 more_hdr = 1;
1339 } else {
1340 first_hdr_len = hdr_len;
1341 more_hdr = 0;
1342 }
1343 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
1344 hwdesc = &hw->cmd_desc_head[producer];
1345 pbuf = &adapter->cmd_buf_arr[producer];
1346 pbuf->skb = NULL;
1347
1348 /* copy the first 64 bytes */
1349 memcpy(((void *)hwdesc) + 2,
1350 (void *)(skb->data), first_hdr_len);
1351 producer = get_next_index(producer, num_txd);
1352
1353 if (more_hdr) {
1354 hwdesc = &hw->cmd_desc_head[producer];
1355 pbuf = &adapter->cmd_buf_arr[producer];
1356 pbuf->skb = NULL;
1357 /* copy the next 64 bytes - should be enough except
1358 * for pathological case
1359 */
1360 skb_copy_from_linear_data_offset(skb, first_hdr_len,
1361 hwdesc,
1362 (hdr_len -
1363 first_hdr_len));
1364 producer = get_next_index(producer, num_txd);
1365 }
1366 }
1367
1368 adapter->cmd_producer = producer;
1369 adapter->stats.txbytes += skb->len;
1370
1371 netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
1372
1373 adapter->stats.xmitcalled++;
1374 netdev->trans_start = jiffies;
1375
1376 return NETDEV_TX_OK;
1377
1378 drop_packet:
1379 adapter->stats.txdropped++;
1380 dev_kfree_skb_any(skb);
1381 return NETDEV_TX_OK;
1382 }
1383
1384 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1385 {
1386 struct net_device *netdev = adapter->netdev;
1387 uint32_t temp, temp_state, temp_val;
1388 int rv = 0;
1389
1390 temp = adapter->pci_read_normalize(adapter, CRB_TEMP_STATE);
1391
1392 temp_state = nx_get_temp_state(temp);
1393 temp_val = nx_get_temp_val(temp);
1394
1395 if (temp_state == NX_TEMP_PANIC) {
1396 printk(KERN_ALERT
1397 "%s: Device temperature %d degrees C exceeds"
1398 " maximum allowed. Hardware has been shut down.\n",
1399 netxen_nic_driver_name, temp_val);
1400
1401 netif_carrier_off(netdev);
1402 netif_stop_queue(netdev);
1403 rv = 1;
1404 } else if (temp_state == NX_TEMP_WARN) {
1405 if (adapter->temp == NX_TEMP_NORMAL) {
1406 printk(KERN_ALERT
1407 "%s: Device temperature %d degrees C "
1408 "exceeds operating range."
1409 " Immediate action needed.\n",
1410 netxen_nic_driver_name, temp_val);
1411 }
1412 } else {
1413 if (adapter->temp == NX_TEMP_WARN) {
1414 printk(KERN_INFO
1415 "%s: Device temperature is now %d degrees C"
1416 " in normal range.\n", netxen_nic_driver_name,
1417 temp_val);
1418 }
1419 }
1420 adapter->temp = temp_state;
1421 return rv;
1422 }
1423
1424 static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1425 {
1426 struct net_device *netdev = adapter->netdev;
1427 u32 val, port, linkup;
1428
1429 port = adapter->physical_port;
1430
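/*
 * P3 reports per-function link state in CRB_XG_STATE_P3; on P2 the
 * CRB_XG_STATE register packs one bit per GbE port or one byte per
 * XGbE port.
 */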
1431 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1432 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3);
1433 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1434 linkup = (val == XG_LINK_UP_P3);
1435 } else {
1436 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
1437 if (adapter->ahw.board_type == NETXEN_NIC_GBE)
1438 linkup = (val >> port) & 1;
1439 else {
1440 val = (val >> port*8) & 0xff;
1441 linkup = (val == XG_LINK_UP);
1442 }
1443 }
1444
1445 if (adapter->ahw.linkup && !linkup) {
1446 printk(KERN_INFO "%s: %s NIC Link is down\n",
1447 netxen_nic_driver_name, netdev->name);
1448 adapter->ahw.linkup = 0;
1449 if (netif_running(netdev)) {
1450 netif_carrier_off(netdev);
1451 netif_stop_queue(netdev);
1452 }
1453
1454 netxen_nic_set_link_parameters(adapter);
1455 } else if (!adapter->ahw.linkup && linkup) {
1456 printk(KERN_INFO "%s: %s NIC Link is up\n",
1457 netxen_nic_driver_name, netdev->name);
1458 adapter->ahw.linkup = 1;
1459 if (netif_running(netdev)) {
1460 netif_carrier_on(netdev);
1461 netif_wake_queue(netdev);
1462 }
1463
1464 netxen_nic_set_link_parameters(adapter);
1465 }
1466 }
1467
1468 static void netxen_watchdog(unsigned long v)
1469 {
1470 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
1471
1472 SCHEDULE_WORK(&adapter->watchdog_task);
1473 }
1474
1475 void netxen_watchdog_task(struct work_struct *work)
1476 {
1477 struct netxen_adapter *adapter =
1478 container_of(work, struct netxen_adapter, watchdog_task);
1479
1480 if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter))
1481 return;
1482
1483 netxen_nic_handle_phy_intr(adapter);
1484
1485 if (netif_running(adapter->netdev))
1486 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1487 }
1488
1489 static void netxen_tx_timeout(struct net_device *netdev)
1490 {
1491 struct netxen_adapter *adapter = (struct netxen_adapter *)
1492 netdev_priv(netdev);
1493 SCHEDULE_WORK(&adapter->tx_timeout_task);
1494 }
1495
1496 static void netxen_tx_timeout_task(struct work_struct *work)
1497 {
1498 struct netxen_adapter *adapter =
1499 container_of(work, struct netxen_adapter, tx_timeout_task);
1500
1501 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
1502 netxen_nic_driver_name, adapter->netdev->name);
1503
1504 netxen_nic_disable_int(adapter);
1505 napi_disable(&adapter->napi);
1506
1507 adapter->netdev->trans_start = jiffies;
1508
1509 napi_enable(&adapter->napi);
1510 netxen_nic_enable_int(adapter);
1511 netif_wake_queue(adapter->netdev);
1512 }
1513
1514 /*
1515 * netxen_nic_get_stats - Get System Network Statistics
1516 * @netdev: network interface device structure
1517 */
1518 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1519 {
1520 struct netxen_adapter *adapter = netdev_priv(netdev);
1521 struct net_device_stats *stats = &adapter->net_stats;
1522
1523 memset(stats, 0, sizeof(*stats));
1524
1525 /* total packets received */
1526 stats->rx_packets = adapter->stats.no_rcv;
1527 /* total packets transmitted */
1528 stats->tx_packets = adapter->stats.xmitedframes +
1529 adapter->stats.xmitfinished;
1530 /* total bytes received */
1531 stats->rx_bytes = adapter->stats.rxbytes;
1532 /* total bytes transmitted */
1533 stats->tx_bytes = adapter->stats.txbytes;
1534 /* bad packets received */
1535 stats->rx_errors = adapter->stats.rcvdbadskb;
1536 /* packet transmit problems */
1537 stats->tx_errors = adapter->stats.nocmddescriptor;
1538 /* no space in linux buffers */
1539 stats->rx_dropped = adapter->stats.rxdropped;
1540 /* no space available in linux */
1541 stats->tx_dropped = adapter->stats.txdropped;
1542
1543 return stats;
1544 }
1545
1546 static irqreturn_t netxen_intr(int irq, void *data)
1547 {
1548 struct netxen_adapter *adapter = data;
1549 u32 status = 0;
1550
1551 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1552
1553 if (!(status & adapter->legacy_intr.int_vec_bit))
1554 return IRQ_NONE;
1555
1556 if (adapter->ahw.revision_id >= NX_P3_B1) {
1557 /* check interrupt state machine, to be sure */
1558 status = adapter->pci_read_immediate(adapter,
1559 ISR_INT_STATE_REG);
1560 if (!ISR_LEGACY_INT_TRIGGERED(status))
1561 return IRQ_NONE;
1562
1563 } else {
1564 unsigned long our_int = 0;
1565
1566 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
1567
1568 /* not our interrupt */
1569 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
1570 return IRQ_NONE;
1571
1572 /* claim interrupt */
1573 adapter->pci_write_normalize(adapter,
1574 CRB_INT_VECTOR, (our_int & 0xffffffff));
1575 }
1576
1577 /* clear interrupt */
1578 if (adapter->fw_major < 4)
1579 netxen_nic_disable_int(adapter);
1580
1581 adapter->pci_write_immediate(adapter,
1582 adapter->legacy_intr.tgt_status_reg,
1583 0xffffffff);
1584 /* read twice to ensure write is flushed */
1585 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1586 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1587
1588 napi_schedule(&adapter->napi);
1589
1590 return IRQ_HANDLED;
1591 }
1592
1593 static irqreturn_t netxen_msi_intr(int irq, void *data)
1594 {
1595 struct netxen_adapter *adapter = data;
1596
1597 /* clear interrupt */
1598 adapter->pci_write_immediate(adapter,
1599 msi_tgt_status[adapter->ahw.pci_func], 0xffffffff);
1600
1601 napi_schedule(&adapter->napi);
1602 return IRQ_HANDLED;
1603 }
1604
1605 static int netxen_nic_poll(struct napi_struct *napi, int budget)
1606 {
1607 struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi);
1608 int tx_complete;
1609 int ctx;
1610 int work_done;
1611
1612 tx_complete = netxen_process_cmd_ring(adapter);
1613
1614 work_done = 0;
1615 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
1616 /*
1617 * Fairness issue. This will give undue weight to the
1618 * receive context 0.
1619 */
1620
1621 /*
1622 * To avoid starvation, we give each of our receivers
1623 * a fraction of the quota. Sometimes, it might happen that we
1624 * have enough quota to process every packet, but since all the
1625 * packets are on one context, it gets only half of the quota,
1626 * and ends up not processing it.
1627 */
1628 work_done += netxen_process_rcv_ring(adapter, ctx,
1629 budget / MAX_RCV_CTX);
1630 }
1631
1632 if ((work_done < budget) && tx_complete) {
1633 netif_rx_complete(&adapter->napi);
1634 netxen_nic_enable_int(adapter);
1635 }
1636
1637 return work_done;
1638 }
1639
1640 #ifdef CONFIG_NET_POLL_CONTROLLER
1641 static void netxen_nic_poll_controller(struct net_device *netdev)
1642 {
1643 struct netxen_adapter *adapter = netdev_priv(netdev);
1644 disable_irq(adapter->irq);
1645 netxen_intr(adapter->irq, adapter);
1646 enable_irq(adapter->irq);
1647 }
1648 #endif
1649
1650 static struct pci_driver netxen_driver = {
1651 .name = netxen_nic_driver_name,
1652 .id_table = netxen_pci_tbl,
1653 .probe = netxen_nic_probe,
1654 .remove = __devexit_p(netxen_nic_remove)
1655 };
1656
1657 /* Driver Registration on NetXen card */
1658
1659 static int __init netxen_init_module(void)
1660 {
1661 if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
1662 return -ENOMEM;
1663
1664 return pci_register_driver(&netxen_driver);
1665 }
1666
1667 module_init(netxen_init_module);
1668
1669 static void __exit netxen_exit_module(void)
1670 {
1671 pci_unregister_driver(&netxen_driver);
1672 destroy_workqueue(netxen_workq);
1673 }
1674
1675 module_exit(netxen_exit_module);