853dee8057d95e90b0305b615aaa555d15d30e6b
[deliverable/linux.git] / drivers / net / netxen / netxen_nic_main.c
1 /*
2 * Copyright (C) 2003 - 2009 NetXen, Inc.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
22 *
23 * Contact Information:
24 * info@netxen.com
25 * NetXen Inc,
26 * 18922 Forge Drive
27 * Cupertino, CA 95014-0701
28 *
29 */
30
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include "netxen_nic_hw.h"
34
35 #include "netxen_nic.h"
36 #include "netxen_nic_phan_reg.h"
37
38 #include <linux/dma-mapping.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <linux/ipv6.h>
42
MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);

char netxen_nic_driver_name[] = "netxen_nic";
static char netxen_nic_driver_string[] = "NetXen Network Driver version "
	NETXEN_NIC_LINUX_VERSIONID;

/* Requested PHY port mode for XG-capable boards; validated and
 * programmed by netxen_set_port_mode(). */
static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;

/* Default to restricted 1G auto-neg mode */
static int wol_port_mode = 5;

/* Allow MSI when MSI-X is unavailable (0 forces legacy INTx). */
static int use_msi = 1;

/* Allow MSI-X where the silicon supports it (P3 only, see
 * netxen_check_options()). */
static int use_msi_x = 1;
60 /* Local functions to NetXen NIC driver */
61 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
62 const struct pci_device_id *ent);
63 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
64 static int netxen_nic_open(struct net_device *netdev);
65 static int netxen_nic_close(struct net_device *netdev);
66 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
67 static void netxen_tx_timeout(struct net_device *netdev);
68 static void netxen_tx_timeout_task(struct work_struct *work);
69 static void netxen_watchdog(unsigned long);
70 static int netxen_nic_poll(struct napi_struct *napi, int budget);
71 #ifdef CONFIG_NET_POLL_CONTROLLER
72 static void netxen_nic_poll_controller(struct net_device *netdev);
73 #endif
74 static irqreturn_t netxen_intr(int irq, void *data);
75 static irqreturn_t netxen_msi_intr(int irq, void *data);
76 static irqreturn_t netxen_msix_intr(int irq, void *data);
77
/* PCI Device ID Table */
/* Each entry additionally matches on the ethernet PCI class, so
 * non-network functions of the same device are skipped. */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
	ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
	ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
	ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
	ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
	ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
	ENTRY(PCI_DEVICE_ID_NX3031),
	{0,}	/* terminator */
};
94
95 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
96
97 static struct workqueue_struct *netxen_workq;
98 #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
99 #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
100
/* duplicate forward declaration of netxen_watchdog() removed — it is
 * already declared with the other local prototypes above */
102
/* Per-port (0-3) CRB offsets where the host publishes the tx (cmd)
 * ring producer index on pre-4.0 firmware (see netxen_nic_attach). */
static uint32_t crb_cmd_producer[4] = {
	CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
	CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
};
107
108 void
109 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
110 uint32_t crb_producer)
111 {
112 adapter->pci_write_normalize(adapter,
113 adapter->crb_addr_cmd_producer, crb_producer);
114 }
115
/* Per-port (0-3) CRB offsets where the host mirrors the tx (cmd)
 * ring consumer index on pre-4.0 firmware (see netxen_nic_attach). */
static uint32_t crb_cmd_consumer[4] = {
	CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
	CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
};
120
121 static inline void
122 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
123 u32 crb_consumer)
124 {
125 adapter->pci_write_normalize(adapter,
126 adapter->crb_addr_cmd_consumer, crb_consumer);
127 }
128
/* Per-PCI-function interrupt target status registers, indexed by
 * pci_func.  NOTE(review): presumably consumed by the MSI interrupt
 * handler defined later in this file — confirm usage there. */
static uint32_t msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

/* Legacy INTx register sets; the entry for this PCI function is copied
 * into adapter->legacy_intr by netxen_setup_intr(). */
static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
137
138 static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
139 {
140 struct netxen_adapter *adapter = sds_ring->adapter;
141
142 adapter->pci_write_normalize(adapter, sds_ring->crb_intr_mask, 0);
143 }
144
145 static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
146 {
147 struct netxen_adapter *adapter = sds_ring->adapter;
148
149 adapter->pci_write_normalize(adapter, sds_ring->crb_intr_mask, 0x1);
150
151 if (!NETXEN_IS_MSI_FAMILY(adapter))
152 adapter->pci_write_immediate(adapter,
153 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
154 }
155
156 static void
157 netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
158 {
159 int ring;
160 struct nx_host_sds_ring *sds_ring;
161 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
162
163 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
164 adapter->max_sds_rings = (num_online_cpus() >= 4) ? 4 : 2;
165 else
166 adapter->max_sds_rings = 1;
167
168 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
169 sds_ring = &recv_ctx->sds_rings[ring];
170 netif_napi_add(netdev, &sds_ring->napi,
171 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
172 }
173 }
174
175 static void
176 netxen_napi_enable(struct netxen_adapter *adapter)
177 {
178 int ring;
179 struct nx_host_sds_ring *sds_ring;
180 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
181
182 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
183 sds_ring = &recv_ctx->sds_rings[ring];
184 napi_enable(&sds_ring->napi);
185 netxen_nic_enable_int(sds_ring);
186 }
187 }
188
189 static void
190 netxen_napi_disable(struct netxen_adapter *adapter)
191 {
192 int ring;
193 struct nx_host_sds_ring *sds_ring;
194 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
195
196 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
197 sds_ring = &recv_ctx->sds_rings[ring];
198 netxen_nic_disable_int(sds_ring);
199 napi_disable(&sds_ring->napi);
200 }
201 }
202
/*
 * Set the widest DMA mask the chip revision supports: 35 bits on
 * P2 C1, 39 bits on P3 B0 and later, 32 bits otherwise (and always 32
 * on IA64).  The consistent (coherent) mask stays 32-bit — only the
 * descriptor rings come from that pool.
 *
 * Sets adapter->pci_using_dac when the masks are accepted.
 * Returns 0 on success, -EIO if the PCI layer rejects the masks.
 */
static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
{
	struct pci_dev *pdev = adapter->pdev;
	uint64_t mask, cmask;

	adapter->pci_using_dac = 0;

	mask = DMA_BIT_MASK(32);
	/*
	 * Consistent DMA mask is set to 32 bit because it cannot be set to
	 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
	 * come off this pool.
	 */
	cmask = DMA_BIT_MASK(32);

#ifndef CONFIG_IA64
	if (revision_id >= NX_P3_B0)
		mask = DMA_BIT_MASK(39);
	else if (revision_id == NX_P2_C1)
		mask = DMA_BIT_MASK(35);
#endif
	if (pci_set_dma_mask(pdev, mask) == 0 &&
		pci_set_consistent_dma_mask(pdev, cmask) == 0) {
		adapter->pci_using_dac = 1;
		return 0;
	}

	return -EIO;
}
232
/* Update addressable range if firmware supports it */
/*
 * CRB_DMA_SHIFT reports how many address bits beyond 32 the firmware
 * can handle; values >= 32 mean "no update" (e.g. the 0x55555555
 * sentinel seeded by netxen_start_firmware()).  When the reported
 * range is worth using, widen the streaming DMA mask to 32+shift bits;
 * if the PCI layer rejects it, fall back to the previous mask.
 *
 * NOTE(review): the P2 C1 condition only fires for shift <= 4 —
 * confirm this matches the firmware's reporting convention.
 */
static int
nx_update_dma_mask(struct netxen_adapter *adapter)
{
	int change, shift, err;
	uint64_t mask, old_mask;
	struct pci_dev *pdev = adapter->pdev;

	change = 0;

	shift = netxen_nic_reg_read(adapter, CRB_DMA_SHIFT);
	if (shift >= 32)
		return 0;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
		change = 1;
	else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
		change = 1;

	if (change) {
		old_mask = pdev->dma_mask;
		mask = (1ULL<<(32+shift)) - 1;

		err = pci_set_dma_mask(pdev, mask);
		if (err)
			return pci_set_dma_mask(pdev, old_mask);
	}

	return 0;
}
263
264 static void netxen_check_options(struct netxen_adapter *adapter)
265 {
266 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
267 adapter->num_rxd = MAX_RCV_DESCRIPTORS_10G;
268 else if (adapter->ahw.port_type == NETXEN_NIC_GBE)
269 adapter->num_rxd = MAX_RCV_DESCRIPTORS_1G;
270
271 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
272 adapter->msix_supported = !!use_msi_x;
273 else
274 adapter->msix_supported = 0;
275
276 adapter->num_txd = MAX_CMD_DESCRIPTORS_HOST;
277 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS;
278 adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
279
280 return;
281 }
282
/*
 * Validate/complete the one-time hardware init after a cold power up.
 *
 * @first_boot: value the caller read from CAM RAM 0x1fc; 0x55555555
 *	means the device has not yet been through a boot cycle.
 *
 * Returns 0 on success, -EIO if the P2 boot-loader handshake fails.
 */
static int
netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
{
	u32 val, timeout;

	if (first_boot == 0x55555555) {
		/* This is the first boot after power up */
		adapter->pci_write_normalize(adapter,
			NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);

		/* only P2 needs the boot-loader handshake below */
		if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
			return 0;

		/* PCI bus master workaround */
		adapter->hw_read_wx(adapter,
			NETXEN_PCIE_REG(0x4), &first_boot, 4);
		if (!(first_boot & 0x4)) {
			first_boot |= 0x4;
			adapter->hw_write_wx(adapter,
				NETXEN_PCIE_REG(0x4), &first_boot, 4);
			/* read back after write */
			adapter->hw_read_wx(adapter,
				NETXEN_PCIE_REG(0x4), &first_boot, 4);
		}

		/* This is the first boot after power up */
		adapter->hw_read_wx(adapter,
			NETXEN_ROMUSB_GLB_SW_RESET, &first_boot, 4);
		if (first_boot != 0x80000f) {
			/* clear the register for future unloads/loads */
			adapter->pci_write_normalize(adapter,
				NETXEN_CAM_RAM(0x1fc), 0);
			return -EIO;
		}

		/* Start P2 boot loader */
		val = adapter->pci_read_normalize(adapter,
			NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
		adapter->pci_write_normalize(adapter,
			NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
		/* wait up to ~5s for the boot loader to consume the
		 * magic value written above */
		timeout = 0;
		do {
			msleep(1);
			val = adapter->pci_read_normalize(adapter,
				NETXEN_CAM_RAM(0x1fc));

			if (++timeout > 5000)
				return -EIO;

		} while (val == NETXEN_BDINFO_MAGIC);
	}
	return 0;
}
335
336 static void netxen_set_port_mode(struct netxen_adapter *adapter)
337 {
338 u32 val, data;
339
340 val = adapter->ahw.board_type;
341 if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
342 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
343 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
344 data = NETXEN_PORT_MODE_802_3_AP;
345 adapter->hw_write_wx(adapter,
346 NETXEN_PORT_MODE_ADDR, &data, 4);
347 } else if (port_mode == NETXEN_PORT_MODE_XG) {
348 data = NETXEN_PORT_MODE_XG;
349 adapter->hw_write_wx(adapter,
350 NETXEN_PORT_MODE_ADDR, &data, 4);
351 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
352 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
353 adapter->hw_write_wx(adapter,
354 NETXEN_PORT_MODE_ADDR, &data, 4);
355 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
356 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
357 adapter->hw_write_wx(adapter,
358 NETXEN_PORT_MODE_ADDR, &data, 4);
359 } else {
360 data = NETXEN_PORT_MODE_AUTO_NEG;
361 adapter->hw_write_wx(adapter,
362 NETXEN_PORT_MODE_ADDR, &data, 4);
363 }
364
365 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
366 (wol_port_mode != NETXEN_PORT_MODE_XG) &&
367 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
368 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
369 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
370 }
371 adapter->hw_write_wx(adapter, NETXEN_WOL_PORT_MODE,
372 &wol_port_mode, 4);
373 }
374 }
375
/*
 * Flip the MSI-X enable bit directly in the device's MSI-X capability.
 *
 * NOTE(review): the dword read at the capability base places the
 * 16-bit message control word in the upper half; PCI_MSIX_FLAGS_ENABLE
 * is presumably defined to match that placement in this kernel —
 * confirm against pci_regs.h.  Also note that disabling clears the
 * whole dword rather than just the enable bit.
 */
static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
{
	u32 control;
	int pos;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_dword(pdev, pos, &control);
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			control = 0;
		pci_write_config_dword(pdev, pos, control);
	}
}
391
392 static void netxen_init_msix_entries(struct netxen_adapter *adapter)
393 {
394 int i;
395
396 for (i = 0; i < MSIX_ENTRIES_PER_ADAPTER; i++)
397 adapter->msix_entries[i].entry = i;
398 }
399
/*
 * Read the permanent MAC address (P3: firmware query; P2: flash) and
 * install it as the netdev's address, programming the NIC when the
 * address is valid.
 *
 * Returns 0 on success, -EIO if the address cannot be read.
 */
static int
netxen_read_mac_addr(struct netxen_adapter *adapter)
{
	int i;
	unsigned char *p;
	__le64 mac_addr;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
			return -EIO;
	} else {
		if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
			return -EIO;
	}

	/* the 64-bit value carries the address in reversed byte order */
	p = (unsigned char *)&mac_addr;
	for (i = 0; i < 6; i++)
		netdev->dev_addr[i] = *(p + 5 - i);

	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	/* set station address */

	if (!is_valid_ether_addr(netdev->perm_addr))
		dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
	else
		adapter->macaddr_set(adapter, netdev->dev_addr);

	return 0;
}
432
433 static void netxen_set_multicast_list(struct net_device *dev)
434 {
435 struct netxen_adapter *adapter = netdev_priv(dev);
436
437 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
438 netxen_p3_nic_set_multi(dev);
439 else
440 netxen_p2_nic_set_multi(dev);
441 }
442
/* net_device callbacks shared by every NetXen port. */
static const struct net_device_ops netxen_netdev_ops = {
	.ndo_open	   = netxen_nic_open,
	.ndo_stop	   = netxen_nic_close,
	.ndo_start_xmit    = netxen_nic_xmit_frame,
	.ndo_get_stats	   = netxen_nic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = netxen_set_multicast_list,
	.ndo_set_mac_address    = netxen_nic_set_mac,
	.ndo_change_mtu	   = netxen_nic_change_mtu,
	.ndo_tx_timeout	   = netxen_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = netxen_nic_poll_controller,
#endif
};
457
/*
 * Pick the interrupt mode for this function: MSI-X when supported,
 * else MSI (if the use_msi module parameter allows), else legacy INTx.
 * Also snapshots the legacy INTx register set for this PCI function so
 * the interrupt handlers can use it.
 */
static void
netxen_setup_intr(struct netxen_adapter *adapter)
{
	struct netxen_legacy_intr_set *legacy_intrp;
	struct pci_dev *pdev = adapter->pdev;

	adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);

	/* P3 B0+ has per-function legacy interrupt registers */
	if (adapter->ahw.revision_id >= NX_P3_B0)
		legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
	else
		legacy_intrp = &legacy_intr[0];
	adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
	adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
	adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;

	/* start from a clean MSI-X enable bit */
	netxen_set_msix_bit(pdev, 0);

	if (adapter->msix_supported) {

		netxen_init_msix_entries(adapter);
		if (pci_enable_msix(pdev, adapter->msix_entries,
				MSIX_ENTRIES_PER_ADAPTER))
			goto request_msi;	/* MSI-X failed: fall back */

		adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
		netxen_set_msix_bit(pdev, 1);
		dev_info(&pdev->dev, "using msi-x interrupts\n");

	} else {
	request_msi:
		if (use_msi && !pci_enable_msi(pdev)) {
			adapter->flags |= NETXEN_NIC_MSI_ENABLED;
			dev_info(&pdev->dev, "using msi interrupts\n");
		} else
			dev_info(&pdev->dev, "using legacy interrupts\n");
		/* single-vector modes reuse the PCI device irq */
		adapter->msix_entries[0].vector = pdev->irq;
	}
}
498
499 static void
500 netxen_teardown_intr(struct netxen_adapter *adapter)
501 {
502 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
503 pci_disable_msix(adapter->pdev);
504 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
505 pci_disable_msi(adapter->pdev);
506 }
507
508 static void
509 netxen_cleanup_pci_map(struct netxen_adapter *adapter)
510 {
511 if (adapter->ahw.db_base != NULL)
512 iounmap(adapter->ahw.db_base);
513 if (adapter->ahw.pci_base0 != NULL)
514 iounmap(adapter->ahw.pci_base0);
515 if (adapter->ahw.pci_base1 != NULL)
516 iounmap(adapter->ahw.pci_base1);
517 if (adapter->ahw.pci_base2 != NULL)
518 iounmap(adapter->ahw.pci_base2);
519 }
520
/*
 * Map BAR 0 (and, on P2 parts, the BAR 4 doorbell region) and install
 * the register accessors matching the BAR-0 layout: the 128MB and
 * 32MB layouts use the "_128M" accessors, the 2MB (P3) layout the
 * window-based "_2M" ones.
 *
 * Returns 0 on success, -EIO on an unknown layout or mapping failure.
 */
static int
netxen_setup_pci_map(struct netxen_adapter *adapter)
{
	void __iomem *mem_ptr0 = NULL;
	void __iomem *mem_ptr1 = NULL;
	void __iomem *mem_ptr2 = NULL;
	void __iomem *db_ptr = NULL;

	unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;

	struct pci_dev *pdev = adapter->pdev;
	int pci_func = adapter->ahw.pci_func;

	int err = 0;

	/*
	 * Set the CRB window to invalid. If any register in window 0 is
	 * accessed it should set the window to 0 and then reset it to 1.
	 */
	adapter->curr_window = 255;
	adapter->ahw.qdr_sn_window = -1;
	adapter->ahw.ddr_mn_window = -1;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);
	pci_len0 = 0;

	/* default to the 128M accessors; replaced below for 2M parts */
	adapter->hw_write_wx = netxen_nic_hw_write_wx_128M;
	adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
	adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
	adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
	adapter->pci_read_normalize = netxen_nic_pci_read_normalize_128M;
	adapter->pci_write_normalize = netxen_nic_pci_write_normalize_128M;
	adapter->pci_set_window = netxen_nic_pci_set_window_128M;
	adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
	adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;

	/* 128 Meg of memory */
	if (mem_len == NETXEN_PCI_128MB_SIZE) {
		mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
		mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
				SECOND_PAGE_GROUP_SIZE);
		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
				THIRD_PAGE_GROUP_SIZE);
	} else if (mem_len == NETXEN_PCI_32MB_SIZE) {
		/* 32MB layout has no first page group */
		mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
			SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
	} else if (mem_len == NETXEN_PCI_2MB_SIZE) {
		adapter->hw_write_wx = netxen_nic_hw_write_wx_2M;
		adapter->hw_read_wx = netxen_nic_hw_read_wx_2M;
		adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
		adapter->pci_write_immediate =
			netxen_nic_pci_write_immediate_2M;
		adapter->pci_read_normalize = netxen_nic_pci_read_normalize_2M;
		adapter->pci_write_normalize =
			netxen_nic_pci_write_normalize_2M;
		adapter->pci_set_window = netxen_nic_pci_set_window_2M;
		adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
		adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;

		mem_ptr0 = pci_ioremap_bar(pdev, 0);
		if (mem_ptr0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
		pci_len0 = mem_len;

		adapter->ahw.ddr_mn_window = 0;
		adapter->ahw.qdr_sn_window = 0;

		/* per-function CRB window register offsets */
		adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
			(pci_func * 0x20);
		adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW;
		if (pci_func < 4)
			adapter->ahw.ms_win_crb += (pci_func * 0x20);
		else
			adapter->ahw.ms_win_crb +=
				0xA0 + ((pci_func - 4) * 0x10);
	} else {
		return -EIO;
	}

	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));

	adapter->ahw.pci_base0 = mem_ptr0;
	adapter->ahw.pci_len0 = pci_len0;
	adapter->ahw.pci_base1 = mem_ptr1;
	adapter->ahw.pci_base2 = mem_ptr2;

	/* P3 has no separate doorbell BAR */
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		goto skip_doorbell;

	db_base = pci_resource_start(pdev, 4);	/* doorbell is on bar 4 */
	db_len = pci_resource_len(pdev, 4);

	if (db_len == 0) {
		printk(KERN_ERR "%s: doorbell is disabled\n",
				netxen_nic_driver_name);
		err = -EIO;
		goto err_out;
	}

	db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
	if (!db_ptr) {
		printk(KERN_ERR "%s: Failed to allocate doorbell map.",
				netxen_nic_driver_name);
		err = -EIO;
		goto err_out;
	}

skip_doorbell:
	adapter->ahw.db_base = db_ptr;
	adapter->ahw.db_len = db_len;
	return 0;

err_out:
	netxen_cleanup_pci_map(adapter);
	return err;
}
642
/*
 * Boot the firmware.  Only the "first" driver instance does the global
 * work (PCI function 0 on P3, port 0 on P2); every other instance
 * returns 0 immediately.  The sequence: cold-boot handshake, optional
 * port-mode programming, firmware load, offload-area setup, driver
 * version handshake, and the final phantom (peg) init.
 *
 * Returns 0 on success or a negative errno.
 */
static int
netxen_start_firmware(struct netxen_adapter *adapter)
{
	int val, err, first_boot;
	struct pci_dev *pdev = adapter->pdev;

	int first_driver = 0;
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (adapter->ahw.pci_func == 0)
			first_driver = 1;
	} else {
		if (adapter->portnum == 0)
			first_driver = 1;
	}

	if (!first_driver)
		return 0;

	/* 0x55555555 here indicates first boot after power up
	 * (see netxen_check_hw_init) */
	first_boot = adapter->pci_read_normalize(adapter,
			NETXEN_CAM_RAM(0x1fc));

	err = netxen_check_hw_init(adapter, first_boot);
	if (err) {
		dev_err(&pdev->dev, "error in init HW init sequence\n");
		return err;
	}

	if (first_boot != 0x55555555) {
		/* warm start: reset cmd-peg state and re-init from ROM */
		adapter->pci_write_normalize(adapter,
			CRB_CMDPEG_STATE, 0);
		netxen_pinit_from_rom(adapter, 0);
		msleep(1);
	}

	/* seed sentinel; nx_update_dma_mask() later reads back what the
	 * firmware leaves here */
	netxen_nic_reg_write(adapter, CRB_DMA_SHIFT, 0x55555555);
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_set_port_mode(adapter);

	netxen_load_firmware(adapter);

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {

		/* Initialize multicast addr pool owners */
		val = 0x7654;
		if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
			val |= 0x0f000000;
		netxen_crb_writelit_adapter(adapter,
				NETXEN_MAC_ADDR_CNTL_REG, val);

	}

	err = netxen_initialize_adapter_offload(adapter);
	if (err)
		return err;

	/*
	 * Tell the hardware our version number.
	 */
	val = (_NETXEN_NIC_LINUX_MAJOR << 16)
		| ((_NETXEN_NIC_LINUX_MINOR << 8))
		| (_NETXEN_NIC_LINUX_SUBVERSION);
	adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, val);

	/* Handshake with the card before we register the devices. */
	err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
	if (err) {
		netxen_free_adapter_offload(adapter);
		return err;
	}

	return 0;
}
715
716 static int
717 netxen_nic_request_irq(struct netxen_adapter *adapter)
718 {
719 irq_handler_t handler;
720 struct nx_host_sds_ring *sds_ring;
721 int err, ring;
722
723 unsigned long flags = IRQF_SAMPLE_RANDOM;
724 struct net_device *netdev = adapter->netdev;
725 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
726
727 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
728 handler = netxen_msix_intr;
729 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
730 handler = netxen_msi_intr;
731 else {
732 flags |= IRQF_SHARED;
733 handler = netxen_intr;
734 }
735 adapter->irq = netdev->irq;
736
737 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
738 sds_ring = &recv_ctx->sds_rings[ring];
739 sprintf(sds_ring->name, "%16s[%d]", netdev->name, ring);
740 err = request_irq(sds_ring->irq, handler,
741 flags, sds_ring->name, sds_ring);
742 if (err)
743 return err;
744 }
745
746 return 0;
747 }
748
749 static void
750 netxen_nic_free_irq(struct netxen_adapter *adapter)
751 {
752 int ring;
753 struct nx_host_sds_ring *sds_ring;
754
755 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
756
757 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
758 sds_ring = &recv_ctx->sds_rings[ring];
759 free_irq(sds_ring->irq, sds_ring);
760 }
761 }
762
/*
 * Bring the port to an operational state: program the port, MAC
 * address, link parameters, multicast filter and MTU, then start the
 * watchdog and enable NAPI/interrupts.
 *
 * Returns 0 or the error from the port init callback.
 */
static int
netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
{
	int err;

	err = adapter->init_port(adapter, adapter->physical_port);
	if (err) {
		printk(KERN_ERR "%s: Failed to initialize port %d\n",
				netxen_nic_driver_name, adapter->portnum);
		return err;
	}
	adapter->macaddr_set(adapter, netdev->dev_addr);

	netxen_nic_set_link_parameters(adapter);

	netxen_set_multicast_list(netdev);
	if (adapter->set_mtu)
		adapter->set_mtu(adapter, netdev->mtu);

	/* link state is (re)discovered by the watchdog */
	adapter->ahw.linkup = 0;
	mod_timer(&adapter->watchdog_timer, jiffies);

	netxen_napi_enable(adapter);

	/* spread rx flows across rings when more than one is active */
	if (adapter->max_sds_rings > 1)
		netxen_config_rss(adapter, 1);

	return 0;
}
792
/*
 * Quiesce the interface: stop the stack queue and NAPI, halt the port,
 * drop in-flight tx buffers, then flush deferred work and stop the
 * watchdog so nothing runs afterwards.
 */
static void
netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	netxen_napi_disable(adapter);

	if (adapter->stop_port)
		adapter->stop_port(adapter);

	netxen_release_tx_buffers(adapter);

	FLUSH_SCHEDULED_WORK();
	del_timer_sync(&adapter->watchdog_timer);
}
808
809
/*
 * Acquire everything needed to pass traffic: firmware init, sw and hw
 * ring resources, initial rx buffer posting, and interrupt setup.
 * Marks the adapter up (NETXEN_ADAPTER_UP_MAGIC) on success.
 *
 * Returns 0 or a negative errno; partially acquired resources are
 * released on failure.
 */
static int
netxen_nic_attach(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err, ring;
	struct nx_host_rds_ring *rds_ring;

	err = netxen_init_firmware(adapter);
	if (err != 0) {
		printk(KERN_ERR "Failed to init firmware\n");
		return -EIO;
	}

	/* pre-4.0 firmware exposes a third (LRO) rx descriptor ring */
	if (adapter->fw_major < 4)
		adapter->max_rds_rings = 3;
	else
		adapter->max_rds_rings = 2;

	err = netxen_alloc_sw_resources(adapter);
	if (err) {
		printk(KERN_ERR "%s: Error in setting sw resources\n",
			netdev->name);
		return err;
	}

	netxen_nic_clear_stats(adapter);

	err = netxen_alloc_hw_resources(adapter);
	if (err) {
		printk(KERN_ERR "%s: Error in setting hw resources\n",
			netdev->name);
		goto err_out_free_sw;
	}

	/* pre-4.0 firmware: host maintains tx producer/consumer in CRB */
	if (adapter->fw_major < 4) {
		adapter->crb_addr_cmd_producer =
			crb_cmd_producer[adapter->portnum];
		adapter->crb_addr_cmd_consumer =
			crb_cmd_consumer[adapter->portnum];

		netxen_nic_update_cmd_producer(adapter, 0);
		netxen_nic_update_cmd_consumer(adapter, 0);
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx.rds_rings[ring];
		netxen_post_rx_buffers(adapter, ring, rds_ring);
	}

	err = netxen_nic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
				netdev->name);
		goto err_out_free_rxbuf;
	}

	adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_rxbuf:
	netxen_release_rx_buffers(adapter);
	netxen_free_hw_resources(adapter);
err_out_free_sw:
	netxen_free_sw_resources(adapter);
	return err;
}
877
/*
 * Release everything netxen_nic_attach() acquired — interrupts, rx
 * buffers, hardware and software ring resources — and clear is_up so
 * a later attach starts from scratch.
 */
static void
netxen_nic_detach(struct netxen_adapter *adapter)
{
	netxen_nic_free_irq(adapter);

	netxen_release_rx_buffers(adapter);
	netxen_free_hw_resources(adapter);
	netxen_free_sw_resources(adapter);

	adapter->is_up = 0;
}
889
890 static int __devinit
891 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
892 {
893 struct net_device *netdev = NULL;
894 struct netxen_adapter *adapter = NULL;
895 int i = 0, err;
896 int pci_func_id = PCI_FUNC(pdev->devfn);
897 uint8_t revision_id;
898
899 if (pdev->class != 0x020000) {
900 printk(KERN_DEBUG "NetXen function %d, class %x will not "
901 "be enabled.\n",pci_func_id, pdev->class);
902 return -ENODEV;
903 }
904
905 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
906 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x"
907 "will not be enabled.\n",
908 NX_P3_A0, NX_P3_B1);
909 return -ENODEV;
910 }
911
912 if ((err = pci_enable_device(pdev)))
913 return err;
914
915 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
916 err = -ENODEV;
917 goto err_out_disable_pdev;
918 }
919
920 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
921 goto err_out_disable_pdev;
922
923 pci_set_master(pdev);
924
925 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
926 if(!netdev) {
927 printk(KERN_ERR"%s: Failed to allocate memory for the "
928 "device block.Check system memory resource"
929 " usage.\n", netxen_nic_driver_name);
930 goto err_out_free_res;
931 }
932
933 SET_NETDEV_DEV(netdev, &pdev->dev);
934
935 adapter = netdev_priv(netdev);
936 adapter->netdev = netdev;
937 adapter->pdev = pdev;
938 adapter->ahw.pci_func = pci_func_id;
939
940 revision_id = pdev->revision;
941 adapter->ahw.revision_id = revision_id;
942
943 err = nx_set_dma_mask(adapter, revision_id);
944 if (err)
945 goto err_out_free_netdev;
946
947 rwlock_init(&adapter->adapter_lock);
948 spin_lock_init(&adapter->tx_clean_lock);
949
950 err = netxen_setup_pci_map(adapter);
951 if (err)
952 goto err_out_free_netdev;
953
954 /* This will be reset for mezz cards */
955 adapter->portnum = pci_func_id;
956 adapter->rx_csum = 1;
957 adapter->mc_enabled = 0;
958 if (NX_IS_REVISION_P3(revision_id))
959 adapter->max_mc_count = 38;
960 else
961 adapter->max_mc_count = 16;
962
963 netdev->netdev_ops = &netxen_netdev_ops;
964 netdev->watchdog_timeo = 2*HZ;
965
966 netxen_nic_change_mtu(netdev, netdev->mtu);
967
968 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
969
970 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
971 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
972
973 if (NX_IS_REVISION_P3(revision_id)) {
974 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
975 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
976 }
977
978 if (adapter->pci_using_dac) {
979 netdev->features |= NETIF_F_HIGHDMA;
980 netdev->vlan_features |= NETIF_F_HIGHDMA;
981 }
982
983 if (netxen_nic_get_board_info(adapter) != 0) {
984 printk("%s: Error getting board config info.\n",
985 netxen_nic_driver_name);
986 err = -EIO;
987 goto err_out_iounmap;
988 }
989
990 netxen_initialize_adapter_ops(adapter);
991
992 /* Mezz cards have PCI function 0,2,3 enabled */
993 switch (adapter->ahw.board_type) {
994 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
995 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
996 if (pci_func_id >= 2)
997 adapter->portnum = pci_func_id - 2;
998 break;
999 default:
1000 break;
1001 }
1002
1003 err = netxen_start_firmware(adapter);
1004 if (err)
1005 goto err_out_iounmap;
1006
1007 nx_update_dma_mask(adapter);
1008
1009 netxen_nic_get_firmware_info(adapter);
1010
1011 /*
1012 * See if the firmware gave us a virtual-physical port mapping.
1013 */
1014 adapter->physical_port = adapter->portnum;
1015 if (adapter->fw_major < 4) {
1016 i = adapter->pci_read_normalize(adapter,
1017 CRB_V2P(adapter->portnum));
1018 if (i != 0x55555555)
1019 adapter->physical_port = i;
1020 }
1021
1022 netxen_check_options(adapter);
1023
1024 netxen_setup_intr(adapter);
1025
1026 netdev->irq = adapter->msix_entries[0].vector;
1027
1028 netxen_napi_add(adapter, netdev);
1029
1030 err = netxen_receive_peg_ready(adapter);
1031 if (err)
1032 goto err_out_disable_msi;
1033
1034 init_timer(&adapter->watchdog_timer);
1035 adapter->watchdog_timer.function = &netxen_watchdog;
1036 adapter->watchdog_timer.data = (unsigned long)adapter;
1037 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
1038 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
1039
1040 err = netxen_read_mac_addr(adapter);
1041 if (err)
1042 dev_warn(&pdev->dev, "failed to read mac addr\n");
1043
1044 netif_carrier_off(netdev);
1045 netif_stop_queue(netdev);
1046
1047 if ((err = register_netdev(netdev))) {
1048 printk(KERN_ERR "%s: register_netdev failed port #%d"
1049 " aborting\n", netxen_nic_driver_name,
1050 adapter->portnum);
1051 err = -EIO;
1052 goto err_out_disable_msi;
1053 }
1054
1055 pci_set_drvdata(pdev, adapter);
1056
1057 switch (adapter->ahw.port_type) {
1058 case NETXEN_NIC_GBE:
1059 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1060 adapter->netdev->name);
1061 break;
1062 case NETXEN_NIC_XGBE:
1063 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1064 adapter->netdev->name);
1065 break;
1066 }
1067
1068 return 0;
1069
1070 err_out_disable_msi:
1071 netxen_teardown_intr(adapter);
1072
1073 netxen_free_adapter_offload(adapter);
1074
1075 err_out_iounmap:
1076 netxen_cleanup_pci_map(adapter);
1077
1078 err_out_free_netdev:
1079 free_netdev(netdev);
1080
1081 err_out_free_res:
1082 pci_release_regions(pdev);
1083
1084 err_out_disable_pdev:
1085 pci_set_drvdata(pdev, NULL);
1086 pci_disable_device(pdev);
1087 return err;
1088 }
1089
/*
 * PCI remove: unwind everything netxen_nic_probe() set up.
 */
static void __devexit netxen_nic_remove(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	unregister_netdev(netdev);

	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
		netxen_nic_detach(adapter);

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
			netxen_p3_free_mac_list(adapter);
	}

	/* only port 0 owns the offload area (it allocated it in
	 * netxen_start_firmware via first_driver) */
	if (adapter->portnum == 0)
		netxen_free_adapter_offload(adapter);

	netxen_teardown_intr(adapter);

	netxen_cleanup_pci_map(adapter);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	free_netdev(netdev);
}
1123
/*
 * netxen_nic_suspend - PM suspend hook.
 *
 * Quiesces the interface, detaches hw resources, saves PCI config
 * space, optionally arms wake-on-LAN and drops the device into the
 * requested low-power state.  Always succeeds.
 */
static int
netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
{

	struct netxen_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* keep the stack away from the device while it is powered down */
	netif_device_detach(netdev);

	if (netif_running(netdev))
		netxen_nic_down(adapter, netdev);

	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
		netxen_nic_detach(adapter);

	pci_save_state(pdev);

	/* arm wake-up for both D3 variants if the hw supports WoL */
	if (netxen_nic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
1151
/*
 * netxen_nic_resume - PM resume hook.
 *
 * Restores power/config state, restarts firmware, and if the interface
 * was running before suspend re-attaches hw resources and brings it up.
 * Returns 0 on success or a negative errno.
 */
static int
netxen_nic_resume(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* 255 invalidates the cached CRB window so the next register
	 * access re-selects it -- presumably never a valid window id;
	 * TODO(review): confirm against the window-selection code */
	adapter->curr_window = 255;

	err = netxen_start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = netxen_nic_attach(adapter);
		if (err)
			return err;

		err = netxen_nic_up(adapter, netdev);
		if (err)
			return err;

		netif_device_attach(netdev);
	}

	return 0;
}
1188
1189 static int netxen_nic_open(struct net_device *netdev)
1190 {
1191 struct netxen_adapter *adapter = netdev_priv(netdev);
1192 int err = 0;
1193
1194 if (adapter->driver_mismatch)
1195 return -EIO;
1196
1197 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
1198 err = netxen_nic_attach(adapter);
1199 if (err)
1200 return err;
1201 }
1202
1203 err = netxen_nic_up(adapter, netdev);
1204 if (err)
1205 goto err_out;
1206
1207 netif_start_queue(netdev);
1208
1209 return 0;
1210
1211 err_out:
1212 netxen_nic_detach(adapter);
1213 return err;
1214 }
1215
1216 /*
1217 * netxen_nic_close - Disables a network interface entry point
1218 */
1219 static int netxen_nic_close(struct net_device *netdev)
1220 {
1221 struct netxen_adapter *adapter = netdev_priv(netdev);
1222
1223 netxen_nic_down(adapter, netdev);
1224 return 0;
1225 }
1226
1227 static bool netxen_tso_check(struct net_device *netdev,
1228 struct cmd_desc_type0 *desc, struct sk_buff *skb)
1229 {
1230 bool tso = false;
1231 u8 opcode = TX_ETHER_PKT;
1232 __be16 protocol = skb->protocol;
1233 u16 flags = 0;
1234
1235 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1236 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1237 protocol = vh->h_vlan_encapsulated_proto;
1238 flags = FLAGS_VLAN_TAGGED;
1239 }
1240
1241 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1242 skb_shinfo(skb)->gso_size > 0) {
1243
1244 desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1245 desc->total_hdr_length =
1246 skb_transport_offset(skb) + tcp_hdrlen(skb);
1247
1248 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1249 TX_TCP_LSO6 : TX_TCP_LSO;
1250 tso = true;
1251
1252 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1253 u8 l4proto;
1254
1255 if (protocol == cpu_to_be16(ETH_P_IP)) {
1256 l4proto = ip_hdr(skb)->protocol;
1257
1258 if (l4proto == IPPROTO_TCP)
1259 opcode = TX_TCP_PKT;
1260 else if(l4proto == IPPROTO_UDP)
1261 opcode = TX_UDP_PKT;
1262 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1263 l4proto = ipv6_hdr(skb)->nexthdr;
1264
1265 if (l4proto == IPPROTO_TCP)
1266 opcode = TX_TCPV6_PKT;
1267 else if(l4proto == IPPROTO_UDP)
1268 opcode = TX_UDPV6_PKT;
1269 }
1270 }
1271 desc->tcp_hdr_offset = skb_transport_offset(skb);
1272 desc->ip_hdr_offset = skb_network_offset(skb);
1273 netxen_set_tx_flags_opcode(desc, flags, opcode);
1274 return tso;
1275 }
1276
1277 static void
1278 netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
1279 struct netxen_cmd_buffer *pbuf, int last)
1280 {
1281 int k;
1282 struct netxen_skb_frag *buffrag;
1283
1284 buffrag = &pbuf->frag_array[0];
1285 pci_unmap_single(pdev, buffrag->dma,
1286 buffrag->length, PCI_DMA_TODEVICE);
1287
1288 for (k = 1; k < last; k++) {
1289 buffrag = &pbuf->frag_array[k];
1290 pci_unmap_page(pdev, buffrag->dma,
1291 buffrag->length, PCI_DMA_TODEVICE);
1292 }
1293 }
1294
1295 static inline void
1296 netxen_clear_cmddesc(u64 *desc)
1297 {
1298 int i;
1299 for (i = 0; i < 8; i++)
1300 desc[i] = 0ULL;
1301 }
1302
1303 static int
1304 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1305 {
1306 struct netxen_adapter *adapter = netdev_priv(netdev);
1307 struct netxen_hardware_context *hw = &adapter->ahw;
1308 unsigned int first_seg_len = skb->len - skb->data_len;
1309 struct netxen_cmd_buffer *pbuf;
1310 struct netxen_skb_frag *buffrag;
1311 struct cmd_desc_type0 *hwdesc;
1312 struct pci_dev *pdev = adapter->pdev;
1313 dma_addr_t temp_dma;
1314 int i, k;
1315
1316 u32 producer, consumer;
1317 int frag_count, no_of_desc;
1318 u32 num_txd = adapter->num_txd;
1319 bool is_tso = false;
1320
1321 frag_count = skb_shinfo(skb)->nr_frags + 1;
1322
1323 /* There 4 fragments per descriptor */
1324 no_of_desc = (frag_count + 3) >> 2;
1325
1326 producer = adapter->cmd_producer;
1327 smp_mb();
1328 consumer = adapter->last_cmd_consumer;
1329 if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) {
1330 netif_stop_queue(netdev);
1331 smp_mb();
1332 return NETDEV_TX_BUSY;
1333 }
1334
1335 /* Copy the descriptors into the hardware */
1336 hwdesc = &hw->cmd_desc_head[producer];
1337 netxen_clear_cmddesc((u64 *)hwdesc);
1338 /* Take skb->data itself */
1339 pbuf = &adapter->cmd_buf_arr[producer];
1340
1341 is_tso = netxen_tso_check(netdev, hwdesc, skb);
1342
1343 pbuf->skb = skb;
1344 pbuf->frag_count = frag_count;
1345 buffrag = &pbuf->frag_array[0];
1346 temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
1347 PCI_DMA_TODEVICE);
1348 if (pci_dma_mapping_error(pdev, temp_dma))
1349 goto drop_packet;
1350
1351 buffrag->dma = temp_dma;
1352 buffrag->length = first_seg_len;
1353 netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
1354 netxen_set_tx_port(hwdesc, adapter->portnum);
1355
1356 hwdesc->buffer_length[0] = cpu_to_le16(first_seg_len);
1357 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1358
1359 for (i = 1, k = 1; i < frag_count; i++, k++) {
1360 struct skb_frag_struct *frag;
1361 int len, temp_len;
1362 unsigned long offset;
1363
1364 /* move to next desc. if there is a need */
1365 if ((i & 0x3) == 0) {
1366 k = 0;
1367 producer = get_next_index(producer, num_txd);
1368 hwdesc = &hw->cmd_desc_head[producer];
1369 netxen_clear_cmddesc((u64 *)hwdesc);
1370 pbuf = &adapter->cmd_buf_arr[producer];
1371 pbuf->skb = NULL;
1372 }
1373 frag = &skb_shinfo(skb)->frags[i - 1];
1374 len = frag->size;
1375 offset = frag->page_offset;
1376
1377 temp_len = len;
1378 temp_dma = pci_map_page(pdev, frag->page, offset,
1379 len, PCI_DMA_TODEVICE);
1380 if (pci_dma_mapping_error(pdev, temp_dma)) {
1381 netxen_clean_tx_dma_mapping(pdev, pbuf, i);
1382 goto drop_packet;
1383 }
1384
1385 buffrag++;
1386 buffrag->dma = temp_dma;
1387 buffrag->length = temp_len;
1388
1389 hwdesc->buffer_length[k] = cpu_to_le16(temp_len);
1390 switch (k) {
1391 case 0:
1392 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1393 break;
1394 case 1:
1395 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
1396 break;
1397 case 2:
1398 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
1399 break;
1400 case 3:
1401 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
1402 break;
1403 }
1404 frag++;
1405 }
1406 producer = get_next_index(producer, num_txd);
1407
1408 /* For LSO, we need to copy the MAC/IP/TCP headers into
1409 * the descriptor ring
1410 */
1411 if (is_tso) {
1412 int hdr_len, first_hdr_len, more_hdr;
1413 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1414 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
1415 first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
1416 more_hdr = 1;
1417 } else {
1418 first_hdr_len = hdr_len;
1419 more_hdr = 0;
1420 }
1421 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
1422 hwdesc = &hw->cmd_desc_head[producer];
1423 pbuf = &adapter->cmd_buf_arr[producer];
1424 pbuf->skb = NULL;
1425
1426 /* copy the first 64 bytes */
1427 memcpy(((void *)hwdesc) + 2,
1428 (void *)(skb->data), first_hdr_len);
1429 producer = get_next_index(producer, num_txd);
1430
1431 if (more_hdr) {
1432 hwdesc = &hw->cmd_desc_head[producer];
1433 pbuf = &adapter->cmd_buf_arr[producer];
1434 pbuf->skb = NULL;
1435 /* copy the next 64 bytes - should be enough except
1436 * for pathological case
1437 */
1438 skb_copy_from_linear_data_offset(skb, first_hdr_len,
1439 hwdesc,
1440 (hdr_len -
1441 first_hdr_len));
1442 producer = get_next_index(producer, num_txd);
1443 }
1444 }
1445
1446 adapter->cmd_producer = producer;
1447 adapter->stats.txbytes += skb->len;
1448
1449 netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
1450
1451 adapter->stats.xmitcalled++;
1452 netdev->trans_start = jiffies;
1453
1454 return NETDEV_TX_OK;
1455
1456 drop_packet:
1457 adapter->stats.txdropped++;
1458 dev_kfree_skb_any(skb);
1459 return NETDEV_TX_OK;
1460 }
1461
/*
 * netxen_nic_check_temp - poll the board temperature state.
 *
 * Reads CRB_TEMP_STATE (packs a state code and a degrees-C value),
 * logs transitions, and on NX_TEMP_PANIC stops the interface.
 * Returns 1 when the device has been shut down, 0 otherwise.
 */
static int netxen_nic_check_temp(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t temp, temp_state, temp_val;
	int rv = 0;

	temp = adapter->pci_read_normalize(adapter, CRB_TEMP_STATE);

	temp_state = nx_get_temp_state(temp);
	temp_val = nx_get_temp_val(temp);

	if (temp_state == NX_TEMP_PANIC) {
		printk(KERN_ALERT
		       "%s: Device temperature %d degrees C exceeds"
		       " maximum allowed. Hardware has been shut down.\n",
		       netxen_nic_driver_name, temp_val);

		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
		rv = 1;
	} else if (temp_state == NX_TEMP_WARN) {
		/* warn only on the NORMAL -> WARN edge, not every poll */
		if (adapter->temp == NX_TEMP_NORMAL) {
			printk(KERN_ALERT
			       "%s: Device temperature %d degrees C "
			       "exceeds operating range."
			       " Immediate action needed.\n",
			       netxen_nic_driver_name, temp_val);
		}
	} else {
		/* back to normal: log only when recovering from WARN */
		if (adapter->temp == NX_TEMP_WARN) {
			printk(KERN_INFO
			       "%s: Device temperature is now %d degrees C"
			       " in normal range.\n", netxen_nic_driver_name,
			       temp_val);
		}
	}
	adapter->temp = temp_state;
	return rv;
}
1501
/*
 * netxen_nic_handle_phy_intr - poll link state and propagate changes.
 *
 * Reads the link state register (layout differs between P3 and earlier
 * revisions, and between GbE and XGbE ports), then updates carrier and
 * queue state on an up/down transition.
 */
static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 val, port, linkup;

	port = adapter->physical_port;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		/* P3: per-pci-function field inside CRB_XG_STATE_P3 */
		val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3);
		val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
		linkup = (val == XG_LINK_UP_P3);
	} else {
		val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
		if (adapter->ahw.port_type == NETXEN_NIC_GBE)
			/* GbE: one link bit per port */
			linkup = (val >> port) & 1;
		else {
			/* XGbE: one status byte per port */
			val = (val >> port*8) & 0xff;
			linkup = (val == XG_LINK_UP);
		}
	}

	if (adapter->ahw.linkup && !linkup) {
		/* link went down */
		printk(KERN_INFO "%s: %s NIC Link is down\n",
		       netxen_nic_driver_name, netdev->name);
		adapter->ahw.linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}

		netxen_nic_set_link_parameters(adapter);
	} else if (!adapter->ahw.linkup && linkup) {
		/* link came up */
		printk(KERN_INFO "%s: %s NIC Link is up\n",
		       netxen_nic_driver_name, netdev->name);
		adapter->ahw.linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}

		netxen_nic_set_link_parameters(adapter);
	}
}
1545
/* Watchdog timer callback: just defer the real work to process context. */
static void netxen_watchdog(unsigned long v)
{
	struct netxen_adapter *adapter = (struct netxen_adapter *)v;

	SCHEDULE_WORK(&adapter->watchdog_task);
}
1552
/*
 * netxen_watchdog_task - periodic housekeeping (runs on netxen_workq).
 *
 * Checks board temperature (port 0 only - a non-zero return means the
 * hw has been shut down, so stop rescheduling), polls link state, and
 * re-arms the watchdog timer while the interface is running.
 */
void netxen_watchdog_task(struct work_struct *work)
{
	struct netxen_adapter *adapter =
		container_of(work, struct netxen_adapter, watchdog_task);

	if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter))
		return;

	netxen_nic_handle_phy_intr(adapter);

	if (netif_running(adapter->netdev))
		mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
1566
1567 static void netxen_tx_timeout(struct net_device *netdev)
1568 {
1569 struct netxen_adapter *adapter = (struct netxen_adapter *)
1570 netdev_priv(netdev);
1571 SCHEDULE_WORK(&adapter->tx_timeout_task);
1572 }
1573
/*
 * netxen_tx_timeout_task - process-context handler for tx timeouts.
 *
 * Bounces NAPI (disable/enable), refreshes trans_start so the stack
 * does not immediately time out again, and restarts the tx queue.
 */
static void netxen_tx_timeout_task(struct work_struct *work)
{
	struct netxen_adapter *adapter =
		container_of(work, struct netxen_adapter, tx_timeout_task);

	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
	       netxen_nic_driver_name, adapter->netdev->name);

	netxen_napi_disable(adapter);

	adapter->netdev->trans_start = jiffies;

	netxen_napi_enable(adapter);
	netif_wake_queue(adapter->netdev);
}
1589
1590 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1591 {
1592 struct netxen_adapter *adapter = netdev_priv(netdev);
1593 struct net_device_stats *stats = &adapter->net_stats;
1594
1595 memset(stats, 0, sizeof(*stats));
1596
1597 stats->rx_packets = adapter->stats.no_rcv;
1598 stats->tx_packets = adapter->stats.xmitfinished;
1599 stats->rx_bytes = adapter->stats.rxbytes;
1600 stats->tx_bytes = adapter->stats.txbytes;
1601 stats->rx_dropped = adapter->stats.rxdropped;
1602 stats->tx_dropped = adapter->stats.txdropped;
1603
1604 return stats;
1605 }
1606
/*
 * netxen_intr - legacy INTx interrupt handler.
 *
 * INTx may be shared with other devices, so the handler first verifies
 * the interrupt is ours (vector bit, plus a per-revision state check),
 * then acks the hw and hands processing to NAPI.
 */
static irqreturn_t netxen_intr(int irq, void *data)
{
	struct nx_host_sds_ring *sds_ring = data;
	struct netxen_adapter *adapter = sds_ring->adapter;
	u32 status = 0;

	status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);

	/* shared line: not our interrupt if our vector bit is clear */
	if (!(status & adapter->legacy_intr.int_vec_bit))
		return IRQ_NONE;

	if (adapter->ahw.revision_id >= NX_P3_B1) {
		/* check interrupt state machine, to be sure */
		status = adapter->pci_read_immediate(adapter,
				ISR_INT_STATE_REG);
		if (!ISR_LEGACY_INT_TRIGGERED(status))
			return IRQ_NONE;

	} else {
		unsigned long our_int = 0;

		our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);

		/* not our interrupt */
		if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
			return IRQ_NONE;

		/* claim interrupt */
		adapter->pci_write_normalize(adapter,
				CRB_INT_VECTOR, (our_int & 0xffffffff));
	}

	/* clear interrupt */
	if (adapter->fw_major < 4)
		netxen_nic_disable_int(sds_ring);

	adapter->pci_write_immediate(adapter,
			adapter->legacy_intr.tgt_status_reg,
			0xffffffff);
	/* read twice to ensure write is flushed */
	adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
	adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);

	napi_schedule(&sds_ring->napi);

	return IRQ_HANDLED;
}
1654
/*
 * netxen_msi_intr - MSI interrupt handler.
 *
 * MSI vectors are not shared, so any interrupt here is ours: ack it in
 * the per-pci-function target status register and schedule NAPI.
 */
static irqreturn_t netxen_msi_intr(int irq, void *data)
{
	struct nx_host_sds_ring *sds_ring = data;
	struct netxen_adapter *adapter = sds_ring->adapter;

	/* clear interrupt */
	adapter->pci_write_immediate(adapter,
		msi_tgt_status[adapter->ahw.pci_func], 0xffffffff);

	napi_schedule(&sds_ring->napi);
	return IRQ_HANDLED;
}
1667
/* MSI-X handler: no device-level ack done here, just hand off to NAPI. */
static irqreturn_t netxen_msix_intr(int irq, void *data)
{
	struct nx_host_sds_ring *sds_ring = data;

	napi_schedule(&sds_ring->napi);
	return IRQ_HANDLED;
}
1675
1676 static int netxen_nic_poll(struct napi_struct *napi, int budget)
1677 {
1678 struct nx_host_sds_ring *sds_ring =
1679 container_of(napi, struct nx_host_sds_ring, napi);
1680
1681 struct netxen_adapter *adapter = sds_ring->adapter;
1682
1683 int tx_complete;
1684 int work_done;
1685
1686 tx_complete = netxen_process_cmd_ring(adapter);
1687
1688 work_done = netxen_process_rcv_ring(sds_ring, budget);
1689
1690 if ((work_done < budget) && tx_complete) {
1691 napi_complete(&sds_ring->napi);
1692 netxen_nic_enable_int(sds_ring);
1693 }
1694
1695 return work_done;
1696 }
1697
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll path (e.g. netconsole): invoke the irq handler by hand.
 * NOTE(review): netxen_intr() interprets its data argument as a
 * struct nx_host_sds_ring *, but this passes the adapter - looks
 * wrong; confirm against the request_irq() call in the setup path. */
static void netxen_nic_poll_controller(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	disable_irq(adapter->irq);
	netxen_intr(adapter->irq, adapter);
	enable_irq(adapter->irq);
}
#endif
1707
/* PCI entry points for all devices matched by netxen_pci_tbl. */
static struct pci_driver netxen_driver = {
	.name = netxen_nic_driver_name,
	.id_table = netxen_pci_tbl,
	.probe = netxen_nic_probe,
	.remove = __devexit_p(netxen_nic_remove),
	.suspend = netxen_nic_suspend,
	.resume = netxen_nic_resume
};
1716
1717 /* Driver Registration on NetXen card */
1718
1719 static int __init netxen_init_module(void)
1720 {
1721 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
1722
1723 if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
1724 return -ENOMEM;
1725
1726 return pci_register_driver(&netxen_driver);
1727 }
1728
1729 module_init(netxen_init_module);
1730
/* Module exit: unregister first so no new work can be queued, then
 * destroy the (now drained) workqueue. */
static void __exit netxen_exit_module(void)
{
	pci_unregister_driver(&netxen_driver);
	destroy_workqueue(netxen_workq);
}
1736
1737 module_exit(netxen_exit_module);