1 /**************************************************************************
2 *
3 * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following
13 * disclaimer in the documentation and/or other materials provided
14 * with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * The views and conclusions contained in the software and documentation
30 * are those of the authors and should not be interpreted as representing
31 * official policies, either expressed or implied, of Alacritech, Inc.
32 *
33 * Parts developed by LinSysSoft Sahara team
34 *
35 **************************************************************************/
36
37 /*
38 * FILENAME: sxg.c
39 *
40 * The SXG driver for Alacritech's 10Gbe products.
41 *
42 * NOTE: This is the standard, non-accelerated version of Alacritech's
43 * IS-NIC driver.
44 */
45
46 #include <linux/kernel.h>
47 #include <linux/string.h>
48 #include <linux/errno.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/firmware.h>
52 #include <linux/ioport.h>
53 #include <linux/slab.h>
54 #include <linux/interrupt.h>
55 #include <linux/timer.h>
56 #include <linux/pci.h>
57 #include <linux/spinlock.h>
58 #include <linux/init.h>
59 #include <linux/netdevice.h>
60 #include <linux/etherdevice.h>
61 #include <linux/ethtool.h>
62 #include <linux/skbuff.h>
63 #include <linux/delay.h>
64 #include <linux/types.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/mii.h>
67 #include <linux/ip.h>
68 #include <linux/in.h>
69 #include <linux/tcp.h>
70 #include <linux/ipv6.h>
71
72 #define SLIC_GET_STATS_ENABLED 0
73 #define LINUX_FREES_ADAPTER_RESOURCES 1
74 #define SXG_OFFLOAD_IP_CHECKSUM 0
75 #define SXG_POWER_MANAGEMENT_ENABLED 0
76 #define VPCI 0
77 #define ATK_DEBUG 1
78 #define SXG_UCODE_DEBUG 0
79
80
81 #include "sxg_os.h"
82 #include "sxghw.h"
83 #include "sxghif.h"
84 #include "sxg.h"
85 #include "sxgdbg.h"
86 #include "sxgphycode-1.2.h"
87
88 static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
89 enum sxg_buffer_type BufferType);
90 static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
91 void *RcvBlock,
92 dma_addr_t PhysicalAddress,
93 u32 Length);
94 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
95 struct sxg_scatter_gather *SxgSgl,
96 dma_addr_t PhysicalAddress,
97 u32 Length);
98
99 static void sxg_mcast_init_crc32(void);
100 static int sxg_entry_open(struct net_device *dev);
101 static int sxg_second_open(struct net_device * dev);
102 static int sxg_entry_halt(struct net_device *dev);
103 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
104 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
105 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
106 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
107 struct sxg_scatter_gather *SxgSgl);
108
109 static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
110 int budget);
111 static void sxg_interrupt(struct adapter_t *adapter);
112 static int sxg_poll(struct napi_struct *napi, int budget);
113 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
114 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
115 int *sxg_napi_continue, int *work_done, int budget);
116 static void sxg_complete_slow_send(struct adapter_t *adapter);
117 static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
118 struct sxg_event *Event);
119 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
120 static bool sxg_mac_filter(struct adapter_t *adapter,
121 struct ether_header *EtherHdr, ushort length);
122 static struct net_device_stats *sxg_get_stats(struct net_device * dev);
123 void sxg_free_resources(struct adapter_t *adapter);
124 void sxg_free_rcvblocks(struct adapter_t *adapter);
125 void sxg_free_sgl_buffers(struct adapter_t *adapter);
126 void sxg_unmap_resources(struct adapter_t *adapter);
127 void sxg_free_mcast_addrs(struct adapter_t *adapter);
128 void sxg_collect_statistics(struct adapter_t *adapter);
129 static int sxg_register_interrupt(struct adapter_t *adapter);
130 static void sxg_remove_isr(struct adapter_t *adapter);
131 static irqreturn_t sxg_isr(int irq, void *dev_id);
132
133 static void sxg_watchdog(unsigned long data);
134 static void sxg_update_link_status (struct work_struct *work);
135
136 #define XXXTODO 0
137
138 #if XXXTODO
139 static int sxg_mac_set_address(struct net_device *dev, void *ptr);
140 #endif
141 static void sxg_mcast_set_list(struct net_device *dev);
142
143 static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);
144
145 static int sxg_initialize_adapter(struct adapter_t *adapter);
146 static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
147 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
148 unsigned char Index);
149 int sxg_change_mtu (struct net_device *netdev, int new_mtu);
150 static int sxg_initialize_link(struct adapter_t *adapter);
151 static int sxg_phy_init(struct adapter_t *adapter);
152 static void sxg_link_event(struct adapter_t *adapter);
153 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
154 static void sxg_link_state(struct adapter_t *adapter,
155 enum SXG_LINK_STATE LinkState);
156 static int sxg_write_mdio_reg(struct adapter_t *adapter,
157 u32 DevAddr, u32 RegAddr, u32 Value);
158 static int sxg_read_mdio_reg(struct adapter_t *adapter,
159 u32 DevAddr, u32 RegAddr, u32 *pValue);
160 static void sxg_set_mcast_addr(struct adapter_t *adapter);
161
162 static unsigned int sxg_first_init = 1;
163 static char *sxg_banner =
164 "Alacritech SLIC Technology(tm) Server and Storage \
165 10Gbe Accelerator (Non-Accelerated)\n";
166
167 static int sxg_debug = 1;
168 static int debug = -1;
169 static struct net_device *head_netdevice = NULL;
170
171 static struct sxgbase_driver sxg_global = {
172 .dynamic_intagg = 1,
173 };
174 static int intagg_delay = 100;
175 static u32 dynamic_intagg = 0;
176
177 char sxg_driver_name[] = "sxg_nic";
178 #define DRV_AUTHOR "Alacritech, Inc. Engineering"
179 #define DRV_DESCRIPTION \
180 "Alacritech SLIC Techonology(tm) Non-Accelerated 10Gbe Driver"
181 #define DRV_COPYRIGHT \
182 "Copyright 2000-2008 Alacritech, Inc. All rights reserved."
183
184 MODULE_AUTHOR(DRV_AUTHOR);
185 MODULE_DESCRIPTION(DRV_DESCRIPTION);
186 MODULE_LICENSE("GPL");
187
188 module_param(dynamic_intagg, int, 0);
189 MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
190 module_param(intagg_delay, int, 0);
191 MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
192
193 static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
194 {PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
195 {0,}
196 };
197
198 MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);
199
200 static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
201 {
202 writel(value, reg);
203 if (flush)
204 mb();
205 }
206
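/*
 * sxg_reg64_write - Write a 64-bit ucode register as two 32-bit halves:
 * the upper half goes to the per-CPU "Upper" holding register and the
 * lower half to the target register itself.  Bit64RegLock keeps the two
 * halves of concurrent writers from interleaving.
 */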
207 static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
208 u64 value, u32 cpu)
209 {
210 u32 value_high = (u32) (value >> 32);
211 u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
212 unsigned long flags;
213
214 spin_lock_irqsave(&adapter->Bit64RegLock, flags);
215 writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
216 writel(value_low, reg);
217 spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
218 }
219
220 static void sxg_init_driver(void)
221 {
222 if (sxg_first_init) {
223 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
224 __func__, jiffies);
225 sxg_first_init = 0;
226 spin_lock_init(&sxg_global.driver_lock);
227 }
228 }
229
230 static void sxg_dbg_macaddrs(struct adapter_t *adapter)
231 {
232 DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
233 adapter->netdev->name, adapter->currmacaddr[0],
234 adapter->currmacaddr[1], adapter->currmacaddr[2],
235 adapter->currmacaddr[3], adapter->currmacaddr[4],
236 adapter->currmacaddr[5]);
237 DBG_ERROR(" (%s) mac %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
238 adapter->netdev->name, adapter->macaddr[0],
239 adapter->macaddr[1], adapter->macaddr[2],
240 adapter->macaddr[3], adapter->macaddr[4],
241 adapter->macaddr[5]);
242 return;
243 }
244
245 /* SXG Globals */
246 static struct sxg_driver SxgDriver;
247
248 #ifdef ATKDBG
249 static struct sxg_trace_buffer LSxgTraceBuffer;
250 #endif /* ATKDBG */
251 static struct sxg_trace_buffer *SxgTraceBuffer = NULL;
252
253 /*
254 * MSI Related API's
255 */
256 int sxg_register_intr(struct adapter_t *adapter);
257 int sxg_enable_msi_x(struct adapter_t *adapter);
258 int sxg_add_msi_isr(struct adapter_t *adapter);
259 void sxg_remove_msix_isr(struct adapter_t *adapter);
260 int sxg_set_interrupt_capability(struct adapter_t *adapter);
261
262 int sxg_set_interrupt_capability(struct adapter_t *adapter)
263 {
264 int ret;
265
266 ret = sxg_enable_msi_x(adapter);
267 if (ret != STATUS_SUCCESS) {
268 adapter->msi_enabled = FALSE;
269 DBG_ERROR("sxg_set_interrupt_capability MSI-X Disable\n");
270 } else {
271 adapter->msi_enabled = TRUE;
272 DBG_ERROR("sxg_set_interrupt_capability MSI-X Enable\n");
273 }
274 return ret;
275 }
276
277 int sxg_register_intr(struct adapter_t *adapter)
278 {
279 int ret = 0;
280
281 if (adapter->msi_enabled) {
282 ret = sxg_add_msi_isr(adapter);
283 }
284 else {
285 DBG_ERROR("MSI-X Enable Failed. Using Pin INT\n");
286 ret = sxg_register_interrupt(adapter);
287 if (ret != STATUS_SUCCESS) {
288 DBG_ERROR("sxg_register_interrupt Failed\n");
289 }
290 }
291 return ret;
292 }
293
294 int sxg_enable_msi_x(struct adapter_t *adapter)
295 {
296 int ret;
297
298 adapter->nr_msix_entries = 1;
299 adapter->msi_entries = kmalloc(adapter->nr_msix_entries *
300 sizeof(struct msix_entry),GFP_KERNEL);
301 if (!adapter->msi_entries) {
302 DBG_ERROR("%s:MSI Entries memory allocation Failed\n",__func__);
303 return -ENOMEM;
304 }
305 memset(adapter->msi_entries, 0, adapter->nr_msix_entries *
306 sizeof(struct msix_entry));
307
308 ret = pci_enable_msix(adapter->pcidev, adapter->msi_entries,
309 adapter->nr_msix_entries);
310 if (ret) {
311 DBG_ERROR("Enabling MSI-X with %d vectors failed\n",
312 adapter->nr_msix_entries);
313 /* Should retry with the number of vectors returned. */
314 kfree(adapter->msi_entries);
315 return STATUS_FAILURE; /*MSI-X Enable failed.*/
316 }
317 return (STATUS_SUCCESS);
318 }
319
320 int sxg_add_msi_isr(struct adapter_t *adapter)
321 {
322 int ret,i;
323
324 if (!adapter->intrregistered) {
325 spin_unlock_irqrestore(&sxg_global.driver_lock,
326 sxg_global.flags);
327 for (i=0; i<adapter->nr_msix_entries; i++) {
328 ret = request_irq (adapter->msi_entries[i].vector,
329 sxg_isr,
330 IRQF_SHARED,
331 adapter->netdev->name,
332 adapter->netdev);
333 if (ret) {
334 spin_lock_irqsave(&sxg_global.driver_lock,
335 sxg_global.flags);
336 DBG_ERROR("sxg: MSI-X request_irq (%s) "
337 "FAILED [%x]\n", adapter->netdev->name,
338 ret);
339 return (ret);
340 }
341 }
342 }
343 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
344 adapter->msi_enabled = TRUE;
345 adapter->intrregistered = 1;
346 adapter->IntRegistered = TRUE;
347 return (STATUS_SUCCESS);
348 }
349
350 void sxg_remove_msix_isr(struct adapter_t *adapter)
351 {
352 int i,vector;
353 struct net_device *netdev = adapter->netdev;
354
355 for(i=0; i< adapter->nr_msix_entries;i++)
356 {
357 vector = adapter->msi_entries[i].vector;
358 DBG_ERROR("%s : Freeing IRQ vector#%d\n", __func__, vector);
359 free_irq(vector,netdev);
360 }
361 }
362
363
364 static void sxg_remove_isr(struct adapter_t *adapter)
365 {
366 struct net_device *netdev = adapter->netdev;
367 if (adapter->msi_enabled)
368 sxg_remove_msix_isr(adapter);
369 else
370 free_irq(adapter->netdev->irq, netdev);
371 }
372
373 void sxg_reset_interrupt_capability(struct adapter_t *adapter)
374 {
375 if (adapter->msi_enabled) {
376 pci_disable_msix(adapter->pcidev);
377 kfree(adapter->msi_entries);
378 adapter->msi_entries = NULL;
379 }
380 return;
381 }
382
383 /*
384 * sxg_download_microcode
385 *
386 * Download Microcode to Sahara adapter using the Linux
387 * Firmware module to get the ucode.sys file.
388 *
389 * Arguments -
390 * adapter - A pointer to our adapter structure
391 * UcodeSel - microcode file selection
392 *
393 * Return
394 * int
395 */
396 static bool sxg_download_microcode(struct adapter_t *adapter,
397 enum SXG_UCODE_SEL UcodeSel)
398 {
399 const struct firmware *fw;
400 const char *file = "";
401 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
402 int ret;
403 int ucode_start;
404 u32 Section;
405 u32 ThisSectionSize;
406 u32 instruction = 0;
407 u32 BaseAddress, AddressOffset, Address;
408 /* u32 Failure; */
409 u32 ValueRead;
410 u32 i;
411 u32 index = 0;
412 u32 num_sections = 0;
413 u32 sectionSize[16];
414 u32 sectionStart[16];
415
416 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
417 adapter, 0, 0, 0);
418
419 /*
420 * This routine is only implemented to download the microcode
421 * for the Revision B Sahara chip. Rev A and Diagnostic
422 * microcode is not supported at this time. If Rev A or
423 * diagnostic ucode is required, this routine will obviously
424 * need to change. Also, eventually need to add support for
425 * Rev B checked version of ucode. That's easy enough once
426 * the free version of Rev B works.
427 */
428 ASSERT(UcodeSel == SXG_UCODE_SYSTEM);
429 ASSERT(adapter->asictype == SAHARA_REV_B);
430 #if SXG_UCODE_DEBUG
431 file = "sxg/saharadbgdownloadB.sys";
432 #else
433 file = "sxg/saharadownloadB.sys";
434 #endif
435 ret = request_firmware(&fw, file, &adapter->pcidev->dev);
436 if (ret) {
437 DBG_ERROR("%s SXG_NIC: Failed to load firmware %s\n", __func__, file);
438 return FALSE;
439 }
440
441 /*
442 * The microcode .sys file starts with a 4 byte word containing
443 * the number of sections. That is followed by "num_sections" 4 byte
444 * words containing each "section" size. That is followed by num_sections
445 * 4 byte words containing each section "start" address.
446 *
447 * Following the above header, the .sys file contains the sections
448 * themselves: newline delineated 12 byte microcode instructions,
449 * with each section's size as specified above.
450 */
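/*
 * Sketch of the header layout as parsed below (fields are read in host
 * byte order straight from fw->data):
 *
 *   offset 0                     : u32 num_sections
 *   offset 4                     : u32 sectionSize[num_sections]
 *   offset 4 + 4 * num_sections  : u32 sectionStart[num_sections]
 *   ...followed by the 12-byte instructions for each section.
 */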
451 num_sections = *(u32 *)(fw->data + index);
452 index += 4;
453 ASSERT(num_sections <= 3);
454 for (i = 0; i < num_sections; i++) {
455 sectionSize[i] = *(u32 *)(fw->data + index);
456 index += 4;
457 }
458 for (i = 0; i < num_sections; i++) {
459 sectionStart[i] = *(u32 *)(fw->data + index);
460 index += 4;
461 }
462
463 /* First, reset the card */
464 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
465 udelay(50);
466 HwRegs = adapter->HwRegs;
467
468 /*
469 * Download each section of the microcode as specified in
470 * sectionSize[index] to sectionStart[index] address. As
471 * described above, the .sys file contains 12 byte word
472 * microcode instructions. The *download.sys file is generated
473 * using the objtosys.exe utility that was built for Sahara
474 * microcode.
475 */
476 /* See usage of this below when we read back for parity */
477 ucode_start = index;
478 instruction = *(u32 *)(fw->data + index);
479 index += 4;
480
481 for (Section = 0; Section < num_sections; Section++) {
482 BaseAddress = sectionStart[Section];
483 /* Size in instructions */
484 ThisSectionSize = sectionSize[Section] / 12;
485 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
486 AddressOffset++) {
487 u32 first_instr = 0; /* See comment below */
488
489 Address = BaseAddress + AddressOffset;
490 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
491 /* Write instruction bits 31 - 0 (low) */
492 first_instr = instruction;
493 WRITE_REG(HwRegs->UcodeDataLow, instruction, FLUSH);
494 instruction = *(u32 *)(fw->data + index);
495 index += 4; /* Advance to the "next" instruction */
496
497 /* Write instruction bits 63-32 (middle) */
498 WRITE_REG(HwRegs->UcodeDataMiddle, instruction, FLUSH);
499 instruction = *(u32 *)(fw->data + index);
500 index += 4; /* Advance to the "next" instruction */
501
502 /* Write instruction bits 95-64 (high) */
503 WRITE_REG(HwRegs->UcodeDataHigh, instruction, FLUSH);
504 instruction = *(u32 *)(fw->data + index);
505 index += 4; /* Advance to the "next" instruction */
506
507 /* Write instruction address with the WRITE bit set */
508 WRITE_REG(HwRegs->UcodeAddr,
509 (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
510 /*
511 * Sahara bug in the ucode download logic - the write to DataLow
512 * for the next instruction could get corrupted. To avoid this,
513 * write to DataLow again for this instruction (which may get
514 * corrupted, but it doesn't matter), then increment the address
515 * and write the data for the next instruction to DataLow. That
516 * write should succeed.
517 */
518 WRITE_REG(HwRegs->UcodeDataLow, first_instr, FLUSH);
519 }
520 }
521 /*
522 * Now repeat the entire operation reading the instruction back and
523 * checking for parity errors
524 */
525 index = ucode_start;
526
527 for (Section = 0; Section < num_sections; Section++) {
528 BaseAddress = sectionStart[Section];
529 /* Size in instructions */
530 ThisSectionSize = sectionSize[Section] / 12;
531 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
532 AddressOffset++) {
533 Address = BaseAddress + AddressOffset;
534 /* Write the address with the READ bit set */
535 WRITE_REG(HwRegs->UcodeAddr,
536 (Address | MICROCODE_ADDRESS_READ), FLUSH);
537 /* Read it back and check parity bit. */
538 READ_REG(HwRegs->UcodeAddr, ValueRead);
539 if (ValueRead & MICROCODE_ADDRESS_PARITY) {
540 DBG_ERROR("sxg: %s PARITY ERROR\n",
541 __func__);
542
543 return FALSE; /* Parity error */
544 }
545 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
546 /* Read the instruction back and compare */
547 /* First instruction */
548 instruction = *(u32 *)(fw->data + index);
549 index += 4;
550 READ_REG(HwRegs->UcodeDataLow, ValueRead);
551 if (ValueRead != instruction) {
552 DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
553 __func__);
554 return FALSE; /* Miscompare */
555 }
556 instruction = *(u32 *)(fw->data + index);
557 index += 4;
558 READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
559 if (ValueRead != instruction) {
560 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
561 __func__);
562 return FALSE; /* Miscompare */
563 }
564 instruction = *(u32 *)(fw->data + index);
565 index += 4;
566 READ_REG(HwRegs->UcodeDataHigh, ValueRead);
567 if (ValueRead != instruction) {
568 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
569 __func__);
570 return FALSE; /* Miscompare */
571 }
572 }
573 }
574
575 /* download finished */
576 release_firmware(fw);
577 /* Everything OK, Go. */
578 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
579
580 /*
581 * Poll the CardUp register to wait for microcode to initialize
582 * Give up after 10,000 attempts (500ms).
583 */
584 for (i = 0; i < 10000; i++) {
585 udelay(50);
586 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
587 if (ValueRead == 0xCAFE) {
588 break;
589 }
590 }
591 if (i == 10000) {
592 DBG_ERROR("sxg: %s TIMEOUT bringing up card - verify MICROCODE\n", __func__);
593
594 return FALSE; /* Timeout */
595 }
596 /*
597 * Now write the LoadSync register. This is used to
598 * synchronize with the card so it can scribble on the memory
599 * that contained 0xCAFE from the "CardUp" step above
600 */
601 if (UcodeSel == SXG_UCODE_SYSTEM) {
602 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
603 }
604
605 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
606 adapter, 0, 0, 0);
607 return (TRUE);
608 }
609
610 /*
611 * sxg_allocate_resources - Allocate memory and locks
612 *
613 * Arguments -
614 * adapter - A pointer to our adapter structure
615 *
616 * Return - int
617 */
618 static int sxg_allocate_resources(struct adapter_t *adapter)
619 {
620 int status = STATUS_SUCCESS;
621 u32 RssIds, IsrCount;
622 /* struct sxg_xmt_ring *XmtRing; */
623 /* struct sxg_rcv_ring *RcvRing; */
624
625 DBG_ERROR("%s ENTER\n", __func__);
626
627 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
628 adapter, 0, 0, 0);
629
630 /* Windows tells us how many CPUs it plans to use for */
631 /* RSS */
632 RssIds = SXG_RSS_CPU_COUNT(adapter);
633 IsrCount = adapter->msi_enabled ? RssIds : 1;
634
635 DBG_ERROR("%s Setup the spinlocks\n", __func__);
636
637 /* Allocate spinlocks and initialize listheads first. */
638 spin_lock_init(&adapter->RcvQLock);
639 spin_lock_init(&adapter->SglQLock);
640 spin_lock_init(&adapter->XmtZeroLock);
641 spin_lock_init(&adapter->Bit64RegLock);
642 spin_lock_init(&adapter->AdapterLock);
643 atomic_set(&adapter->pending_allocations, 0);
644
645 DBG_ERROR("%s Setup the lists\n", __func__);
646
647 InitializeListHead(&adapter->FreeRcvBuffers);
648 InitializeListHead(&adapter->FreeRcvBlocks);
649 InitializeListHead(&adapter->AllRcvBlocks);
650 InitializeListHead(&adapter->FreeSglBuffers);
651 InitializeListHead(&adapter->AllSglBuffers);
652
653 /*
654 * Mark these basic allocations done. This flag essentially
655 * tells the SxgFreeResources routine that it can grab spinlocks
656 * and reference listheads.
657 */
658 adapter->BasicAllocations = TRUE;
659 /*
660 * Main allocation loop. Start with the maximum supported by
661 * the microcode and back off if memory allocation
662 * fails. If we hit a minimum, fail.
663 */
664
665 for (;;) {
666 DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
667 (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));
668
669 /*
670 * Start with big items first - receive and transmit rings.
671 * At the moment I'm going to keep the ring size fixed and
672 * adjust the TCBs if we fail. Later we might
673 * consider reducing the ring size as well..
674 */
675 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
676 sizeof(struct sxg_xmt_ring) *
677 1,
678 &adapter->PXmtRings);
679 DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
680
681 if (!adapter->XmtRings) {
682 goto per_tcb_allocation_failed;
683 }
684 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
685
686 DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
687 (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
688 adapter->RcvRings =
689 pci_alloc_consistent(adapter->pcidev,
690 sizeof(struct sxg_rcv_ring) * 1,
691 &adapter->PRcvRings);
692 DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
693 if (!adapter->RcvRings) {
694 goto per_tcb_allocation_failed;
695 }
696 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
697 adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats), GFP_ATOMIC);
698 adapter->pucode_stats = pci_map_single(adapter->pcidev,
699 adapter->ucode_stats,
700 sizeof(struct sxg_ucode_stats),
701 PCI_DMA_FROMDEVICE);
702 // memset(adapter->ucode_stats, 0, sizeof(struct sxg_ucode_stats));
703 break;
704
705 per_tcb_allocation_failed:
706 /* an allocation failed. Free any successful allocations. */
707 if (adapter->XmtRings) {
708 pci_free_consistent(adapter->pcidev,
709 sizeof(struct sxg_xmt_ring) * 1,
710 adapter->XmtRings,
711 adapter->PXmtRings);
712 adapter->XmtRings = NULL;
713 }
714 if (adapter->RcvRings) {
715 pci_free_consistent(adapter->pcidev,
716 sizeof(struct sxg_rcv_ring) * 1,
717 adapter->RcvRings,
718 adapter->PRcvRings);
719 adapter->RcvRings = NULL;
720 }
721 /* Loop around and try again.... */
722 if (adapter->ucode_stats) {
723 pci_unmap_single(adapter->pcidev,
724 adapter->pucode_stats,
725 sizeof(struct sxg_ucode_stats), PCI_DMA_FROMDEVICE);
726 adapter->ucode_stats = NULL;
727 }
728
729 }
730
731 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
732 /* Initialize rcv zero and xmt zero rings */
733 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
734 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
735
736 /* Sanity check receive data structure format */
737 /* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
738 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
739 ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
740 SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
741
742 DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
743 (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));
744
745 /* Allocate event queues. */
746 adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
747 sizeof(struct sxg_event_ring) *
748 RssIds,
749 &adapter->PEventRings);
750
751 if (!adapter->EventRings) {
752 /* Caller will call SxgFreeAdapter to clean up above
753 * allocations */
754 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
755 adapter, SXG_MAX_ENTRIES, 0, 0);
756 status = STATUS_RESOURCES;
757 goto per_tcb_allocation_failed;
758 }
759 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
760
761 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
762 /* Allocate ISR */
763 adapter->Isr = pci_alloc_consistent(adapter->pcidev,
764 IsrCount, &adapter->PIsr);
765 if (!adapter->Isr) {
766 /* Caller will call SxgFreeAdapter to clean up above
767 * allocations */
768 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
769 adapter, SXG_MAX_ENTRIES, 0, 0);
770 status = STATUS_RESOURCES;
771 goto per_tcb_allocation_failed;
772 }
773 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
774
775 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
776 __func__, (unsigned int)sizeof(u32));
777
778 /* Allocate shared XMT ring zero index location */
779 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
780 sizeof(u32),
781 &adapter->
782 PXmtRingZeroIndex);
783 if (!adapter->XmtRingZeroIndex) {
784 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
785 adapter, SXG_MAX_ENTRIES, 0, 0);
786 status = STATUS_RESOURCES;
787 goto per_tcb_allocation_failed;
788 }
789 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
790
791 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
792 adapter, SXG_MAX_ENTRIES, 0, 0);
793
794 return status;
795 }
796
797 /*
798 * sxg_config_pci -
799 *
800 * Set up PCI Configuration space
801 *
802 * Arguments -
803 * pcidev - A pointer to our PCI device structure
804 */
805 static void sxg_config_pci(struct pci_dev *pcidev)
806 {
807 u16 pci_command;
808 u16 new_command;
809
810 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
811 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
812 /* Set the command register */
813 new_command = pci_command | (
814 /* Memory Space Enable */
815 PCI_COMMAND_MEMORY |
816 /* Bus master enable */
817 PCI_COMMAND_MASTER |
818 /* Memory write and invalidate */
819 PCI_COMMAND_INVALIDATE |
820 /* Parity error response */
821 PCI_COMMAND_PARITY |
822 /* System ERR */
823 PCI_COMMAND_SERR |
824 /* Fast back-to-back */
825 PCI_COMMAND_FAST_BACK);
826 if (pci_command != new_command) {
827 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
828 __func__, pci_command, new_command);
829 pci_write_config_word(pcidev, PCI_COMMAND, new_command);
830 }
831 }
832
833 /*
834 * sxg_read_config
835 * @adapter : Pointer to the adapter structure for the card
836 * This function will read the configuration data from EEPROM/FLASH
837 */
838 static inline int sxg_read_config(struct adapter_t *adapter)
839 {
840 /* struct sxg_config data; */
841 struct sxg_config *config;
842 struct sw_cfg_data *data;
843 dma_addr_t p_addr;
844 unsigned long status;
845 unsigned long i;
846 config = pci_alloc_consistent(adapter->pcidev,
847 sizeof(struct sxg_config), &p_addr);
848
849 if(!config) {
850 /*
851 * We can't get even this much memory. Complain loudly and
852 * get out of here.
853 */
854 printk(KERN_ERR"%s : Could not allocate memory for reading \
855 EEPROM\n", __func__);
856 return -ENOMEM;
857 }
858
859 data = &config->SwCfg;
860
861 /* Initialize (reflective memory) status register */
862 WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);
863
864 /* Send request to fetch configuration data */
865 WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
866 for(i=0; i<1000; i++) {
867 READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
868 if (status != SXG_CFG_TIMEOUT)
869 break;
870 mdelay(1); /* Do we really need this */
871 }
872
873 switch(status) {
874 /* Config read from EEPROM succeeded */
875 case SXG_CFG_LOAD_EEPROM:
876 /* Config read from Flash succeeded */
877 case SXG_CFG_LOAD_FLASH:
878 /*
879 * Copy the MAC address to adapter structure
880 * TODO: We are not doing the remaining part : FRU, etc
881 */
882 memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
883 sizeof(struct sxg_config_mac));
884 break;
885 case SXG_CFG_TIMEOUT:
886 case SXG_CFG_LOAD_INVALID:
887 case SXG_CFG_LOAD_ERROR:
888 default: /* Fix default handler later */
889 printk(KERN_WARNING"%s : We could not read the config \
890 word. Status = %ld\n", __func__, status);
891 break;
892 }
893 pci_free_consistent(adapter->pcidev, sizeof(struct sxg_config), config,
894 p_addr);
895 if (adapter->netdev) {
896 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
897 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
898 }
899 sxg_dbg_macaddrs(adapter);
900
901 return status;
902 }
903
904 static const struct net_device_ops sxg_netdev_ops = {
905 .ndo_open = sxg_entry_open,
906 .ndo_stop = sxg_entry_halt,
907 .ndo_start_xmit = sxg_send_packets,
908 .ndo_do_ioctl = sxg_ioctl,
909 .ndo_change_mtu = sxg_change_mtu,
910 .ndo_get_stats = sxg_get_stats,
911 .ndo_set_multicast_list = sxg_mcast_set_list,
912 .ndo_validate_addr = eth_validate_addr,
913 #if XXXTODO
914 .ndo_set_mac_address = sxg_mac_set_address,
915 #else
916 .ndo_set_mac_address = eth_mac_addr,
917 #endif
918 };
919
920 static int sxg_entry_probe(struct pci_dev *pcidev,
921 const struct pci_device_id *pci_tbl_entry)
922 {
923 static int did_version = 0;
924 int err;
925 struct net_device *netdev;
926 struct adapter_t *adapter;
927 void __iomem *memmapped_ioaddr;
928 u32 status = 0;
929 ulong mmio_start = 0;
930 ulong mmio_len = 0;
931 unsigned char revision_id;
932
933 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
934 __func__, jiffies, smp_processor_id());
935
936 /* Initialize trace buffer */
937 #ifdef ATKDBG
938 SxgTraceBuffer = &LSxgTraceBuffer;
939 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
940 #endif
941
942 sxg_global.dynamic_intagg = dynamic_intagg;
943
944 err = pci_enable_device(pcidev);
945
946 DBG_ERROR("Call pci_enable_device(%p) status[%x]\n", pcidev, err);
947 if (err) {
948 return err;
949 }
950
951 if (sxg_debug > 0 && did_version++ == 0) {
952 printk(KERN_INFO "%s\n", sxg_banner);
953 printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
954 }
955
956 pci_read_config_byte(pcidev, PCI_REVISION_ID, &revision_id);
957
958 if (!(err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)))) {
959 DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(64)) successful\n");
960 } else {
961 if ((err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)))) {
962 DBG_ERROR
963 ("No usable DMA configuration, aborting err[%x]\n",
964 err);
965 return err;
966 }
967 DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(32)) successful\n");
968 }
969
970 DBG_ERROR("Call pci_request_regions\n");
971
972 err = pci_request_regions(pcidev, sxg_driver_name);
973 if (err) {
974 DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
975 return err;
976 }
977
978 DBG_ERROR("call pci_set_master\n");
979 pci_set_master(pcidev);
980
981 DBG_ERROR("call alloc_etherdev\n");
982 netdev = alloc_etherdev(sizeof(struct adapter_t));
983 if (!netdev) {
984 err = -ENOMEM;
985 goto err_out_exit_sxg_probe;
986 }
987 DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);
988
989 SET_NETDEV_DEV(netdev, &pcidev->dev);
990
991 pci_set_drvdata(pcidev, netdev);
992 adapter = netdev_priv(netdev);
993 if (revision_id == 1) {
994 adapter->asictype = SAHARA_REV_A;
995 } else if (revision_id == 2) {
996 adapter->asictype = SAHARA_REV_B;
997 } else {
998 ASSERT(0);
999 DBG_ERROR("%s Unexpected revision ID %x\n", __func__, revision_id);
1000 goto err_out_exit_sxg_probe;
1001 }
1002 adapter->netdev = netdev;
1003 adapter->pcidev = pcidev;
1004
1005 mmio_start = pci_resource_start(pcidev, 0);
1006 mmio_len = pci_resource_len(pcidev, 0);
1007
1008 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
1009 mmio_start, mmio_len);
1010
1011 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
1012 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
1013 memmapped_ioaddr);
1014 if (!memmapped_ioaddr) {
1015 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
1016 __func__, mmio_len, mmio_start);
1017 goto err_out_free_mmio_region_0;
1018 }
1019
1020 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \
1021 len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start,
1022 mmio_len, pcidev->irq);
1023
1024 adapter->HwRegs = (void *)memmapped_ioaddr;
1025 adapter->base_addr = memmapped_ioaddr;
1026
1027 mmio_start = pci_resource_start(pcidev, 2);
1028 mmio_len = pci_resource_len(pcidev, 2);
1029
1030 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
1031 mmio_start, mmio_len);
1032
1033 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
1034 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
1035 memmapped_ioaddr);
1036 if (!memmapped_ioaddr) {
1037 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
1038 __func__, mmio_len, mmio_start);
1039 goto err_out_free_mmio_region_2;
1040 }
1041
1042 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
1043 "start[%lx] len[%lx], IRQ %d.\n", __func__,
1044 memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);
1045
1046 adapter->UcodeRegs = (void *)memmapped_ioaddr;
1047
1048 adapter->State = SXG_STATE_INITIALIZING;
1049 /*
1050 * Maintain a list of all adapters anchored by
1051 * the global SxgDriver structure.
1052 */
1053 adapter->Next = SxgDriver.Adapters;
1054 SxgDriver.Adapters = adapter;
1055 adapter->AdapterID = ++SxgDriver.AdapterID;
1056
1057 /* Initialize CRC table used to determine multicast hash */
1058 sxg_mcast_init_crc32();
1059
1060 adapter->JumboEnabled = FALSE;
1061 adapter->RssEnabled = FALSE;
1062 if (adapter->JumboEnabled) {
1063 adapter->FrameSize = JUMBOMAXFRAME;
1064 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
1065 } else {
1066 adapter->FrameSize = ETHERMAXFRAME;
1067 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
1068 }
1069
1070 /*
1071 * status = SXG_READ_EEPROM(adapter);
1072 * if (!status) {
1073 * goto sxg_init_bad;
1074 * }
1075 */
1076
1077 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
1078 sxg_config_pci(pcidev);
1079 DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
1080
1081 DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
1082 sxg_init_driver();
1083 DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
1084
1085 adapter->vendid = pci_tbl_entry->vendor;
1086 adapter->devid = pci_tbl_entry->device;
1087 adapter->subsysid = pci_tbl_entry->subdevice;
1088 adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
1089 adapter->functionnumber = (pcidev->devfn & 0x7);
1090 adapter->memorylength = pci_resource_len(pcidev, 0);
1091 adapter->irq = pcidev->irq;
1092 adapter->next_netdevice = head_netdevice;
1093 head_netdevice = netdev;
1094 adapter->port = 0; /*adapter->functionnumber; */
1095
1096 /* Allocate memory and other resources */
1097 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
1098 status = sxg_allocate_resources(adapter);
1099 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
1100 __func__, status);
1101 if (status != STATUS_SUCCESS) {
1102 goto err_out_unmap;
1103 }
1104
1105 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
1106 if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
1107 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
1108 __func__);
1109 sxg_read_config(adapter);
1110 status = sxg_adapter_set_hwaddr(adapter);
1111 } else {
1112 adapter->state = ADAPT_FAIL;
1113 adapter->linkstate = LINK_DOWN;
1114 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
1115 }
1116
1117 netdev->base_addr = (unsigned long)adapter->base_addr;
1118 netdev->irq = adapter->irq;
1119 netdev->netdev_ops = &sxg_netdev_ops;
1120 SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
1121 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1122 err = sxg_set_interrupt_capability(adapter);
1123 if (err != STATUS_SUCCESS)
1124 DBG_ERROR("Cannot enable MSI-X capability\n");
1125
1126 strcpy(netdev->name, "eth%d");
1127 /* strcpy(netdev->name, pci_name(pcidev)); */
1128 if ((err = register_netdev(netdev))) {
1129 DBG_ERROR("Cannot register net device, aborting. %s\n",
1130 netdev->name);
1131 goto err_out_unmap;
1132 }
1133
1134 netif_napi_add(netdev, &adapter->napi,
1135 sxg_poll, SXG_NETDEV_WEIGHT);
1136 netdev->watchdog_timeo = 2 * HZ;
1137 init_timer(&adapter->watchdog_timer);
1138 adapter->watchdog_timer.function = &sxg_watchdog;
1139 adapter->watchdog_timer.data = (unsigned long) adapter;
1140 INIT_WORK(&adapter->update_link_status, sxg_update_link_status);
1141
1142 DBG_ERROR
1143 ("sxg: %s addr 0x%lx, irq %d, MAC addr \
1144 %02X:%02X:%02X:%02X:%02X:%02X\n",
1145 netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
1146 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
1147 netdev->dev_addr[4], netdev->dev_addr[5]);
1148
1149 /* sxg_init_bad: */
1150 ASSERT(status == FALSE);
1151 /* sxg_free_adapter(adapter); */
1152
1153 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
1154 status, jiffies, smp_processor_id());
1155 return status;
1156
1157 err_out_unmap:
1158 sxg_free_resources(adapter);
1159
1160 err_out_free_mmio_region_2:
1161
1162 mmio_start = pci_resource_start(pcidev, 2);
1163 mmio_len = pci_resource_len(pcidev, 2);
1164 release_mem_region(mmio_start, mmio_len);
1165
1166 err_out_free_mmio_region_0:
1167
1168 mmio_start = pci_resource_start(pcidev, 0);
1169 mmio_len = pci_resource_len(pcidev, 0);
1170
1171 release_mem_region(mmio_start, mmio_len);
1172
1173 err_out_exit_sxg_probe:
1174
1175 DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
1176 smp_processor_id());
1177
1178 pci_disable_device(pcidev);
1179 DBG_ERROR("sxg: %s deallocate device\n", __func__);
1180 kfree(netdev);
1181 printk("Exit %s, Sxg driver loading failed..\n", __func__);
1182
1183 return -ENODEV;
1184 }
1185
1186 /*
1187 * LINE BASE Interrupt routines..
1188 *
1189 * sxg_disable_interrupt
1190 *
1191 * DisableInterrupt Handler
1192 *
1193 * Arguments:
1194 *
1195 * adapter: Our adapter structure
1196 *
1197 * Return Value:
1198 * None.
1199 */
1200 static void sxg_disable_interrupt(struct adapter_t *adapter)
1201 {
1202 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
1203 adapter, adapter->InterruptsEnabled, 0, 0);
1204 /* For now, RSS is disabled with line based interrupts */
1205 ASSERT(adapter->RssEnabled == FALSE);
1206 /* Turn off interrupts by writing to the icr register. */
1207 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
1208
1209 adapter->InterruptsEnabled = 0;
1210
1211 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
1212 adapter, adapter->InterruptsEnabled, 0, 0);
1213 }
1214
1215 /*
1216 * sxg_enable_interrupt
1217 *
1218 * EnableInterrupt Handler
1219 *
1220 * Arguments:
1221 *
1222 * adapter: Our adapter structure
1223 *
1224 * Return Value:
1225 * None.
1226 */
1227 static void sxg_enable_interrupt(struct adapter_t *adapter)
1228 {
1229 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
1230 adapter, adapter->InterruptsEnabled, 0, 0);
1231 /* For now, RSS is disabled with line based interrupts */
1232 ASSERT(adapter->RssEnabled == FALSE);
1233 /* Turn on interrupts by writing to the icr register. */
1234 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
1235
1236 adapter->InterruptsEnabled = 1;
1237
1238 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
1239 adapter, 0, 0, 0);
1240 }
1241
1242 /*
1243 * sxg_isr - Process an line-based interrupt
1244 *
1245 * Arguments:
1246 * Context - Our adapter structure
1247 * QueueDefault - Output parameter to queue to default CPU
1248 * TargetCpus - Output bitmap to schedule DPC's
1249 *
1250 * Return Value: TRUE if our interrupt
1251 */
1252 static irqreturn_t sxg_isr(int irq, void *dev_id)
1253 {
1254 struct net_device *dev = (struct net_device *) dev_id;
1255 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1256
1257 if(adapter->state != ADAPT_UP)
1258 return IRQ_NONE;
1259 adapter->Stats.NumInts++;
1260 if (adapter->Isr[0] == 0) {
1261 /*
1262 * The SLIC driver used to experience a number of spurious
1263 * interrupts due to the delay associated with the masking of
1264 * the interrupt (we'd bounce back in here). If we see that
1265 * again with Sahara,add a READ_REG of the Icr register after
1266 * the WRITE_REG below.
1267 */
1268 adapter->Stats.FalseInts++;
1269 return IRQ_NONE;
1270 }
1271 /*
1272 * Move the Isr contents and clear the value in
1273 * shared memory, and mask interrupts
1274 */
1275 /* ASSERT(adapter->IsrDpcsPending == 0); */
1276 #if XXXTODO /* RSS Stuff */
1277 /*
1278 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
1279 * schedule DPC's based on event queues.
1280 */
1281 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
1282 for (i = 0;
1283 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
1284 i++) {
1285 struct sxg_event_ring *EventRing =
1286 &adapter->EventRings[i];
1287 struct sxg_event *Event =
1288 &EventRing->Ring[adapter->NextEvent[i]];
1289 unsigned char Cpu =
1290 adapter->RssSystemInfo->RssIdToCpu[i];
1291 if (Event->Status & EVENT_STATUS_VALID) {
1292 adapter->IsrDpcsPending++;
1293 CpuMask |= (1 << Cpu);
1294 }
1295 }
1296 }
1297 /*
1298 * Now, either schedule the CPUs specified by the CpuMask,
1299 * or queue default
1300 */
1301 if (CpuMask) {
1302 *QueueDefault = FALSE;
1303 } else {
1304 adapter->IsrDpcsPending = 1;
1305 *QueueDefault = TRUE;
1306 }
1307 *TargetCpus = CpuMask;
1308 #endif
1309 sxg_interrupt(adapter);
1310
1311 return IRQ_HANDLED;
1312 }
1313
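/*
 * sxg_interrupt - Mask further interrupts by writing SXG_ICR_MASK to the
 * ICR, then schedule NAPI.  sxg_poll() drains the event queue and, once
 * the budgeted work is done, completes NAPI and writes the shared ISR
 * location back to zero.
 */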
1314 static void sxg_interrupt(struct adapter_t *adapter)
1315 {
1316 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
1317
1318 if (napi_schedule_prep(&adapter->napi)) {
1319 __napi_schedule(&adapter->napi);
1320 }
1321 }
1322
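/*
 * sxg_handle_interrupt - NAPI worker for a line-based interrupt.  Snapshot
 * the shared ISR value into IsrCopy and clear it, drain the event queue
 * within the given budget, then let sxg_process_isr() handle any error,
 * link or completion bits that were set.
 */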
1323 static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
1324 int budget)
1325 {
1326 /* unsigned char RssId = 0; */
1327 u32 NewIsr;
1328 int sxg_napi_continue = 1;
1329 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
1330 adapter, adapter->IsrCopy[0], 0, 0);
1331 /* For now, RSS is disabled with line based interrupts */
1332 ASSERT(adapter->RssEnabled == FALSE);
1333
1334 adapter->IsrCopy[0] = adapter->Isr[0];
1335 adapter->Isr[0] = 0;
1336
1337 /* Always process the event queue. */
1338 while (sxg_napi_continue)
1339 {
1340 sxg_process_event_queue(adapter,
1341 (adapter->RssEnabled ? /*RssId */ 0 : 0),
1342 &sxg_napi_continue, work_done, budget);
1343 }
1344
1345 #if XXXTODO /* RSS stuff */
1346 if (--adapter->IsrDpcsPending) {
1347 /* We're done. */
1348 ASSERT(adapter->RssEnabled);
1349 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1350 adapter, 0, 0, 0);
1351 return;
1352 }
1353 #endif
1354 /* Last (or only) DPC processes the ISR and clears the interrupt. */
1355 NewIsr = sxg_process_isr(adapter, 0);
1356 /* Reenable interrupts */
1357 adapter->IsrCopy[0] = 0;
1358 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1359 adapter, NewIsr, 0, 0);
1360
1361 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
1362 adapter, 0, 0, 0);
1363 }
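/*
 * sxg_poll - NAPI poll routine.  Processes events up to "budget"; if less
 * work than the budget was done, the NAPI context is completed and the
 * ISR register is written back to zero.
 */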
1364 static int sxg_poll(struct napi_struct *napi, int budget)
1365 {
1366 struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
1367 int work_done = 0;
1368
1369 sxg_handle_interrupt(adapter, &work_done, budget);
1370
1371 if (work_done < budget) {
1372 napi_complete(napi);
1373 WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
1374 }
1375 return work_done;
1376 }
1377
1378 /*
1379 * sxg_process_isr - Process an interrupt. Called from the line-based and
1380 * message based interrupt DPC routines
1381 *
1382 * Arguments:
1383 * adapter - Our adapter structure
1384 * Queue - The ISR that needs processing
1385 *
1386 * Return Value:
1387 * None
1388 */
1389 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
1390 {
1391 u32 Isr = adapter->IsrCopy[MessageId];
1392 u32 NewIsr = 0;
1393
1394 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1395 adapter, Isr, 0, 0);
1396
1397 /* Error */
1398 if (Isr & SXG_ISR_ERR) {
1399 if (Isr & SXG_ISR_PDQF) {
1400 adapter->Stats.PdqFull++;
1401 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
1402 }
1403 /* No host buffer */
1404 if (Isr & SXG_ISR_RMISS) {
1405 /*
1406 * There is a bunch of code in the SLIC driver which
1407 * attempts to process more receive events per DPC
1408 * if we start to fall behind. We'll probably
1409 * need to do something similar here, but hold
1410 * off for now. I don't want to make the code more
1411 * complicated than strictly needed.
1412 */
1413 adapter->stats.rx_missed_errors++;
1414 if (adapter->stats.rx_missed_errors < 5) {
1415 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
1416 __func__);
1417 }
1418 }
1419 /* Card crash */
1420 if (Isr & SXG_ISR_DEAD) {
1421 /*
1422 * Set aside the crash info and set the adapter state
1423 * to RESET
1424 */
1425 adapter->CrashCpu = (unsigned char)
1426 ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
1427 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1428 adapter->Dead = TRUE;
1429 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
1430 adapter->CrashLocation, adapter->CrashCpu);
1431 }
1432 /* Event ring full */
1433 if (Isr & SXG_ISR_ERFULL) {
1434 /*
1435 * Same issue as RMISS, really. This means the
1436 * host is falling behind the card. Need to increase
1437 * event ring size, process more events per interrupt,
1438 * and/or reduce/remove interrupt aggregation.
1439 */
1440 adapter->Stats.EventRingFull++;
1441 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
1442 __func__);
1443 }
1444 /* Transmit drop - no DRAM buffers or XMT error */
1445 if (Isr & SXG_ISR_XDROP) {
1446 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
1447 }
1448 }
1449 /* Slowpath send completions */
1450 if (Isr & SXG_ISR_SPSEND) {
1451 sxg_complete_slow_send(adapter);
1452 }
1453 /* Dump */
1454 if (Isr & SXG_ISR_UPC) {
1455 /* Maybe change when debug is added.. */
1456 // ASSERT(adapter->DumpCmdRunning);
1457 adapter->DumpCmdRunning = FALSE;
1458 }
1459 /* Link event */
1460 if (Isr & SXG_ISR_LINK) {
1461 if (adapter->state != ADAPT_DOWN) {
1462 adapter->link_status_changed = 1;
1463 schedule_work(&adapter->update_link_status);
1464 }
1465 }
1466 /* Debug - breakpoint hit */
1467 if (Isr & SXG_ISR_BREAK) {
1468 /*
1469 * At the moment AGDB isn't written to support interactive
1470 * debug sessions. When it is, this interrupt will be used to
1471 * signal AGDB that it has hit a breakpoint. For now, ASSERT.
1472 */
1473 ASSERT(0);
1474 }
1475 /* Heartbeat response */
1476 if (Isr & SXG_ISR_PING) {
1477 adapter->PingOutstanding = FALSE;
1478 }
1479 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
1480 adapter, Isr, NewIsr, 0);
1481
1482 return (NewIsr);
1483 }
1484
1485 /*
1486 * sxg_rcv_checksum - Set the checksum for received packet
1487 *
1488 * Arguments:
1489 * @adapter - Adapter structure on which packet is received
1490 * @skb - Packet which is received
1491 * @Event - Event read from hardware
1492 */
1493
1494 void sxg_rcv_checksum(struct adapter_t *adapter, struct sk_buff *skb,
1495 struct sxg_event *Event)
1496 {
1497 skb->ip_summed = CHECKSUM_NONE;
1498 if (likely(adapter->flags & SXG_RCV_IP_CSUM_ENABLED)) {
1499 if (likely(adapter->flags & SXG_RCV_TCP_CSUM_ENABLED)
1500 && (Event->Status & EVENT_STATUS_TCPIP)) {
1501 if(!(Event->Status & EVENT_STATUS_TCPBAD))
1502 skb->ip_summed = CHECKSUM_UNNECESSARY;
1503 if(!(Event->Status & EVENT_STATUS_IPBAD))
1504 skb->ip_summed = CHECKSUM_UNNECESSARY;
1505 } else if(Event->Status & EVENT_STATUS_IPONLY) {
1506 if(!(Event->Status & EVENT_STATUS_IPBAD))
1507 skb->ip_summed = CHECKSUM_UNNECESSARY;
1508 }
1509 }
1510 }
1511
1512 /*
1513 * sxg_process_event_queue - Process our event queue
1514 *
1515 * Arguments:
1516 * - adapter - Adapter structure
1517 * - RssId - The event queue requiring processing
1518 *
1519 * Return Value:
1520 * None.
1521 */
1522 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
1523 int *sxg_napi_continue, int *work_done, int budget)
1524 {
1525 struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
1526 struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1527 u32 EventsProcessed = 0, Batches = 0;
1528 struct sk_buff *skb;
1529 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1530 struct sk_buff *prev_skb = NULL;
1531 struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1532 u32 Index;
1533 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1534 #endif
1535 u32 ReturnStatus = 0;
1536 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
1537
1538 ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1539 (adapter->State == SXG_STATE_PAUSING) ||
1540 (adapter->State == SXG_STATE_PAUSED) ||
1541 (adapter->State == SXG_STATE_HALTING));
1542 /*
1543 * We may still have unprocessed events on the queue if
1544 * the card crashed. Don't process them.
1545 */
1546 if (adapter->Dead) {
1547 return (0);
1548 }
1549 /*
1550 * In theory there should only be a single processor that
1551 * accesses this queue, and only at interrupt-DPC time. So
1552 * we shouldn't need a lock for any of this.
1553 */
1554 while (Event->Status & EVENT_STATUS_VALID) {
1555 (*sxg_napi_continue) = 1;
1556 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1557 Event, Event->Code, Event->Status,
1558 adapter->NextEvent);
1559 switch (Event->Code) {
1560 case EVENT_CODE_BUFFERS:
1561 /* struct sxg_ring_info Head & Tail == unsigned char */
1562 ASSERT(!(Event->CommandIndex & 0xFF00));
1563 sxg_complete_descriptor_blocks(adapter,
1564 Event->CommandIndex);
1565 break;
1566 case EVENT_CODE_SLOWRCV:
1567 (*work_done)++;
1568 --adapter->RcvBuffersOnCard;
1569 if ((skb = sxg_slow_receive(adapter, Event))) {
1570 u32 rx_bytes;
1571 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1572 /* Add it to our indication list */
1573 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1574 IndicationList, num_skbs);
1575 /*
1576 * Linux, we just pass up each skb to the
1577 * protocol above at this point, there is no
1578 * capability of an indication list.
1579 */
1580 #else
1581 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1582 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1583 rx_bytes = Event->Length;
1584 adapter->stats.rx_packets++;
1585 adapter->stats.rx_bytes += rx_bytes;
1586 sxg_rcv_checksum(adapter, skb, Event);
1587 skb->dev = adapter->netdev;
1588 netif_receive_skb(skb);
1589 #endif
1590 }
1591 break;
1592 default:
1593 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
1594 __func__, Event->Code);
1595 /* ASSERT(0); */
1596 }
1597 /*
1598 * See if we need to restock card receive buffers.
1599 * There are two things to note here:
1600 * First - This test is not SMP safe. The
1601 * adapter->BuffersOnCard field is protected via atomic
1602 * interlocked calls, but we do not protect it with respect
1603 * to these tests. The only way to do that is with a lock,
1604 * and I don't want to grab a lock every time we adjust the
1605 * BuffersOnCard count. Instead, we allow the buffer
1606 * replenishment to be off once in a while. The worst that
1607 * can happen is the card is given one more or one less descriptor
1608 * block than the arbitrary value we've chosen. No big deal.
1609 * In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
1610 * is adjusted.
1611 * Second - We expect this test to rarely
1612 * evaluate to true. We attempt to refill descriptor blocks
1613 * as they are returned to us (sxg_complete_descriptor_blocks)
1614 * so the only time this should evaluate to true is when
1615 * sxg_complete_descriptor_blocks failed to allocate
1616 * receive buffers.
1617 */
1618 if (adapter->JumboEnabled)
1619 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
1620
1621 if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
1622 sxg_stock_rcv_buffers(adapter);
1623 }
1624 /*
1625 * It's more efficient to just set this to zero.
1626 * But clearing the top bit saves potential debug info...
1627 */
1628 Event->Status &= ~EVENT_STATUS_VALID;
1629 /* Advance to the next event */
1630 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1631 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1632 EventsProcessed++;
1633 if (EventsProcessed == EVENT_RING_BATCH) {
1634 /* Release a batch of events back to the card */
1635 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1636 EVENT_RING_BATCH, FALSE);
1637 EventsProcessed = 0;
1638 /*
1639 * If we've processed our batch limit, break out of the
1640 * loop and return SXG_ISR_EVENT to arrange for us to
1641 * be called again
1642 */
1643 if (Batches++ == EVENT_BATCH_LIMIT) {
1644 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1645 TRACE_NOISY, "EvtLimit", Batches,
1646 adapter->NextEvent, 0, 0);
1647 ReturnStatus = SXG_ISR_EVENT;
1648 break;
1649 }
1650 }
1651 if (*work_done >= budget) {
1652 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1653 EventsProcessed, FALSE);
1654 EventsProcessed = 0;
1655 (*sxg_napi_continue) = 0;
1656 break;
1657 }
1658 }
1659 if (!(Event->Status & EVENT_STATUS_VALID))
1660 (*sxg_napi_continue) = 0;
1661
1662 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1663 /* Indicate any received dumb-nic frames */
1664 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1665 #endif
1666 /* Release events back to the card. */
1667 if (EventsProcessed) {
1668 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1669 EventsProcessed, FALSE);
1670 }
1671 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1672 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1673
1674 return (ReturnStatus);
1675 }
1676
1677 /*
1678 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1679 *
1680 * Arguments -
1681 * adapter - A pointer to our adapter structure
1682 * Return
1683 * None
1684 */
1685 static void sxg_complete_slow_send(struct adapter_t *adapter)
1686 {
1687 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1688 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
1689 u32 *ContextType;
1690 struct sxg_cmd *XmtCmd;
1691 unsigned long flags = 0;
1692 unsigned long sgl_flags = 0;
1693 unsigned int processed_count = 0;
1694
1695 /*
1696 * NOTE - This lock is dropped and regrabbed in this loop.
1697 * This means two different processors can both be running
1698 * through this loop. Be *very* careful.
1699 */
1700 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1701
1702 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1703 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1704
1705 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1706 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
1707 /*
1708 * Locate the current Cmd (ring descriptor entry), and
1709 * associated SGL, and advance the tail
1710 */
1711 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1712 ASSERT(ContextType);
1713 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1714 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
1715 /* Clear the SGL field. */
1716 XmtCmd->Sgl = 0;
1717
1718 switch (*ContextType) {
1719 case SXG_SGL_DUMB:
1720 {
1721 struct sk_buff *skb;
1722 struct sxg_scatter_gather *SxgSgl =
1723 (struct sxg_scatter_gather *)ContextType;
1724 dma64_addr_t FirstSgeAddress;
1725 u32 FirstSgeLength;
1726
1727 /* Dumb-nic send. Command context is the dumb-nic SGL */
1728 skb = (struct sk_buff *)ContextType;
1729 skb = SxgSgl->DumbPacket;
1730 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1731 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
1732 /* Complete the send */
1733 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1734 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1735 0, 0);
1736 ASSERT(adapter->Stats.XmtQLen);
1737 /*
1738 * Now drop the lock and complete the send
1739 * back to Microsoft. We need to drop the lock
1740 * because Microsoft can come back with a
1741 * chimney send, which results in a double trip
1742 * in SxgTcpOutput
1743 */
1744 spin_unlock_irqrestore(
1745 &adapter->XmtZeroLock, flags);
1746
1747 SxgSgl->DumbPacket = NULL;
1748 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1749 FirstSgeAddress,
1750 FirstSgeLength);
1751 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
1752 /* and reacquire.. */
1753 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1754 }
1755 break;
1756 default:
1757 ASSERT(0);
1758 }
1759 }
1760 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
1761 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1762 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1763 }
1764
1765 /*
1766 * sxg_slow_receive
1767 *
1768 * Arguments -
1769 * adapter - A pointer to our adapter structure
1770 * Event - Receive event
1771 *
1772 * Return - skb
1773 */
1774 static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1775 struct sxg_event *Event)
1776 {
1777 u32 BufferSize = adapter->ReceiveBufferSize;
1778 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1779 struct sk_buff *Packet;
1780 static int read_counter = 0;
1781
1782 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
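	/*
	 * Refresh the adapter statistics periodically (roughly once
	 * every 256 slow-path receives) rather than on every frame.
	 */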
1783 if(read_counter++ & 0x100)
1784 {
1785 sxg_collect_statistics(adapter);
1786 read_counter = 0;
1787 }
1788 ASSERT(RcvDataBufferHdr);
1789 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
1790 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1791 RcvDataBufferHdr, RcvDataBufferHdr->State,
1792 /*RcvDataBufferHdr->VirtualAddress*/ 0);
1793 /* Drop rcv frames in non-running state */
1794 switch (adapter->State) {
1795 case SXG_STATE_RUNNING:
1796 break;
1797 case SXG_STATE_PAUSING:
1798 case SXG_STATE_PAUSED:
1799 case SXG_STATE_HALTING:
1800 goto drop;
1801 default:
1802 ASSERT(0);
1803 goto drop;
1804 }
1805
1806 /*
1807 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1808 * RcvDataBufferHdr->VirtualAddress, Event->Length);
1809 */
1810
1811 /* Change buffer state to UPSTREAM */
1812 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1813 if (Event->Status & EVENT_STATUS_RCVERR) {
1814 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1815 Event, Event->Status, Event->HostHandle, 0);
1816 sxg_process_rcv_error(adapter, *(u32 *)
1817 SXG_RECEIVE_DATA_LOCATION
1818 (RcvDataBufferHdr));
1819 goto drop;
1820 }
1821 #if XXXTODO /* VLAN stuff */
1822 /* If there's a VLAN tag, extract it and validate it */
1823 if (((struct ether_header *)
1824 (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
1825 == ETHERTYPE_VLAN) {
1826 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1827 STATUS_SUCCESS) {
1828 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1829 "BadVlan", Event,
1830 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1831 Event->Length, 0);
1832 goto drop;
1833 }
1834 }
1835 #endif
1836 /* Dumb-nic frame. See if it passes our mac filter and update stats */
1837
1838 if (!sxg_mac_filter(adapter,
1839 (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
1840 Event->Length)) {
1841 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1842 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1843 Event->Length, 0);
1844 goto drop;
1845 }
1846
1847 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
1848 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1849 Packet->protocol = eth_type_trans(Packet, adapter->netdev);
1850
1851 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1852 RcvDataBufferHdr, Packet, Event->Length, 0);
1853 /* Detach the skb from the buffer header and replenish it with a fresh receive packet. */
1854 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
1855 RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
1856 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1857 if (RcvDataBufferHdr->skb)
1858 {
1859 spin_lock(&adapter->RcvQLock);
1860 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1861 // adapter->RcvBuffersOnCard ++;
1862 spin_unlock(&adapter->RcvQLock);
1863 }
1864 return (Packet);
1865
1866 drop:
1867 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1868 RcvDataBufferHdr, Event->Length, 0, 0);
1869 adapter->stats.rx_dropped++;
1870 // adapter->Stats.RcvDiscards++;
1871 spin_lock(&adapter->RcvQLock);
1872 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1873 spin_unlock(&adapter->RcvQLock);
1874 return (NULL);
1875 }
1876
1877 /*
1878 * sxg_process_rcv_error - process receive error and update
1879 * stats
1880 *
1881 * Arguments:
1882 * adapter - Adapter structure
1883 * ErrorStatus - 4-byte receive error status
1884 *
1885 * Return Value : None
1886 */
1887 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
1888 {
1889 u32 Error;
1890
1891 adapter->stats.rx_errors++;
1892
1893 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1894 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1895 switch (Error) {
1896 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1897 adapter->Stats.TransportCsum++;
1898 break;
1899 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1900 adapter->Stats.TransportUflow++;
1901 break;
1902 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1903 adapter->Stats.TransportHdrLen++;
1904 break;
1905 }
1906 }
1907 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1908 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1909 switch (Error) {
1910 case SXG_RCV_STATUS_NETWORK_CSUM:
1911 adapter->Stats.NetworkCsum++;
1912 break;
1913 case SXG_RCV_STATUS_NETWORK_UFLOW:
1914 adapter->Stats.NetworkUflow++;
1915 break;
1916 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1917 adapter->Stats.NetworkHdrLen++;
1918 break;
1919 }
1920 }
1921 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1922 adapter->Stats.Parity++;
1923 }
1924 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1925 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1926 switch (Error) {
1927 case SXG_RCV_STATUS_LINK_PARITY:
1928 adapter->Stats.LinkParity++;
1929 break;
1930 case SXG_RCV_STATUS_LINK_EARLY:
1931 adapter->Stats.LinkEarly++;
1932 break;
1933 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1934 adapter->Stats.LinkBufOflow++;
1935 break;
1936 case SXG_RCV_STATUS_LINK_CODE:
1937 adapter->Stats.LinkCode++;
1938 break;
1939 case SXG_RCV_STATUS_LINK_DRIBBLE:
1940 adapter->Stats.LinkDribble++;
1941 break;
1942 case SXG_RCV_STATUS_LINK_CRC:
1943 adapter->Stats.LinkCrc++;
1944 break;
1945 case SXG_RCV_STATUS_LINK_OFLOW:
1946 adapter->Stats.LinkOflow++;
1947 break;
1948 case SXG_RCV_STATUS_LINK_UFLOW:
1949 adapter->Stats.LinkUflow++;
1950 break;
1951 }
1952 }
1953 }
1954
1955 /*
1956 * sxg_mac_filter
1957 *
1958 * Arguments:
1959 * adapter - Adapter structure
1960 * pether - Ethernet header
1961 * length - Frame length
1962 *
1963 * Return Value : TRUE if the frame is to be allowed
1964 */
1965 static bool sxg_mac_filter(struct adapter_t *adapter,
1966 struct ether_header *EtherHdr, ushort length)
1967 {
1968 bool EqualAddr;
1969 struct net_device *dev = adapter->netdev;
1970
1971 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1972 if (SXG_BROADCAST_PACKET(EtherHdr)) {
1973 /* broadcast */
1974 if (adapter->MacFilter & MAC_BCAST) {
1975 adapter->Stats.DumbRcvBcastPkts++;
1976 adapter->Stats.DumbRcvBcastBytes += length;
1977 return (TRUE);
1978 }
1979 } else {
1980 /* multicast */
1981 if (adapter->MacFilter & MAC_ALLMCAST) {
1982 adapter->Stats.DumbRcvMcastPkts++;
1983 adapter->Stats.DumbRcvMcastBytes += length;
1984 return (TRUE);
1985 }
1986 if (adapter->MacFilter & MAC_MCAST) {
1987 struct dev_mc_list *mclist = dev->mc_list;
1988 while (mclist) {
1989 ETHER_EQ_ADDR(mclist->da_addr,
1990 EtherHdr->ether_dhost,
1991 EqualAddr);
1992 if (EqualAddr) {
1993 adapter->Stats.
1994 DumbRcvMcastPkts++;
1995 adapter->Stats.
1996 DumbRcvMcastBytes += length;
1997 return (TRUE);
1998 }
1999 mclist = mclist->next;
2000 }
2001 }
2002 }
2003 } else if (adapter->MacFilter & MAC_DIRECTED) {
2004 /*
2005 * Not broadcast or multicast. Must be directed at us or
2006 * the card is in promiscuous mode. Either way, consider it
2007 * ours if MAC_DIRECTED is set
2008 */
2009 adapter->Stats.DumbRcvUcastPkts++;
2010 adapter->Stats.DumbRcvUcastBytes += length;
2011 return (TRUE);
2012 }
2013 if (adapter->MacFilter & MAC_PROMISC) {
2014 /* Whatever it is, keep it. */
2015 return (TRUE);
2016 }
2017 return (FALSE);
2018 }
2019
2020 static int sxg_register_interrupt(struct adapter_t *adapter)
2021 {
2022 if (!adapter->intrregistered) {
2023 int retval;
2024
2025 DBG_ERROR
2026 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
2027 __func__, adapter, adapter->netdev->irq, NR_IRQS);
2028
2029 spin_unlock_irqrestore(&sxg_global.driver_lock,
2030 sxg_global.flags);
2031
2032 retval = request_irq(adapter->netdev->irq,
2033 &sxg_isr,
2034 IRQF_SHARED,
2035 adapter->netdev->name, adapter->netdev);
2036
2037 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2038
2039 if (retval) {
2040 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
2041 adapter->netdev->name, retval);
2042 return (retval);
2043 }
2044 adapter->intrregistered = 1;
2045 adapter->IntRegistered = TRUE;
2046 /* Disable RSS with line-based interrupts */
2047 adapter->RssEnabled = FALSE;
2048 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
2049 __func__, adapter, adapter->netdev->irq);
2050 }
2051 return (STATUS_SUCCESS);
2052 }
2053
2054 static void sxg_deregister_interrupt(struct adapter_t *adapter)
2055 {
2056 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
2057 #if XXXTODO
2058 slic_init_cleanup(adapter);
2059 #endif
2060 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
2061 adapter->error_interrupts = 0;
2062 adapter->rcv_interrupts = 0;
2063 adapter->xmit_interrupts = 0;
2064 adapter->linkevent_interrupts = 0;
2065 adapter->upr_interrupts = 0;
2066 adapter->num_isrs = 0;
2067 adapter->xmit_completes = 0;
2068 adapter->rcv_broadcasts = 0;
2069 adapter->rcv_multicasts = 0;
2070 adapter->rcv_unicasts = 0;
2071 DBG_ERROR("sxg: %s EXIT\n", __func__);
2072 }
2073
2074 /*
2075 * sxg_if_init
2076 *
2077 * Perform initialization of our slic interface.
2078 *
2079 */
2080 static int sxg_if_init(struct adapter_t *adapter)
2081 {
2082 struct net_device *dev = adapter->netdev;
2083 int status = 0;
2084
2085 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
2086 __func__, adapter->netdev->name,
2087 adapter->state,
2088 adapter->linkstate, dev->flags);
2089
2090 /* adapter should be down at this point */
2091 if (adapter->state != ADAPT_DOWN) {
2092 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
2093 return (-EIO);
2094 }
2095 ASSERT(adapter->linkstate == LINK_DOWN);
2096
2097 adapter->devflags_prev = dev->flags;
2098 adapter->MacFilter = MAC_DIRECTED;
2099 if (dev->flags) {
2100 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
2101 adapter->netdev->name);
2102 if (dev->flags & IFF_BROADCAST) {
2103 adapter->MacFilter |= MAC_BCAST;
2104 DBG_ERROR("BCAST ");
2105 }
2106 if (dev->flags & IFF_PROMISC) {
2107 adapter->MacFilter |= MAC_PROMISC;
2108 DBG_ERROR("PROMISC ");
2109 }
2110 if (dev->flags & IFF_ALLMULTI) {
2111 adapter->MacFilter |= MAC_ALLMCAST;
2112 DBG_ERROR("ALL_MCAST ");
2113 }
2114 if (dev->flags & IFF_MULTICAST) {
2115 adapter->MacFilter |= MAC_MCAST;
2116 DBG_ERROR("MCAST ");
2117 }
2118 DBG_ERROR("\n");
2119 }
2120 status = sxg_register_intr(adapter);
2121 if (status != STATUS_SUCCESS) {
2122 DBG_ERROR("sxg_if_init: sxg_register_intr FAILED %x\n",
2123 status);
2124 sxg_deregister_interrupt(adapter);
2125 return (status);
2126 }
2127
2128 adapter->state = ADAPT_UP;
2129
2130 /* clear any pending events, then enable interrupts */
2131 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
2132
2133 return (STATUS_SUCCESS);
2134 }
2135
2136 void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
2137 {
2138 /*
2139 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
2140 * Make sure Max is less than 0x8000.
2141 */
2142 adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
2143 adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
2144 WRITE_REG(adapter->UcodeRegs[0].Aggregation,
2145 ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
2146 adapter->min_aggregation),
2147 TRUE);
2148 }
2149
2150 static int sxg_entry_open(struct net_device *dev)
2151 {
2152 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2153 int status;
2154 static int turn;
2155 int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
2156 int i;
2157
2158 if (adapter->JumboEnabled == TRUE) {
2159 sxg_initial_rcv_data_buffers =
2160 SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;
2161 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo,
2162 SXG_JUMBO_RCV_RING_SIZE);
2163 }
2164
2165 /*
2166 * Allocate receive data buffers. We allocate a block of buffers and
2167 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
2168 */
2169
2170 for (i = 0; i < sxg_initial_rcv_data_buffers;
2171 i += SXG_RCV_DESCRIPTORS_PER_BLOCK)
2172 {
2173 status = sxg_allocate_buffer_memory(adapter,
2174 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
2175 SXG_BUFFER_TYPE_RCV);
2176 if (status != STATUS_SUCCESS)
2177 return status;
2178 }
2179 /*
2180 * NBL resource allocation can fail in the 'AllocateComplete' routine,
2181 * which doesn't return status. Make sure we got the number of buffers
2182 * we requested
2183 */
2184
2185 if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) {
2186 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
2187 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
2188 0);
2189 return (STATUS_RESOURCES);
2190 }
2191 /*
2192 * The microcode needs to be downloaded on every open.
2193 */
2194 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__);
2195 if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
2196 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
2197 __FUNCTION__);
2198 sxg_read_config(adapter);
2199 } else {
2200 adapter->state = ADAPT_FAIL;
2201 adapter->linkstate = LINK_DOWN;
2202 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n",
2203 status);
2204 }
2205 msleep(5);
2206
2207 if (turn) {
2208 sxg_second_open(adapter->netdev);
2209
2210 return STATUS_SUCCESS;
2211 }
2212
2213 turn++;
2214
2215 ASSERT(adapter);
2216 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
2217 adapter->activated);
2218 DBG_ERROR
2219 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
2220 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
2221 adapter->netdev, adapter, adapter->port);
2222
2223 netif_stop_queue(adapter->netdev);
2224
2225 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2226 if (!adapter->activated) {
2227 sxg_global.num_sxg_ports_active++;
2228 adapter->activated = 1;
2229 }
2230 /* Initialize the adapter */
2231 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
2232 status = sxg_initialize_adapter(adapter);
2233 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
2234 __func__, status);
2235
2236 if (status == STATUS_SUCCESS) {
2237 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
2238 status = sxg_if_init(adapter);
2239 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
2240 status);
2241 }
2242
2243 if (status != STATUS_SUCCESS) {
2244 if (adapter->activated) {
2245 sxg_global.num_sxg_ports_active--;
2246 adapter->activated = 0;
2247 }
2248 spin_unlock_irqrestore(&sxg_global.driver_lock,
2249 sxg_global.flags);
2250 return (status);
2251 }
2252 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
2253 sxg_set_interrupt_aggregation(adapter);
2254 napi_enable(&adapter->napi);
2255
2256 /* Enable interrupts */
2257 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2258
2259 DBG_ERROR("sxg: %s EXIT\n", __func__);
2260
2261 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2262 mod_timer(&adapter->watchdog_timer, jiffies);
2263
2264 return STATUS_SUCCESS;
2265 }
2266
2267 int sxg_second_open(struct net_device * dev)
2268 {
2269 struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev);
2270 int status = 0;
2271
2272 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2273 netif_start_queue(adapter->netdev);
2274 adapter->state = ADAPT_UP;
2275 adapter->linkstate = LINK_UP;
2276
2277 status = sxg_initialize_adapter(adapter);
2278 sxg_set_interrupt_aggregation(adapter);
2279 napi_enable(&adapter->napi);
2280 /* Re-enable interrupts */
2281 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2282
2283 sxg_register_intr(adapter);
2284 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2285 mod_timer(&adapter->watchdog_timer, jiffies);
2286 return (STATUS_SUCCESS);
2287
2288 }
2289
2290 static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
2291 {
2292 u32 mmio_start = 0;
2293 u32 mmio_len = 0;
2294
2295 struct net_device *dev = pci_get_drvdata(pcidev);
2296 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2297
2298 flush_scheduled_work();
2299
2300 /* Deallocate Resources */
2301 unregister_netdev(dev);
2302 sxg_reset_interrupt_capability(adapter);
2303 sxg_free_resources(adapter);
2304
2305 ASSERT(adapter);
2306
2307 mmio_start = pci_resource_start(pcidev, 0);
2308 mmio_len = pci_resource_len(pcidev, 0);
2309
2310 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
2311 mmio_start, mmio_len);
2312 release_mem_region(mmio_start, mmio_len);
2313
2314 mmio_start = pci_resource_start(pcidev, 2);
2315 mmio_len = pci_resource_len(pcidev, 2);
2316
2317 DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __FUNCTION__,
2318 mmio_start, mmio_len);
2319 release_mem_region(mmio_start, mmio_len);
2320
2321 pci_disable_device(pcidev);
2322
2323 DBG_ERROR("sxg: %s deallocate device\n", __func__);
2324 kfree(dev);
2325 DBG_ERROR("sxg: %s EXIT\n", __func__);
2326 }
2327
2328 static int sxg_entry_halt(struct net_device *dev)
2329 {
2330 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2331 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2332 int i;
2333 u32 RssIds, IsrCount;
2334 unsigned long flags;
2335
2336 RssIds = SXG_RSS_CPU_COUNT(adapter);
2337 IsrCount = adapter->msi_enabled ? RssIds : 1;
2338 /* Disable interrupts */
2339 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2340 SXG_DISABLE_ALL_INTERRUPTS(adapter);
2341 adapter->state = ADAPT_DOWN;
2342 adapter->linkstate = LINK_DOWN;
2343
2344 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2345 sxg_deregister_interrupt(adapter);
2346 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
2347 mdelay(5000);
2348
2349 del_timer_sync(&adapter->watchdog_timer);
2350 netif_stop_queue(dev);
2351 netif_carrier_off(dev);
2352
2353 napi_disable(&adapter->napi);
2354
2355 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, true);
2356 adapter->devflags_prev = 0;
2357 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
2358 __func__, dev->name, adapter, adapter->state);
2359
2360 spin_lock(&adapter->RcvQLock);
2361 /* Free all the blocks and the buffers, moved from remove() routine */
2362 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
2363 sxg_free_rcvblocks(adapter);
2364 }
2365
2366
2367 InitializeListHead(&adapter->FreeRcvBuffers);
2368 InitializeListHead(&adapter->FreeRcvBlocks);
2369 InitializeListHead(&adapter->AllRcvBlocks);
2370 InitializeListHead(&adapter->FreeSglBuffers);
2371 InitializeListHead(&adapter->AllSglBuffers);
2372
2373 adapter->FreeRcvBufferCount = 0;
2374 adapter->FreeRcvBlockCount = 0;
2375 adapter->AllRcvBlockCount = 0;
2376 adapter->RcvBuffersOnCard = 0;
2377 adapter->PendingRcvCount = 0;
2378
2379 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
2380 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
2381 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
2382 for (i = 0; i < SXG_MAX_RING_SIZE; i++)
2383 adapter->RcvRingZeroInfo.Context[i] = NULL;
2384 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
2385 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
2386
2387 spin_unlock(&adapter->RcvQLock);
2388
2389 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2390 adapter->AllSglBufferCount = 0;
2391 adapter->FreeSglBufferCount = 0;
2392 adapter->PendingXmtCount = 0;
2393 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
2394 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
2395 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2396
2397 for (i = 0; i < SXG_MAX_RSS; i++) {
2398 adapter->NextEvent[i] = 0;
2399 }
2400 atomic_set(&adapter->pending_allocations, 0);
2401 adapter->intrregistered = 0;
2402 sxg_remove_isr(adapter);
2403 DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name);
2404 return (STATUS_SUCCESS);
2405 }
2406
2407 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2408 {
2409 ASSERT(rq);
2410 /* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
2411 switch (cmd) {
2412 case SIOCSLICSETINTAGG:
2413 {
2414 /* struct adapter_t *adapter = (struct adapter_t *)
2415 * netdev_priv(dev);
2416 */
2417 u32 data[7];
2418 u32 intagg;
2419
2420 if (copy_from_user(data, rq->ifr_data, 28)) {
2421 DBG_ERROR("copy_from_user FAILED getting \
2422 initial params\n");
2423 return -EFAULT;
2424 }
2425 intagg = data[0];
2426 printk(KERN_EMERG
2427 "%s: set interrupt aggregation to %d\n",
2428 __func__, intagg);
2429 return 0;
2430 }
2431
2432 default:
2433 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
2434 return -EOPNOTSUPP;
2435 }
2436 return 0;
2437 }
2438
2439 #define NORMAL_ETHFRAME 0
2440
2441 /*
2442 * sxg_send_packets - Send a skb packet
2443 *
2444 * Arguments:
2445 * skb - The packet to send
2446 * dev - Our linux net device that refs our adapter
2447 *
2448 * Return:
2449 * 0 regardless of outcome XXXTODO refer to e1000 driver
2450 */
2451 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
2452 {
2453 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2454 u32 status = STATUS_SUCCESS;
2455
2456 /*
2457 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2458 * skb);
2459 */
2460
2461 /* Check the adapter state */
2462 switch (adapter->State) {
2463 case SXG_STATE_INITIALIZING:
2464 case SXG_STATE_HALTED:
2465 case SXG_STATE_SHUTDOWN:
2466 ASSERT(0); /* unexpected */
2467 /* fall through */
2468 case SXG_STATE_RESETTING:
2469 case SXG_STATE_SLEEP:
2470 case SXG_STATE_BOOTDIAG:
2471 case SXG_STATE_DIAG:
2472 case SXG_STATE_HALTING:
2473 status = STATUS_FAILURE;
2474 break;
2475 case SXG_STATE_RUNNING:
2476 if (adapter->LinkState != SXG_LINK_UP) {
2477 status = STATUS_FAILURE;
2478 }
2479 break;
2480 default:
2481 ASSERT(0);
2482 status = STATUS_FAILURE;
2483 }
2484 if (status != STATUS_SUCCESS) {
2485 goto xmit_fail;
2486 }
2487 /* send a packet */
2488 status = sxg_transmit_packet(adapter, skb);
2489 if (status == STATUS_SUCCESS) {
2490 goto xmit_done;
2491 }
2492
2493 xmit_fail:
2494 /* reject & complete all the packets if they can't be sent */
2495 if (status != STATUS_SUCCESS) {
2496 #if XXXTODO
2497 /* sxg_send_packets_fail(adapter, skb, status); */
2498 #else
2499 SXG_DROP_DUMB_SEND(adapter, skb);
2500 adapter->stats.tx_dropped++;
2501 return NETDEV_TX_BUSY;
2502 #endif
2503 }
2504 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
2505 status);
2506
2507 xmit_done:
2508 return NETDEV_TX_OK;
2509 }
2510
2511 /*
2512 * sxg_transmit_packet
2513 *
2514 * This function transmits a single packet.
2515 *
2516 * Arguments -
2517 * adapter - Pointer to our adapter structure
2518 * skb - The packet to be sent
2519 *
2520 * Return - STATUS of send
2521 */
2522 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2523 {
2524 struct sxg_x64_sgl *pSgl;
2525 struct sxg_scatter_gather *SxgSgl;
2526 unsigned long sgl_flags;
2527 /* void *SglBuffer; */
2528 /* u32 SglBufferLength; */
2529
2530 /*
2531 * The vast majority of work is done in the shared
2532 * sxg_dumb_sgl routine.
2533 */
2534 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2535 adapter, skb, 0, 0);
2536
2537 /* Allocate a SGL buffer */
2538 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
2539 if (!SxgSgl) {
2540 adapter->Stats.NoSglBuf++;
2541 adapter->stats.tx_errors++;
2542 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2543 adapter, skb, 0, 0);
2544 return (STATUS_RESOURCES);
2545 }
2546 ASSERT(SxgSgl->adapter == adapter);
2547 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2548 SglBufferLength = SXG_SGL_BUF_SIZE; */
2549 SxgSgl->VlanTag.VlanTci = 0;
2550 SxgSgl->VlanTag.VlanTpid = 0;
2551 SxgSgl->Type = SXG_SGL_DUMB;
2552 SxgSgl->DumbPacket = skb;
2553 pSgl = NULL;
2554
2555 /* Call the common sxg_dumb_sgl routine to complete the send. */
2556 return (sxg_dumb_sgl(pSgl, SxgSgl));
2557 }
2558
2559 /*
2560 * sxg_dumb_sgl
2561 *
2562 * Arguments:
2563 * pSgl -
2564 * SxgSgl - struct sxg_scatter_gather
2565 *
2566 * Return Value:
2567 * Status of send operation.
2568 */
2569 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2570 struct sxg_scatter_gather *SxgSgl)
2571 {
2572 struct adapter_t *adapter = SxgSgl->adapter;
2573 struct sk_buff *skb = SxgSgl->DumbPacket;
2574 /* For now, all dumb-nic sends go on RSS queue zero */
2575 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2576 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2577 struct sxg_cmd *XmtCmd = NULL;
2578 /* u32 Index = 0; */
2579 u32 DataLength = skb->len;
2580 /* unsigned int BufLen; */
2581 /* u32 SglOffset; */
2582 u64 phys_addr;
2583 unsigned long flags;
2584 unsigned long queue_id=0;
2585
2586 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2587 pSgl, SxgSgl, 0, 0);
2588
2589 /* Set aside a pointer to the sgl */
2590 SxgSgl->pSgl = pSgl;
2591
2592 /* Sanity check that our SGL format is as we expect. */
2593 ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
2594 /* Shouldn't be a vlan tag on this frame */
2595 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2596 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2597
2598 /*
2599 * From here below we work with the SGL placed in our
2600 * buffer.
2601 */
2602
2603 SxgSgl->Sgl.NumberOfElements = 1;
2604 /*
2605 * Set ucode Queue ID based on bottom bits of destination TCP port.
2606 * This Queue ID splits slowpath/dumb-nic packet processing across
2607 * multiple threads on the card to improve performance. It is split
2608 * using the TCP port to avoid out-of-order packets that can result
2609 * from multithreaded processing. We use the destination port because
2610 * we expect to be run on a server, so in nearly all cases the local
2611 * port is likely to be constant (well-known server port) and the
2612 * remote port is likely to be random. The exception to this is iSCSI,
2613 * in which case we use the sport instead. Note
2614 * that original attempt at XOR'ing source and dest port resulted in
2615 * poor balance on NTTTCP/iometer applications since they tend to
2616 * line up (even-even, odd-odd..).
2617 */
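/*
 * Illustrative example (assumes four send queues, i.e. an effective
 * SXG_LARGE_SEND_QUEUE_MASK of 0x3, per the "four queues" note at the
 * XmtCmd write below): a non-iSCSI TCP segment with destination port
 * 49154 (0xC002) would select queue_id 0xC002 & 0x3 = 2, while a
 * segment destined for the well-known iSCSI port is spread by its
 * source port instead.
 */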
2618
2619 if (skb->protocol == htons(ETH_P_IP)) {
2620 struct iphdr *ip;
2621
2622 ip = ip_hdr(skb);
2623 if (ip->protocol != IPPROTO_TCP || !tcp_hdr(skb))
2624 queue_id = 0;
2625 else if ((ip->protocol == IPPROTO_TCP)&&(DataLength >= sizeof(
2626 struct tcphdr))){
2627 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2628 (ntohs (tcp_hdr(skb)->source) &
2629 SXG_LARGE_SEND_QUEUE_MASK):
2630 (ntohs(tcp_hdr(skb)->dest) &
2631 SXG_LARGE_SEND_QUEUE_MASK));
2632 }
2633 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2634 if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP || !tcp_hdr(skb))
2635 queue_id = 0;
2636 else if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength
2637 >= sizeof(struct tcphdr)) ) {
2638 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2639 (ntohs (tcp_hdr(skb)->source) &
2640 SXG_LARGE_SEND_QUEUE_MASK):
2641 (ntohs(tcp_hdr(skb)->dest) &
2642 SXG_LARGE_SEND_QUEUE_MASK));
2643 }
2644 }
2645
2646 /* Grab the spinlock and acquire a command */
2647 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2648 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2649 if (XmtCmd == NULL) {
2650 /*
2651 * Call sxg_complete_slow_send to see if we can
2652 * free up any XmtRingZero entries and then try again
2653 */
2654
2655 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2656 sxg_complete_slow_send(adapter);
2657 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2658 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2659 if (XmtCmd == NULL) {
2660 adapter->Stats.XmtZeroFull++;
2661 goto abortcmd;
2662 }
2663 }
2664 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2665 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2666 /* Update stats */
2667 adapter->stats.tx_packets++;
2668 adapter->stats.tx_bytes += DataLength;
2669 #if XXXTODO /* Stats stuff */
2670 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2671 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2672 adapter->Stats.DumbXmtBcastPkts++;
2673 adapter->Stats.DumbXmtBcastBytes += DataLength;
2674 } else {
2675 adapter->Stats.DumbXmtMcastPkts++;
2676 adapter->Stats.DumbXmtMcastBytes += DataLength;
2677 }
2678 } else {
2679 adapter->Stats.DumbXmtUcastPkts++;
2680 adapter->Stats.DumbXmtUcastBytes += DataLength;
2681 }
2682 #endif
2683 /*
2684 * Fill in the command
2685 * Copy out the first SGE to the command and adjust for offset
2686 */
2687 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
2688 PCI_DMA_TODEVICE);
2689
2690 /*
2691 * SAHARA SGL WORKAROUND
2692 * See if the buffer straddles a 64k boundary. If so, the
2693 * Rev A workaround below simply drops the packet.
2694 */
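/*
 * A minimal sketch of the boundary test, assuming SXG_INVALID_SGL()
 * flags a buffer whose first and last bytes fall in different 64 KB
 * regions (the macro itself lives in the sxg headers):
 *
 *	straddles = ((phys_addr ^ (phys_addr + len - 1)) & ~0xFFFFULL) != 0;
 */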
2695
2696 if ((adapter->asictype == SAHARA_REV_A) &&
2697 (SXG_INVALID_SGL(phys_addr,skb->data_len)))
2698 {
2699 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2700 /* Silently drop this packet */
2701 printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n");
2702 return STATUS_SUCCESS;
2703 }
2704 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2705 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
2706 XmtCmd->Buffer.FirstSgeLength = DataLength;
2707 XmtCmd->Buffer.SgeOffset = 0;
2708 XmtCmd->Buffer.TotalLength = DataLength;
2709 XmtCmd->SgEntries = 1;
2710 XmtCmd->Flags = 0;
2711
2712 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2713 /*
2714 * We need to set the checksum in the IP header to 0. This is
2715 * required by hardware.
2716 */
2717 ip_hdr(skb)->check = 0x0;
2718 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP;
2719 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP;
2720 /* Don't know if length will require a change in case of VLAN */
2721 XmtCmd->CsumFlags.MacLen = ETH_HLEN;
2722 XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >>
2723 SXG_NW_HDR_LEN_SHIFT;
2724 }
2725 /*
2726 * Advance transmit cmd descriptor by 1.
2727 * NOTE - See comments in SxgTcpOutput where we write
2728 * to the XmtCmd register regarding CPU ID values and/or
2729 * multiple commands.
2730 * Top 16 bits specify queue_id. See comments about queue_id above
2731 */
2732 /* Four queues at the moment */
2733 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
2734 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
2735 adapter->Stats.XmtQLen++; /* Stats within lock */
2736 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2737 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2738 XmtCmd, pSgl, SxgSgl, 0);
2739 return STATUS_SUCCESS;
2740
2741 abortcmd:
2742 /*
2743 * NOTE - Only jump to this label AFTER grabbing the
2744 * XmtZeroLock, and DO NOT DROP IT between the
2745 * command allocation and the following abort.
2746 */
2747 if (XmtCmd) {
2748 SXG_ABORT_CMD(XmtRingInfo);
2749 }
2750 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2751
2752 /*
2753 * failsgl:
2754 * Jump to this label if failure occurs before the
2755 * XmtZeroLock is grabbed
2756 */
2757 adapter->stats.tx_errors++;
2758 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2759 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2760 /* SxgSgl->DumbPacket is the skb */
2761 // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
2762
2763 return STATUS_FAILURE;
2764 }
2765
2766 /*
2767 * Link management functions
2768 *
2769 * sxg_initialize_link - Initialize the link stuff
2770 *
2771 * Arguments -
2772 * adapter - A pointer to our adapter structure
2773 *
2774 * Return
2775 * status
2776 */
2777 static int sxg_initialize_link(struct adapter_t *adapter)
2778 {
2779 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2780 u32 Value;
2781 u32 ConfigData;
2782 u32 MaxFrame;
2783 u32 AxgMacReg1;
2784 int status;
2785
2786 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2787 adapter, 0, 0, 0);
2788
2789 /* Reset PHY and XGXS module */
2790 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2791
2792 /* Reset transmit configuration register */
2793 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2794
2795 /* Reset receive configuration register */
2796 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2797
2798 /* Reset all MAC modules */
2799 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2800
2801 /*
2802 * Link address 0
2803 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
2804 * is stored with the first octet (0a) in byte 0
2805 * of the Mac address. Possibly reverse?
2806 */
2807 Value = *(u32 *) adapter->macaddr;
2808 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
2809 /* also write the MAC address to the MAC. Endian is reversed. */
2810 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
2811 Value = (*(u16 *) & adapter->macaddr[4] & 0x0000FFFF);
2812 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
2813 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
2814 Value = ntohl(Value);
2815 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
2816 /* Link address 1 */
2817 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2818 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
2819 /* Link address 2 */
2820 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2821 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
2822 /* Link address 3 */
2823 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2824 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2825
2826 /* Enable MAC modules */
2827 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2828
2829 /* Configure MAC */
2830 AxgMacReg1 = ( /* Enable XMT */
2831 AXGMAC_CFG1_XMT_EN |
2832 /* Enable receive */
2833 AXGMAC_CFG1_RCV_EN |
2834 /* short frame detection */
2835 AXGMAC_CFG1_SHORT_ASSERT |
2836 /* Verify frame length */
2837 AXGMAC_CFG1_CHECK_LEN |
2838 /* Generate FCS */
2839 AXGMAC_CFG1_GEN_FCS |
2840 /* Pad frames to 64 bytes */
2841 AXGMAC_CFG1_PAD_64);
2842
2843 if (adapter->XmtFcEnabled) {
2844 AxgMacReg1 |= AXGMAC_CFG1_XMT_PAUSE; /* Allow sending of pause */
2845 }
2846 if (adapter->RcvFcEnabled) {
2847 AxgMacReg1 |= AXGMAC_CFG1_RCV_PAUSE; /* Enable detection of pause */
2848 }
2849
2850 WRITE_REG(HwRegs->MacConfig1, AxgMacReg1, TRUE);
2851
2852 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
2853 if (adapter->JumboEnabled) {
2854 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2855 }
2856 /*
2857 * AMIIM Configuration Register -
2858 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2859 * (bottom bits) of this register is used to determine the MDC frequency
2860 * as specified in the A-XGMAC Design Document. This value must not be
2861 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2862 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2863 * frequency of 2.5 MHz (see the PHY spec), we get:
2864 * 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2865 * This value happens to be the default value for this register, so we
2866 * really don't have to do this.
2867 */
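	/*
	 * Worked check of the formula above: with X = 62 the MDC rate is
	 * 312.5 MHz / (2 * (62 + 1)) = ~2.48 MHz, which stays under the
	 * 2.5 MHz limit, whereas X = 61 would give ~2.52 MHz.
	 */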
2868 if (adapter->asictype == SAHARA_REV_B) {
2869 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000001F, TRUE);
2870 } else {
2871 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2872 }
2873
2874 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
2875 WRITE_REG(HwRegs->LinkStatus,
2876 (LS_PHY_CLR_RESET |
2877 LS_XGXS_ENABLE |
2878 LS_XGXS_CTL |
2879 LS_PHY_CLK_EN |
2880 LS_ATTN_ALARM),
2881 TRUE);
2882 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2883
2884 /*
2885 * Per information given by Aeluros, wait 100 ms after removing reset.
2886 * It's not enough to wait for the self-clearing reset bit in reg 0 to
2887 * clear.
2888 */
2889 mdelay(100);
2890
2891 /* Verify the PHY has come up by checking that the Reset bit has
2892 * cleared.
2893 */
2894 status = sxg_read_mdio_reg(adapter,
2895 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2896 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2897 &Value);
2898 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2899 (Value & PMA_CONTROL1_RESET));
2900 if (status != STATUS_SUCCESS)
2901 return (STATUS_FAILURE);
2902 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
2903 return (STATUS_FAILURE);
2904
2905 /* The SERDES should be initialized by now - confirm */
2906 READ_REG(HwRegs->LinkStatus, Value);
2907 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
2908 return (STATUS_FAILURE);
2909
2910 /* The XAUI link should also be up - confirm */
2911 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
2912 return (STATUS_FAILURE);
2913
2914 /* Initialize the PHY */
2915 status = sxg_phy_init(adapter);
2916 if (status != STATUS_SUCCESS)
2917 return (STATUS_FAILURE);
2918
2919 /* Enable the Link Alarm */
2920
2921 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2922 * LASI_CONTROL - LASI control register
2923 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2924 */
2925 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2926 LASI_CONTROL,
2927 LASI_CTL_LS_ALARM_ENABLE);
2928 if (status != STATUS_SUCCESS)
2929 return (STATUS_FAILURE);
2930
2931 /* XXXTODO - temporary - verify bit is set */
2932
2933 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2934 * LASI_CONTROL - LASI control register
2935 */
2936 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2937 LASI_CONTROL,
2938 &Value);
2939
2940 if (status != STATUS_SUCCESS)
2941 return (STATUS_FAILURE);
2942 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2943 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2944 }
2945 /* Enable receive */
2946 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2947 ConfigData = (RCV_CONFIG_ENABLE |
2948 RCV_CONFIG_ENPARSE |
2949 RCV_CONFIG_RCVBAD |
2950 RCV_CONFIG_RCVPAUSE |
2951 RCV_CONFIG_TZIPV6 |
2952 RCV_CONFIG_TZIPV4 |
2953 RCV_CONFIG_HASH_16 |
2954 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
2955
2956 if (adapter->asictype == SAHARA_REV_B) {
2957 ConfigData |= (RCV_CONFIG_HIPRICTL |
2958 RCV_CONFIG_NEWSTATUSFMT);
2959 }
2960 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2961
2962 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2963
2964 /* Mark the link as down. We'll get a link event when it comes up. */
2965 sxg_link_state(adapter, SXG_LINK_DOWN);
2966
2967 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2968 adapter, 0, 0, 0);
2969 return (STATUS_SUCCESS);
2970 }
2971
2972 /*
2973 * sxg_phy_init - Initialize the PHY
2974 *
2975 * Arguments -
2976 * adapter - A pointer to our adapter structure
2977 *
2978 * Return
2979 * status
2980 */
2981 static int sxg_phy_init(struct adapter_t *adapter)
2982 {
2983 u32 Value;
2984 struct phy_ucode *p;
2985 int status;
2986
2987 DBG_ERROR("ENTER %s\n", __func__);
2988
2989 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2990 * 0xC205 - PHY ID register (?)
2991 * &Value - XXXTODO - add def
2992 */
2993 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2994 0xC205,
2995 &Value);
2996 if (status != STATUS_SUCCESS)
2997 return (STATUS_FAILURE);
2998
2999 if (Value == 0x0012) {
3000 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
3001 DBG_ERROR("AEL2005C PHY detected. Downloading PHY \
3002 microcode.\n");
3003
3004 /* Initialize AEL2005C PHY and download PHY microcode */
3005 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
3006 if (p->Addr == 0) {
3007 /* if address == 0, data == sleep time in ms */
3008 mdelay(p->Data);
3009 } else {
3010 /* write the given data to the specified address */
3011 status = sxg_write_mdio_reg(adapter,
3012 MIIM_DEV_PHY_PMA,
3013 /* PHY address */
3014 p->Addr,
3015 /* PHY data */
3016 p->Data);
3017 if (status != STATUS_SUCCESS)
3018 return (STATUS_FAILURE);
3019 }
3020 }
3021 }
3022 DBG_ERROR("EXIT %s\n", __func__);
3023
3024 return (STATUS_SUCCESS);
3025 }
3026
3027 /*
3028 * sxg_link_event - Process a link event notification from the card
3029 *
3030 * Arguments -
3031 * adapter - A pointer to our adapter structure
3032 *
3033 * Return
3034 * None
3035 */
3036 static void sxg_link_event(struct adapter_t *adapter)
3037 {
3038 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
3039 struct net_device *netdev = adapter->netdev;
3040 enum SXG_LINK_STATE LinkState;
3041 int status;
3042 u32 Value;
3043
3044 if (adapter->state == ADAPT_DOWN)
3045 return;
3046 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
3047 adapter, 0, 0, 0);
3048 DBG_ERROR("ENTER %s\n", __func__);
3049
3050 /* Check the Link Status register. We should have a Link Alarm. */
3051 READ_REG(HwRegs->LinkStatus, Value);
3052 if (Value & LS_LINK_ALARM) {
3053 /*
3054 * We got a Link Status alarm. First, pause to let the
3055 * link state settle (it can bounce a number of times)
3056 */
3057 mdelay(10);
3058
3059 /* Now clear the alarm by reading the LASI status register. */
3060 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3061 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3062 /* LASI status register */
3063 LASI_STATUS,
3064 &Value);
3065 if (status != STATUS_SUCCESS) {
3066 DBG_ERROR("Error reading LASI Status MDIO register!\n");
3067 sxg_link_state(adapter, SXG_LINK_DOWN);
3068 /* ASSERT(0); */
3069 }
3070 /*
3071 * We used to assert that the LASI_LS_ALARM bit was set, as
3072 * it should be. But there appears to be cases during
3073 * initialization (when the PHY is reset and re-initialized)
3074 * when we get a link alarm, but the status bit is 0 when we
3075 * read it. Rather than trying to ensure this never happens
3076 * (and never being certain), just ignore it.
3077
3078 * ASSERT(Value & LASI_STATUS_LS_ALARM);
3079 */
3080
3081 /* Now get and set the link state */
3082 LinkState = sxg_get_link_state(adapter);
3083 sxg_link_state(adapter, LinkState);
3084 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
3085 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
3086 if (LinkState == SXG_LINK_UP) {
3087 netif_carrier_on(netdev);
3088 netif_tx_start_all_queues(netdev);
3089 } else {
3090 netif_tx_stop_all_queues(netdev);
3091 netif_carrier_off(netdev);
3092 }
3093 } else {
3094 /*
3095 * XXXTODO - Assuming Link Attention is only being generated
3096 * for the Link Alarm pin (and not for an XAUI Link Status change),
3097 * then it's impossible to get here. Yet we've gotten here
3098 * twice (under extreme conditions - bouncing the link up and
3099 * down many times a second). Needs further investigation.
3100 */
3101 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
3102 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
3103 /* ASSERT(0); */
3104 }
3105 DBG_ERROR("EXIT %s\n", __func__);
3106
3107 }
3108
3109 /*
3110 * sxg_get_link_state - Determine if the link is up or down
3111 *
3112 * Arguments -
3113 * adapter - A pointer to our adapter structure
3114 *
3115 * Return
3116 * Link State
3117 */
3118 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
3119 {
3120 int status;
3121 u32 Value;
3122
3123 DBG_ERROR("ENTER %s\n", __func__);
3124
3125 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
3126 adapter, 0, 0, 0);
3127
3128 /*
3129 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
3130 * the following 3 bits (from 3 different MDIO registers) are all true.
3131 */
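	/*
	 * Concretely, the three checks below are: PMA/PMD receive signal
	 * detect (PMA_RCV_DETECT), PCS 10GBASE-R block lock
	 * (PCS_10B_BLOCK_LOCK) and XS lane alignment (XS_LANE_ALIGN).
	 */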
3132
3133 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3134 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3135 /* PMA/PMD Receive Signal Detect register */
3136 PHY_PMA_RCV_DET,
3137 &Value);
3138 if (status != STATUS_SUCCESS)
3139 goto bad;
3140
3141 /* If PMA/PMD receive signal detect is 0, then the link is down */
3142 if (!(Value & PMA_RCV_DETECT))
3143 return (SXG_LINK_DOWN);
3144
3145 /* MIIM_DEV_PHY_PCS - PHY PCS module */
3146 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
3147 /* PCS 10GBASE-R Status 1 register */
3148 PHY_PCS_10G_STATUS1,
3149 &Value);
3150 if (status != STATUS_SUCCESS)
3151 goto bad;
3152
3153 /* If PCS is not locked to receive blocks, then the link is down */
3154 if (!(Value & PCS_10B_BLOCK_LOCK))
3155 return (SXG_LINK_DOWN);
3156
3157 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
3158 /* XS Lane Status register */
3159 PHY_XS_LANE_STATUS,
3160 &Value);
3161 if (status != STATUS_SUCCESS)
3162 goto bad;
3163
3164 /* If XS transmit lanes are not aligned, then the link is down */
3165 if (!(Value & XS_LANE_ALIGN))
3166 return (SXG_LINK_DOWN);
3167
3168 /* All 3 bits are true, so the link is up */
3169 DBG_ERROR("EXIT %s\n", __func__);
3170
3171 return (SXG_LINK_UP);
3172
3173 bad:
3174 /* An error occurred reading an MDIO register. This shouldn't happen. */
3175 DBG_ERROR("Error reading an MDIO register!\n");
3176 ASSERT(0);
3177 return (SXG_LINK_DOWN);
3178 }
3179
3180 static void sxg_indicate_link_state(struct adapter_t *adapter,
3181 enum SXG_LINK_STATE LinkState)
3182 {
3183 if (adapter->LinkState == SXG_LINK_UP) {
3184 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
3185 __func__);
3186 netif_start_queue(adapter->netdev);
3187 } else {
3188 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
3189 __func__);
3190 netif_stop_queue(adapter->netdev);
3191 }
3192 }
3193
3194 /*
3195 * sxg_change_mtu - Change the Maximum Transfer Unit
3196 * @returns 0 on success, negative on failure
3197 */
3198 int sxg_change_mtu (struct net_device *netdev, int new_mtu)
3199 {
3200 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev);
3201
3202 if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU)))
3203 return -EINVAL;
3204
3205 if(new_mtu == netdev->mtu)
3206 return 0;
3207
3208 netdev->mtu = new_mtu;
3209
3210 if (new_mtu == SXG_JUMBO_MTU) {
3211 adapter->JumboEnabled = TRUE;
3212 adapter->FrameSize = JUMBOMAXFRAME;
3213 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
3214 } else {
3215 adapter->JumboEnabled = FALSE;
3216 adapter->FrameSize = ETHERMAXFRAME;
3217 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
3218 }
3219
3220 sxg_entry_halt(netdev);
3221 sxg_entry_open(netdev);
3222 return 0;
3223 }
3224
3225 /*
3226 * sxg_link_state - Set the link state and if necessary, indicate.
3227 * This routine is the central point of processing for all link state changes.
3228 * Nothing else in the driver should alter the link state or perform
3229 * link state indications
3230 *
3231 * Arguments -
3232 * adapter - A pointer to our adapter structure
3233 * LinkState - The link state
3234 *
3235 * Return
3236 * None
3237 */
3238 static void sxg_link_state(struct adapter_t *adapter,
3239 enum SXG_LINK_STATE LinkState)
3240 {
3241 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
3242 adapter, LinkState, adapter->LinkState, adapter->State);
3243
3244 DBG_ERROR("ENTER %s\n", __func__);
3245
3246 /*
3247 * Hold the adapter lock during this routine. Maybe move
3248 * the lock to the caller.
3249 */
3250 /* IMP TODO : Check if we can survive without taking this lock */
3251 // spin_lock(&adapter->AdapterLock);
3252 if (LinkState == adapter->LinkState) {
3253 /* Nothing changed.. */
3254 // spin_unlock(&adapter->AdapterLock);
3255 DBG_ERROR("EXIT #0 %s. Link status = %d\n",
3256 __func__, LinkState);
3257 return;
3258 }
3259 /* Save the adapter state */
3260 adapter->LinkState = LinkState;
3261
3262 /* Drop the lock and indicate link state */
3263 // spin_unlock(&adapter->AdapterLock);
3264 DBG_ERROR("EXIT #1 %s\n", __func__);
3265
3266 sxg_indicate_link_state(adapter, LinkState);
3267 }
3268
3269 /*
3270 * sxg_write_mdio_reg - Write to a register on the MDIO bus
3271 *
3272 * Arguments -
3273 * adapter - A pointer to our adapter structure
3274 * DevAddr - MDIO device number being addressed
3275 * RegAddr - register address for the specified MDIO device
3276 * Value - value to write to the MDIO register
3277 *
3278 * Return
3279 * status
3280 */
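/*
 * Example (as used by sxg_initialize_link above): enabling the PHY
 * link alarm is a single write,
 *	sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
 *			   LASI_CONTROL, LASI_CTL_LS_ALARM_ENABLE);
 */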
3281 static int sxg_write_mdio_reg(struct adapter_t *adapter,
3282 u32 DevAddr, u32 RegAddr, u32 Value)
3283 {
3284 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
3285 /* Address operation (written to MIIM field reg) */
3286 u32 AddrOp;
3287 /* Write operation (written to MIIM field reg) */
3288 u32 WriteOp;
3289 u32 Cmd;/* Command (written to MIIM command reg) */
3290 u32 ValueRead;
3291 u32 Timeout;
3292
3293 /* DBG_ERROR("ENTER %s\n", __func__); */
3294
3295 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
3296 adapter, 0, 0, 0);
3297
3298 /* Ensure values don't exceed field width */
3299 DevAddr &= 0x001F; /* 5-bit field */
3300 RegAddr &= 0xFFFF; /* 16-bit field */
3301 Value &= 0xFFFF; /* 16-bit field */
3302
3303 /* Set MIIM field register bits for an MIIM address operation */
3304 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3305 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3306 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3307 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3308
3309 /* Set MIIM field register bits for an MIIM write operation */
3310 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3311 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3312 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3313 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
3314
3315 /* Set MIIM command register bits to execute an MIIM command */
3316 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3317
3318 /* Reset the command register command bit (in case it's not 0) */
3319 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3320
3321 /* MIIM write to set the address of the specified MDIO register */
3322 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3323
3324 /* Write to MIIM Command Register to execute to address operation */
3325 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3326
3327 /* Poll AMIIM Indicator register to wait for completion */
3328 Timeout = SXG_LINK_TIMEOUT;
3329 do {
3330 udelay(100); /* Timeout in 100us units */
3331 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3332 if (--Timeout == 0) {
3333 return (STATUS_FAILURE);
3334 }
3335 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3336
3337 /* Reset the command register command bit */
3338 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3339
3340 /* MIIM write to set up an MDIO write operation */
3341 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
3342
3343 /* Write to MIIM Command Register to execute the write operation */
3344 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3345
3346 /* Poll AMIIM Indicator register to wait for completion */
3347 Timeout = SXG_LINK_TIMEOUT;
3348 do {
3349 udelay(100); /* Timeout in 100us units */
3350 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3351 if (--Timeout == 0) {
3352 return (STATUS_FAILURE);
3353 }
3354 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3355
3356 /* DBG_ERROR("EXIT %s\n", __func__); */
3357
3358 return (STATUS_SUCCESS);
3359 }
3360
3361 /*
3362 * sxg_read_mdio_reg - Read a register on the MDIO bus
3363 *
3364 * Arguments -
3365 * adapter - A pointer to our adapter structure
3366 * DevAddr - MDIO device number being addressed
3367 * RegAddr - register address for the specified MDIO device
3368 * pValue - pointer to where to put data read from the MDIO register
3369 *
3370 * Return
3371 * status
3372 */
3373 static int sxg_read_mdio_reg(struct adapter_t *adapter,
3374 u32 DevAddr, u32 RegAddr, u32 *pValue)
3375 {
3376 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
3377 u32 AddrOp; /* Address operation (written to MIIM field reg) */
3378 u32 ReadOp; /* Read operation (written to MIIM field reg) */
3379 u32 Cmd; /* Command (written to MIIM command reg) */
3380 u32 ValueRead;
3381 u32 Timeout;
3382
3383 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RdMDIO",
3384 adapter, 0, 0, 0);
3385 DBG_ERROR("ENTER %s\n", __FUNCTION__);
3386
3387 /* Ensure values don't exceed field width */
3388 DevAddr &= 0x001F; /* 5-bit field */
3389 RegAddr &= 0xFFFF; /* 16-bit field */
3390
3391 /* Set MIIM field register bits for an MIIM address operation */
3392 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3393 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3394 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3395 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3396
3397 /* Set MIIM field register bits for an MIIM read operation */
3398 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3399 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3400 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3401 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
3402
3403 /* Set MIIM command register bits to execute an MIIM command */
3404 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3405
3406 /* Reset the command register command bit (in case it's not 0) */
3407 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3408
3409 /* MIIM write to set the address of the specified MDIO register */
3410 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3411
3412 /* Write to MIIM Command Register to execute to address operation */
3413 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3414
3415 /* Poll AMIIM Indicator register to wait for completion */
3416 Timeout = SXG_LINK_TIMEOUT;
3417 do {
3418 udelay(100); /* Timeout in 100us units */
3419 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3420 if (--Timeout == 0) {
3421 DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __FUNCTION__);
3422
3423 return (STATUS_FAILURE);
3424 }
3425 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3426
3427 /* Reset the command register command bit */
3428 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3429
3430 /* MIIM write to set up an MDIO register read operation */
3431 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
3432
3433 /* Write to MIIM Command Register to execute the read operation */
3434 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3435
3436 /* Poll AMIIM Indicator register to wait for completion */
3437 Timeout = SXG_LINK_TIMEOUT;
3438 do {
3439 udelay(100); /* Timeout in 100us units */
3440 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3441 if (--Timeout == 0) {
3442 DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __FUNCTION__);
3443
3444 return (STATUS_FAILURE);
3445 }
3446 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3447
3448 /* Read the MDIO register data back from the field register */
3449 READ_REG(HwRegs->MacAmiimField, *pValue);
3450 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
3451
3452 DBG_ERROR("EXIT %s\n", __FUNCTION__);
3453
3454 return (STATUS_SUCCESS);
3455 }
3456
3457 /*
3458 * Functions to obtain the CRC corresponding to the destination mac address.
3459 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
3460 * the polynomial:
3461 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
3462 * + x^4 + x^2 + x^1.
3463 *
3464 * After the CRC for the 6 bytes is generated (but before the value is
3465 * complemented), we must then transpose the value and return bits 30-23.
3466 */
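/*
 * The 8-bit hash produced below is consumed by sxg_mcast_set_bit(),
 * which keeps only the low 6 bits (64 filter entries) and sets the
 * corresponding bit in the 64-bit adapter->MulticastMask.
 */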
3467 static u32 sxg_crc_table[256];/* Table of CRC's for all possible byte values */
3468 static u32 sxg_crc_init; /* Is table initialized */
3469
3470 /* Construct the CRC32 table */
3471 static void sxg_mcast_init_crc32(void)
3472 {
3473 u32 c; /* CRC shift reg */
3474 u32 e = 0; /* Poly X-or pattern */
3475 int i; /* counter */
3476 int k; /* byte being shifted into crc */
3477
3478 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
3479
3480 for (i = 0; i < sizeof(p) / sizeof(int); i++) {
3481 e |= 1L << (31 - p[i]);
3482 }
3483
3484 for (i = 1; i < 256; i++) {
3485 c = i;
3486 for (k = 8; k; k--) {
3487 c = c & 1 ? (c >> 1) ^ e : c >> 1;
3488 }
3489 sxg_crc_table[i] = c;
3490 }
3491 }
3492
3493 /*
3494 * Return the MAC hash as described above.
3495 */
3496 static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
3497 {
3498 u32 crc;
3499 char *p;
3500 int i;
3501 unsigned char machash = 0;
3502
3503 if (!sxg_crc_init) {
3504 sxg_mcast_init_crc32();
3505 sxg_crc_init = 1;
3506 }
3507
3508 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
3509 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
3510 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
3511 }
3512
3513 /* Return bits 1-8, transposed */
3514 for (i = 1; i < 9; i++) {
3515 machash |= (((crc >> i) & 1) << (8 - i));
3516 }
3517
3518 return (machash);
3519 }
3520
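/*
 * sxg_mcast_set_mask - Write the current multicast filter to the card.
 *
 * If promiscuous or all-multicast mode is set, both mask registers are
 * written with all ones; otherwise the accumulated 64-bit hash mask is
 * split across the McastLow and McastHigh registers.
 */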
3521 static void sxg_mcast_set_mask(struct adapter_t *adapter)
3522 {
3523 struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
3524
3525 	DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __func__,
3526 adapter->netdev->name, (unsigned int)adapter->MacFilter,
3527 adapter->MulticastMask);
3528
3529 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
3530 /*
3531 * Turn on all multicast addresses. We have to do this for
3532 * promiscuous mode as well as ALLMCAST mode. It saves the
3533 		 * Microcode from having to keep state about the MAC configuration.
3534 */
3535 /* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n \
3536 * SLUT MODE!!!\n",__func__);
3537 */
3538 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3539 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
3540 /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
3541 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
3542 */
3543
3544 } else {
3545 /*
3546 		 * Commit our multicast mask to the SLIC by writing to the
3547 * multicast address mask registers
3548 */
3549 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3550 __func__, adapter->netdev->name,
3551 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3552 ((ulong)
3553 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3554
3555 WRITE_REG(sxg_regs->McastLow,
3556 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
3557 WRITE_REG(sxg_regs->McastHigh,
3558 (u32) ((adapter->
3559 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
3560 }
3561 }
3562
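/*
 * sxg_mcast_set_bit - Set the bit corresponding to one multicast MAC
 * address in the adapter's 64-bit multicast hash mask.
 */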
3563 static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
3564 {
3565 unsigned char crcpoly;
3566
3567 /* Get the CRC polynomial for the mac address */
3568 crcpoly = sxg_mcast_get_mac_hash(address);
3569
3570 /*
3571 * We only have space on the SLIC for 64 entries. Lop
3572 * off the top two bits. (2^6 = 64)
3573 */
3574 crcpoly &= 0x3F;
3575
3576 /* OR in the new bit into our 64 bit mask. */
3577 adapter->MulticastMask |= (u64) 1 << crcpoly;
3578 }
3579
3580 /*
3581 * Function takes MAC addresses from dev_mc_list and generates the Mask
3582 */
3583
3584 static void sxg_set_mcast_addr(struct adapter_t *adapter)
3585 {
3586 struct dev_mc_list *mclist;
3587 struct net_device *dev = adapter->netdev;
3588 int i;
3589
3590 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
3591 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3592 i++, mclist = mclist->next) {
3593 				sxg_mcast_set_bit(adapter, mclist->da_addr);
3594 }
3595 }
3596 sxg_mcast_set_mask(adapter);
3597 }
3598
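/*
 * sxg_mcast_set_list - Update the MAC filter flags from the net_device
 * flags and refresh the multicast mask from the device's multicast list.
 */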
3599 static void sxg_mcast_set_list(struct net_device *dev)
3600 {
3601 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
3602
3603 ASSERT(adapter);
3604 if (dev->flags & IFF_PROMISC)
3605 adapter->MacFilter |= MAC_PROMISC;
3606 if (dev->flags & IFF_MULTICAST)
3607 adapter->MacFilter |= MAC_MCAST;
3608 if (dev->flags & IFF_ALLMULTI)
3609 adapter->MacFilter |= MAC_ALLMCAST;
3610
3611 	/* XXX handle other flags as well */
3612 sxg_set_mcast_addr(adapter);
3613 }
3614
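/*
 * sxg_free_sgl_buffers - Free every scatter-gather buffer on the
 * adapter's AllSglBuffers list.
 */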
3615 void sxg_free_sgl_buffers(struct adapter_t *adapter)
3616 {
3617 struct list_entry *ple;
3618 struct sxg_scatter_gather *Sgl;
3619
3620 while(!(IsListEmpty(&adapter->AllSglBuffers))) {
3621 ple = RemoveHeadList(&adapter->AllSglBuffers);
3622 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3623 kfree(Sgl);
3624 adapter->AllSglBufferCount--;
3625 }
3626 }
3627
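/*
 * sxg_free_rcvblocks - Free every receive block on the AllRcvBlocks list,
 * releasing the per-buffer packets first and then the DMA-coherent block
 * memory itself.
 */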
3628 void sxg_free_rcvblocks(struct adapter_t *adapter)
3629 {
3630 u32 i;
3631 void *temp_RcvBlock;
3632 struct list_entry *ple;
3633 struct sxg_rcv_block_hdr *RcvBlockHdr;
3634 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3635 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3636 (adapter->state == SXG_STATE_HALTING));
3637 while(!(IsListEmpty(&adapter->AllRcvBlocks))) {
3638
3639 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3640 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3641
3642 if(RcvBlockHdr->VirtualAddress) {
3643 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3644
3645 for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
3646 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3647 RcvDataBufferHdr =
3648 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3649 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3650 }
3651 }
3652
3653 pci_free_consistent(adapter->pcidev,
3654 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3655 RcvBlockHdr->VirtualAddress,
3656 RcvBlockHdr->PhysicalAddress);
3657 adapter->AllRcvBlockCount--;
3658 }
3659 ASSERT(adapter->AllRcvBlockCount == 0);
3660 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3661 adapter, 0, 0, 0);
3662 }
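/*
 * sxg_free_mcast_addrs - Free the linked list of multicast addresses and
 * clear the multicast mask.
 */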
3663 void sxg_free_mcast_addrs(struct adapter_t *adapter)
3664 {
3665 struct sxg_multicast_address *address;
3666 while(adapter->MulticastAddrs) {
3667 address = adapter->MulticastAddrs;
3668 adapter->MulticastAddrs = address->Next;
3669 kfree(address);
3670 }
3671
3672 	adapter->MulticastMask = 0;
3673 }
3674
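/*
 * sxg_unmap_resources - Unmap the hardware and microcode register spaces.
 */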
3675 void sxg_unmap_resources(struct adapter_t *adapter)
3676 {
3677 if(adapter->HwRegs) {
3678 iounmap((void *)adapter->HwRegs);
3679 }
3680 if(adapter->UcodeRegs) {
3681 iounmap((void *)adapter->UcodeRegs);
3682 }
3683
3684 ASSERT(adapter->AllRcvBlockCount == 0);
3685 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3686 adapter, 0, 0, 0);
3687 }
3688
3689
3690
3691 /*
3692 * sxg_free_resources - Free everything allocated in SxgAllocateResources
3693 *
3694 * Arguments -
3695 * adapter - A pointer to our adapter structure
3696 *
3697 * Return
3698 * none
3699 */
3700 void sxg_free_resources(struct adapter_t *adapter)
3701 {
3702 u32 RssIds, IsrCount;
3703 RssIds = SXG_RSS_CPU_COUNT(adapter);
3704 IsrCount = adapter->msi_enabled ? RssIds : 1;
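	/*
	 * The Isr area was allocated as one 32-bit status word per RSS CPU
	 * when MSI is enabled, otherwise a single word; IsrCount must match
	 * that so the pci_free_consistent() below frees the right size.
	 */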
3705
3706 if (adapter->BasicAllocations == FALSE) {
3707 /*
3708 * No allocations have been made, including spinlocks,
3709 * or listhead initializations. Return.
3710 */
3711 return;
3712 }
3713
3714 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
3715 sxg_free_rcvblocks(adapter);
3716 }
3717 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
3718 sxg_free_sgl_buffers(adapter);
3719 }
3720
3721 if (adapter->XmtRingZeroIndex) {
3722 pci_free_consistent(adapter->pcidev,
3723 sizeof(u32),
3724 adapter->XmtRingZeroIndex,
3725 adapter->PXmtRingZeroIndex);
3726 }
3727 if (adapter->Isr) {
3728 pci_free_consistent(adapter->pcidev,
3729 sizeof(u32) * IsrCount,
3730 adapter->Isr, adapter->PIsr);
3731 }
3732
3733 if (adapter->EventRings) {
3734 pci_free_consistent(adapter->pcidev,
3735 sizeof(struct sxg_event_ring) * RssIds,
3736 adapter->EventRings, adapter->PEventRings);
3737 }
3738 if (adapter->RcvRings) {
3739 pci_free_consistent(adapter->pcidev,
3740 sizeof(struct sxg_rcv_ring) * 1,
3741 adapter->RcvRings,
3742 adapter->PRcvRings);
3743 adapter->RcvRings = NULL;
3744 }
3745
3746 if(adapter->XmtRings) {
3747 pci_free_consistent(adapter->pcidev,
3748 sizeof(struct sxg_xmt_ring) * 1,
3749 adapter->XmtRings,
3750 adapter->PXmtRings);
3751 adapter->XmtRings = NULL;
3752 }
3753
3754 if (adapter->ucode_stats) {
3755 pci_unmap_single(adapter->pcidev,
3756 				adapter->pucode_stats,
3757 				sizeof(struct sxg_ucode_stats), PCI_DMA_FROMDEVICE);
3758 adapter->ucode_stats = NULL;
3759 }
3760
3761
3762 /* Unmap register spaces */
3763 sxg_unmap_resources(adapter);
3764
3765 sxg_free_mcast_addrs(adapter);
3766
3767 adapter->BasicAllocations = FALSE;
3768
3769 }
3770
3771 /*
3772 * sxg_allocate_complete -
3773 *
3774 * This routine is called when a memory allocation has completed.
3775 *
3776 * Arguments -
3777 * struct adapter_t * - Our adapter structure
3778 * VirtualAddress - Memory virtual address
3779 * PhysicalAddress - Memory physical address
3780 * Length - Length of memory allocated (or 0)
3781 * Context - The type of buffer allocated
3782 *
3783 * Return
3784  *	int status
3785 */
3786 static int sxg_allocate_complete(struct adapter_t *adapter,
3787 void *VirtualAddress,
3788 dma_addr_t PhysicalAddress,
3789 u32 Length, enum sxg_buffer_type Context)
3790 {
3791 int status = 0;
3792 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3793 adapter, VirtualAddress, Length, Context);
3794 ASSERT(atomic_read(&adapter->pending_allocations));
3795 atomic_dec(&adapter->pending_allocations);
3796
3797 switch (Context) {
3798
3799 case SXG_BUFFER_TYPE_RCV:
3800 status = sxg_allocate_rcvblock_complete(adapter,
3801 VirtualAddress,
3802 PhysicalAddress, Length);
3803 break;
3804 case SXG_BUFFER_TYPE_SGL:
3805 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
3806 VirtualAddress,
3807 PhysicalAddress, Length);
3808 break;
3809 }
3810 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3811 adapter, VirtualAddress, Length, Context);
3812
3813 return status;
3814 }
3815
3816 /*
3817 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3818 * synchronous and asynchronous buffer allocations
3819 *
3820 * Arguments -
3821 * adapter - A pointer to our adapter structure
3822 * Size - block size to allocate
3823 * BufferType - Type of buffer to allocate
3824 *
3825 * Return
3826 * int
3827 */
3828 static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
3829 u32 Size, enum sxg_buffer_type BufferType)
3830 {
3831 int status;
3832 void *Buffer;
3833 dma_addr_t pBuffer;
3834
3835 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3836 adapter, Size, BufferType, 0);
3837 	/*
3838 	 * Track this allocation while it is outstanding.  The pending
3839 	 * allocation count is checked by sxg_stock_rcv_buffers() before
3840 	 * it starts another receive block allocation.
3841 	 */
3842
3843 atomic_inc(&adapter->pending_allocations);
3844
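	/*
	 * Receive blocks are handed to the card by physical address, so they
	 * come from the DMA-coherent pool.  SGL buffers are ordinary kernel
	 * memory and carry no DMA address at this point (pBuffer is left 0).
	 */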
3845 if(BufferType != SXG_BUFFER_TYPE_SGL)
3846 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3847 else {
3848 Buffer = kzalloc(Size, GFP_ATOMIC);
3849 pBuffer = (dma_addr_t)NULL;
3850 }
3851 if (Buffer == NULL) {
3852 /*
3853 * Decrement the AllocationsPending count while holding
3854 * the lock. Pause processing relies on this
3855 */
3856 atomic_dec(&adapter->pending_allocations);
3857 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3858 adapter, Size, BufferType, 0);
3859 return (STATUS_RESOURCES);
3860 }
3861 status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
3862
3863 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3864 adapter, Size, BufferType, status);
3865 return status;
3866 }
3867
3868 /*
3869 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3870 * block allocation
3871 *
3872 * Arguments -
3873 * adapter - A pointer to our adapter structure
3874 * RcvBlock - receive block virtual address
3875 * PhysicalAddress - Physical address
3876 * Length - Memory length
3877 *
3878  * Return - int status
3879 */
3880 static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
3881 void *RcvBlock,
3882 dma_addr_t PhysicalAddress,
3883 u32 Length)
3884 {
3885 u32 i;
3886 u32 BufferSize = adapter->ReceiveBufferSize;
3887 u64 Paddr;
3888 void *temp_RcvBlock;
3889 struct sxg_rcv_block_hdr *RcvBlockHdr;
3890 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3891 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3892 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
3893
3894 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3895 adapter, RcvBlock, Length, 0);
3896 if (RcvBlock == NULL) {
3897 goto fail;
3898 }
3899 memset(RcvBlock, 0, Length);
3900 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3901 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
3902 ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
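	/*
	 * A receive block contains SXG_RCV_DESCRIPTORS_PER_BLOCK data buffer
	 * headers back to back, followed by the receive descriptor block and
	 * the block headers at the offsets computed by the SXG_RCV_*_OFFSET
	 * macros used below.
	 */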
3903 /*
3904 * First, initialize the contained pool of receive data buffers.
3905 * This initialization requires NBL/NB/MDL allocations, if any of them
3906 * fail, free the block and return without queueing the shared memory
3907 */
3908 //RcvDataBuffer = RcvBlock;
3909 temp_RcvBlock = RcvBlock;
3910 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3911 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3912 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
3913 temp_RcvBlock;
3914 /* For FREE macro assertion */
3915 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
3916 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
3917 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
3918 goto fail;
3919
3920 }
3921
3922 /*
3923 * Place this entire block of memory on the AllRcvBlocks queue so it
3924 	 * can be freed later
3925 */
3926
3927 RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
3928 SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
3929 RcvBlockHdr->VirtualAddress = RcvBlock;
3930 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3931 spin_lock(&adapter->RcvQLock);
3932 adapter->AllRcvBlockCount++;
3933 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3934 spin_unlock(&adapter->RcvQLock);
3935
3936 /* Now free the contained receive data buffers that we
3937 * initialized above */
3938 temp_RcvBlock = RcvBlock;
3939 for (i = 0, Paddr = PhysicalAddress;
3940 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3941 i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
3942 temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3943 RcvDataBufferHdr =
3944 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3945 spin_lock(&adapter->RcvQLock);
3946 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3947 spin_unlock(&adapter->RcvQLock);
3948 }
3949
3950 /* Locate the descriptor block and put it on a separate free queue */
3951 RcvDescriptorBlock =
3952 (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
3953 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
3954 (SXG_RCV_DATA_HDR_SIZE));
3955 RcvDescriptorBlockHdr =
3956 (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
3957 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
3958 (SXG_RCV_DATA_HDR_SIZE));
3959 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3960 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3961 spin_lock(&adapter->RcvQLock);
3962 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3963 spin_unlock(&adapter->RcvQLock);
3964 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3965 adapter, RcvBlock, Length, 0);
3966 return STATUS_SUCCESS;
3967 fail:
3968 /* Free any allocated resources */
3969 if (RcvBlock) {
3970 temp_RcvBlock = RcvBlock;
3971 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3972 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3973 RcvDataBufferHdr =
3974 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3975 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3976 }
3977 pci_free_consistent(adapter->pcidev,
3978 Length, RcvBlock, PhysicalAddress);
3979 }
3980 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
3981 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3982 adapter, adapter->FreeRcvBufferCount,
3983 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
3984 adapter->Stats.NoMem++;
3985 /* As allocation failed, free all previously allocated blocks..*/
3986 //sxg_free_rcvblocks(adapter);
3987
3988 return STATUS_RESOURCES;
3989 }
3990
3991 /*
3992 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3993 *
3994 * Arguments -
3995 * adapter - A pointer to our adapter structure
3996 * SxgSgl - struct sxg_scatter_gather buffer
3997 * PhysicalAddress - Physical address
3998 * Length - Memory length
3999 *
4000  * Return - None
4001 */
4002 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
4003 struct sxg_scatter_gather *SxgSgl,
4004 dma_addr_t PhysicalAddress,
4005 u32 Length)
4006 {
4007 unsigned long sgl_flags;
4008 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
4009 adapter, SxgSgl, Length, 0);
4010 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
4011 adapter->AllSglBufferCount++;
4012 /* PhysicalAddress; */
4013 SxgSgl->PhysicalAddress = PhysicalAddress;
4014 /* Initialize backpointer once */
4015 SxgSgl->adapter = adapter;
4016 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
4017 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
4018 SxgSgl->State = SXG_BUFFER_BUSY;
4019 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
4020 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
4021 adapter, SxgSgl, Length, 0);
4022 }
4023
4024
4025 static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
4026 {
4027 /*
4028 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
4029 * funct#[%d]\n", __func__, card->config_set,
4030 * adapter->port, adapter->physport, adapter->functionnumber);
4031 *
4032 * sxg_dbg_macaddrs(adapter);
4033 */
4034 /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
4035 * __FUNCTION__);
4036 */
4037
4038 /* sxg_dbg_macaddrs(adapter); */
4039
4040 	struct net_device *dev = adapter->netdev;
4041 
4042 	if (!dev) {
4043 		printk("sxg: Dev is Null\n");
4044 		return -ENODEV;
4045 	}
4046 	DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
4047
4048 if (netif_running(dev)) {
4049 return -EBUSY;
4050 }
4051 if (!adapter) {
4052 return -EBUSY;
4053 }
4054
4055 if (!(adapter->currmacaddr[0] ||
4056 adapter->currmacaddr[1] ||
4057 adapter->currmacaddr[2] ||
4058 adapter->currmacaddr[3] ||
4059 adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
4060 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
4061 }
4062 if (adapter->netdev) {
4063 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
4064 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
4065 }
4066 /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
4067 sxg_dbg_macaddrs(adapter);
4068
4069 return 0;
4070 }
4071
4072 #if XXXTODO
4073 static int sxg_mac_set_address(struct net_device *dev, void *ptr)
4074 {
4075 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
4076 struct sockaddr *addr = ptr;
4077
4078 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
4079
4080 if (netif_running(dev)) {
4081 return -EBUSY;
4082 }
4083 if (!adapter) {
4084 return -EBUSY;
4085 }
4086 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
4087 __func__, adapter->netdev->name, adapter->currmacaddr[0],
4088 adapter->currmacaddr[1], adapter->currmacaddr[2],
4089 adapter->currmacaddr[3], adapter->currmacaddr[4],
4090 adapter->currmacaddr[5]);
4091 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4092 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
4093 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
4094 __func__, adapter->netdev->name, adapter->currmacaddr[0],
4095 adapter->currmacaddr[1], adapter->currmacaddr[2],
4096 adapter->currmacaddr[3], adapter->currmacaddr[4],
4097 adapter->currmacaddr[5]);
4098
4099 sxg_config_set(adapter, TRUE);
4100 return 0;
4101 }
4102 #endif
4103
4104 /*
4105 * SXG DRIVER FUNCTIONS (below)
4106 *
4107 * sxg_initialize_adapter - Initialize adapter
4108 *
4109 * Arguments -
4110 * adapter - A pointer to our adapter structure
4111 *
4112 * Return - int
4113 */
4114 static int sxg_initialize_adapter(struct adapter_t *adapter)
4115 {
4116 u32 RssIds, IsrCount;
4117 u32 i;
4118 int status;
4119 int sxg_rcv_ring_size = SXG_RCV_RING_SIZE;
4120
4121 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
4122 adapter, 0, 0, 0);
4123
4124 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
4125 IsrCount = adapter->msi_enabled ? RssIds : 1;
4126
4127 /*
4128 * Sanity check SXG_UCODE_REGS structure definition to
4129 * make sure the length is correct
4130 */
4131 ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
4132
4133 /* Disable interrupts */
4134 SXG_DISABLE_ALL_INTERRUPTS(adapter);
4135
4136 /* Set MTU */
4137 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
4138 (adapter->FrameSize == JUMBOMAXFRAME));
4139 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
4140
4141 /* Set event ring base address and size */
4142 WRITE_REG64(adapter,
4143 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
4144 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
4145
4146 /* Per-ISR initialization */
4147 for (i = 0; i < IsrCount; i++) {
4148 u64 Addr;
4149 /* Set interrupt status pointer */
4150 Addr = adapter->PIsr + (i * sizeof(u32));
4151 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
4152 }
4153
4154 /* XMT ring zero index */
4155 WRITE_REG64(adapter,
4156 adapter->UcodeRegs[0].SPSendIndex,
4157 adapter->PXmtRingZeroIndex, 0);
4158
4159 /* Per-RSS initialization */
4160 for (i = 0; i < RssIds; i++) {
4161 /* Release all event ring entries to the Microcode */
4162 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
4163 TRUE);
4164 }
4165
4166 /* Transmit ring base and size */
4167 WRITE_REG64(adapter,
4168 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
4169 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
4170
4171 /* Receive ring base and size */
4172 WRITE_REG64(adapter,
4173 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
4174 if (adapter->JumboEnabled == TRUE)
4175 sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE;
4176 WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE);
4177
4178 /* Populate the card with receive buffers */
4179 sxg_stock_rcv_buffers(adapter);
4180
4181 /*
4182 * Initialize checksum offload capabilities. At the moment we always
4183 * enable IP and TCP receive checksums on the card. Depending on the
4184 * checksum configuration specified by the user, we can choose to
4185 * report or ignore the checksum information provided by the card.
4186 */
4187 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
4188 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
4189
4190 adapter->flags |= (SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED );
4191
4192 /* Initialize the MAC, XAUI */
4193 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
4194 status = sxg_initialize_link(adapter);
4195 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
4196 status);
4197 if (status != STATUS_SUCCESS) {
4198 return (status);
4199 }
4200 /*
4201 * Initialize Dead to FALSE.
4202 * SlicCheckForHang or SlicDumpThread will take it from here.
4203 */
4204 adapter->Dead = FALSE;
4205 adapter->PingOutstanding = FALSE;
4206 adapter->XmtFcEnabled = TRUE;
4207 adapter->RcvFcEnabled = TRUE;
4208
4209 adapter->State = SXG_STATE_RUNNING;
4210
4211 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
4212 adapter, 0, 0, 0);
4213 return (STATUS_SUCCESS);
4214 }
4215
4216 /*
4217 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
4218 * the card. The caller should hold the RcvQLock
4219 *
4220 * Arguments -
4221 * adapter - A pointer to our adapter structure
4222 * RcvDescriptorBlockHdr - Descriptor block to fill
4223 *
4224 * Return
4225 * status
4226 */
4227 static int sxg_fill_descriptor_block(struct adapter_t *adapter,
4228 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
4229 {
4230 	int i;
4231 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4232 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
4233 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
4234 struct sxg_cmd *RingDescriptorCmd;
4235 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4236
4237 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
4238 adapter, adapter->RcvBuffersOnCard,
4239 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4240
4241 ASSERT(RcvDescriptorBlockHdr);
4242
4243 /*
4244 * If we don't have the resources to fill the descriptor block,
4245 * return failure
4246 */
4247 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
4248 SXG_RING_FULL(RcvRingInfo)) {
4249 adapter->Stats.NoMem++;
4250 return (STATUS_FAILURE);
4251 }
4252 /* Get a ring descriptor command */
4253 SXG_GET_CMD(RingZero,
4254 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
4255 ASSERT(RingDescriptorCmd);
4256 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
4257 RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
4258 RcvDescriptorBlockHdr->VirtualAddress;
4259
4260 /* Fill in the descriptor block */
4261 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
4262 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
4263 ASSERT(RcvDataBufferHdr);
4264 // ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket);
4265 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
4266 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
4267 adapter->ReceiveBufferSize);
4268 if(RcvDataBufferHdr->skb)
4269 RcvDataBufferHdr->SxgDumbRcvPacket =
4270 RcvDataBufferHdr->skb;
4271 else
4272 goto no_memory;
4273 }
4274 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
4275 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
4276 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
4277 (void *)RcvDataBufferHdr;
4278
4279 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
4280 RcvDataBufferHdr->PhysicalAddress;
4281 }
4282 /* Add the descriptor block to receive descriptor ring 0 */
4283 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
4284
4285 /*
4286 * RcvBuffersOnCard is not protected via the receive lock (see
4287 	 * sxg_process_event_queue).  We don't want to grab a lock every time a
4288 * buffer is returned to us, so we use atomic interlocked functions
4289 * instead.
4290 */
4291 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
4292
4293 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
4294 RcvDescriptorBlockHdr,
4295 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
4296
4297 	WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, TRUE);
4298 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
4299 adapter, adapter->RcvBuffersOnCard,
4300 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4301 return (STATUS_SUCCESS);
4302 no_memory:
4303 for (; i >= 0 ; i--) {
4304 if (RcvDescriptorBlock->Descriptors[i].VirtualAddress) {
4305 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
4306 RcvDescriptorBlock->Descriptors[i].
4307 VirtualAddress;
4308 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
4309 (dma_addr_t)NULL;
4310 RcvDescriptorBlock->Descriptors[i].VirtualAddress=NULL;
4311 }
4312 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
4313 }
4314 RcvDescriptorBlockHdr->State = SXG_BUFFER_FREE;
4315 SXG_RETURN_CMD(RingZero, RcvRingInfo, RingDescriptorCmd,
4316 RcvDescriptorBlockHdr);
4317
4318 return (-ENOMEM);
4319 }
4320
4321 /*
4322 * sxg_stock_rcv_buffers - Stock the card with receive buffers
4323 *
4324 * Arguments -
4325 * adapter - A pointer to our adapter structure
4326 *
4327 * Return
4328 * None
4329 */
4330 static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
4331 {
4332 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4333 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
4334 int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS;
4335
4336 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
4337 adapter, adapter->RcvBuffersOnCard,
4338 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4339 /*
4340 * First, see if we've got less than our minimum threshold of
4341 * receive buffers, there isn't an allocation in progress, and
4342 	 * we haven't exceeded our maximum; if so, get another block of buffers.
4343 	 * None of this needs to be SMP safe.  It's round numbers.
4344 */
4345 if (adapter->JumboEnabled == TRUE)
4346 sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;
4347 if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) &&
4348 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
4349 (atomic_read(&adapter->pending_allocations) == 0)) {
4350 sxg_allocate_buffer_memory(adapter,
4351 SXG_RCV_BLOCK_SIZE
4352 (SXG_RCV_DATA_HDR_SIZE),
4353 SXG_BUFFER_TYPE_RCV);
4354 }
4355 /* Now grab the RcvQLock lock and proceed */
4356 spin_lock(&adapter->RcvQLock);
4357 if (adapter->JumboEnabled)
4358 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
4359 while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
4360 struct list_entry *_ple;
4361
4362 /* Get a descriptor block */
4363 RcvDescriptorBlockHdr = NULL;
4364 if (adapter->FreeRcvBlockCount) {
4365 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
4366 RcvDescriptorBlockHdr =
4367 container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
4368 FreeList);
4369 adapter->FreeRcvBlockCount--;
4370 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
4371 }
4372
4373 if (RcvDescriptorBlockHdr == NULL) {
4374 /* Bail out.. */
4375 adapter->Stats.NoMem++;
4376 break;
4377 }
4378 /* Fill in the descriptor block and give it to the card */
4379 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
4380 STATUS_FAILURE) {
4381 /* Free the descriptor block */
4382 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4383 RcvDescriptorBlockHdr);
4384 break;
4385 }
4386 }
4387 spin_unlock(&adapter->RcvQLock);
4388 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
4389 adapter, adapter->RcvBuffersOnCard,
4390 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4391 }
4392
4393 /*
4394 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
4395 * completed by the microcode
4396 *
4397 * Arguments -
4398 * adapter - A pointer to our adapter structure
4399 * Index - Where the microcode is up to
4400 *
4401 * Return
4402 * None
4403 */
4404 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
4405 unsigned char Index)
4406 {
4407 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4408 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4409 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4410 struct sxg_cmd *RingDescriptorCmd;
4411
4412 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
4413 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4414
4415 /* Now grab the RcvQLock lock and proceed */
4416 spin_lock(&adapter->RcvQLock);
4417 ASSERT(Index != RcvRingInfo->Tail);
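	/*
	 * Walk the ring from the current tail toward the index the microcode
	 * has completed, recycling each descriptor block, and stop once only
	 * a few entries separate the tail from that index.
	 */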
4418 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
4419 RcvRingInfo->Tail) > 3) {
4420 /*
4421 * Locate the current Cmd (ring descriptor entry), and
4422 * associated receive descriptor block, and advance
4423 * the tail
4424 */
4425 SXG_RETURN_CMD(RingZero,
4426 RcvRingInfo,
4427 RingDescriptorCmd, RcvDescriptorBlockHdr);
4428 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
4429 RcvRingInfo->Head, RcvRingInfo->Tail,
4430 RingDescriptorCmd, RcvDescriptorBlockHdr);
4431
4432 /* Clear the SGL field */
4433 RingDescriptorCmd->Sgl = 0;
4434 /*
4435 * Attempt to refill it and hand it right back to the
4436 * card. If we fail to refill it, free the descriptor block
4437 * header. The card will be restocked later via the
4438 * RcvBuffersOnCard test
4439 */
4440 if (sxg_fill_descriptor_block(adapter,
4441 RcvDescriptorBlockHdr) == STATUS_FAILURE)
4442 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4443 RcvDescriptorBlockHdr);
4444 }
4445 spin_unlock(&adapter->RcvQLock);
4446 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
4447 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4448 }
4449
4450 /*
4451 * Read the statistics which the card has been maintaining.
4452 */
4453 void sxg_collect_statistics(struct adapter_t *adapter)
4454 {
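	/*
	 * Writing the DMA address of our statistics buffer to GetUcodeStats
	 * asks the microcode to deposit its current counters there.
	 */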
4455 	if (!adapter->ucode_stats)
4456 		return;
4457 	WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats, adapter->pucode_stats, 0);
4458 	adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
4459 	adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
4460 	adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
4461 }
4462
4463 static struct net_device_stats *sxg_get_stats(struct net_device * dev)
4464 {
4465 struct adapter_t *adapter = netdev_priv(dev);
4466
4467 sxg_collect_statistics(adapter);
4468 return (&adapter->stats);
4469 }
4470
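/*
 * sxg_watchdog - Timer callback; polls the link via sxg_link_event() and
 * re-arms itself every two seconds while the adapter is up.
 */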
4471 static void sxg_watchdog(unsigned long data)
4472 {
4473 struct adapter_t *adapter = (struct adapter_t *) data;
4474
4475 if (adapter->state != ADAPT_DOWN) {
4476 sxg_link_event(adapter);
4477 /* Reset the timer */
4478 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
4479 }
4480 }
4481
4482 static void sxg_update_link_status (struct work_struct *work)
4483 {
4484 struct adapter_t *adapter = (struct adapter_t *)container_of
4485 (work, struct adapter_t, update_link_status);
4486 if (likely(adapter->link_status_changed)) {
4487 sxg_link_event(adapter);
4488 adapter->link_status_changed = 0;
4489 }
4490 }
4491
4492 static struct pci_driver sxg_driver = {
4493 .name = sxg_driver_name,
4494 .id_table = sxg_pci_tbl,
4495 .probe = sxg_entry_probe,
4496 .remove = sxg_entry_remove,
4497 #if SXG_POWER_MANAGEMENT_ENABLED
4498 .suspend = sxgpm_suspend,
4499 .resume = sxgpm_resume,
4500 #endif
4501 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
4502 };
4503
4504 static int __init sxg_module_init(void)
4505 {
4506 sxg_init_driver();
4507
4508 if (debug >= 0)
4509 sxg_debug = debug;
4510
4511 return pci_register_driver(&sxg_driver);
4512 }
4513
4514 static void __exit sxg_module_cleanup(void)
4515 {
4516 pci_unregister_driver(&sxg_driver);
4517 }
4518
4519 module_init(sxg_module_init);
4520 module_exit(sxg_module_cleanup);