Staging: sxg: Fix leaks and checksum errors in transmit code path
drivers/staging/sxg/sxg.c
/**************************************************************************
 *
 * Copyright (C) 2000-2008 Alacritech, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ALACRITECH, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of Alacritech, Inc.
 *
 * Parts developed by LinSysSoft Sahara team
 *
 **************************************************************************/

/*
 * FILENAME: sxg.c
 *
 * The SXG driver for Alacritech's 10Gbe products.
 *
 * NOTE: This is the standard, non-accelerated version of Alacritech's
 *	 IS-NIC driver.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>

#define SLIC_GET_STATS_ENABLED		0
#define LINUX_FREES_ADAPTER_RESOURCES	1
#define SXG_OFFLOAD_IP_CHECKSUM		0
#define SXG_POWER_MANAGEMENT_ENABLED	0
#define VPCI				0
#define ATK_DEBUG			1
#define SXG_UCODE_DEBUG			0

#include "sxg_os.h"
#include "sxghw.h"
#include "sxghif.h"
#include "sxg.h"
#include "sxgdbg.h"
#include "sxgphycode-1.2.h"

static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
				      enum sxg_buffer_type BufferType);
static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
					  void *RcvBlock,
					  dma_addr_t PhysicalAddress,
					  u32 Length);
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
					     struct sxg_scatter_gather *SxgSgl,
					     dma_addr_t PhysicalAddress,
					     u32 Length);

static void sxg_mcast_init_crc32(void);
static int sxg_entry_open(struct net_device *dev);
static int sxg_second_open(struct net_device *dev);
static int sxg_entry_halt(struct net_device *dev);
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
			struct sxg_scatter_gather *SxgSgl);

static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
				 int budget);
static void sxg_interrupt(struct adapter_t *adapter);
static int sxg_poll(struct napi_struct *napi, int budget);
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
				   int *sxg_napi_continue, int *work_done,
				   int budget);
static void sxg_complete_slow_send(struct adapter_t *adapter);
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
					struct sxg_event *Event);
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
static bool sxg_mac_filter(struct adapter_t *adapter,
			   struct ether_header *EtherHdr, ushort length);
static struct net_device_stats *sxg_get_stats(struct net_device *dev);
void sxg_free_resources(struct adapter_t *adapter);
void sxg_free_rcvblocks(struct adapter_t *adapter);
void sxg_free_sgl_buffers(struct adapter_t *adapter);
void sxg_unmap_resources(struct adapter_t *adapter);
void sxg_free_mcast_addrs(struct adapter_t *adapter);
void sxg_collect_statistics(struct adapter_t *adapter);
static int sxg_register_interrupt(struct adapter_t *adapter);
static void sxg_remove_isr(struct adapter_t *adapter);
static irqreturn_t sxg_isr(int irq, void *dev_id);

static void sxg_watchdog(unsigned long data);
static void sxg_update_link_status(struct work_struct *work);

#define XXXTODO 0

#if XXXTODO
static int sxg_mac_set_address(struct net_device *dev, void *ptr);
#endif
static void sxg_mcast_set_list(struct net_device *dev);

static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);

static int sxg_initialize_adapter(struct adapter_t *adapter);
static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
					   unsigned char Index);
int sxg_change_mtu(struct net_device *netdev, int new_mtu);
static int sxg_initialize_link(struct adapter_t *adapter);
static int sxg_phy_init(struct adapter_t *adapter);
static void sxg_link_event(struct adapter_t *adapter);
static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
static void sxg_link_state(struct adapter_t *adapter,
			   enum SXG_LINK_STATE LinkState);
static int sxg_write_mdio_reg(struct adapter_t *adapter,
			      u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(struct adapter_t *adapter,
			     u32 DevAddr, u32 RegAddr, u32 *pValue);
static void sxg_set_mcast_addr(struct adapter_t *adapter);

static unsigned int sxg_first_init = 1;
static char *sxg_banner =
	"Alacritech SLIC Technology(tm) Server and Storage "
	"10Gbe Accelerator (Non-Accelerated)\n";

static int sxg_debug = 1;
static int debug = -1;
static struct net_device *head_netdevice = NULL;

static struct sxgbase_driver sxg_global = {
	.dynamic_intagg = 1,
};
static int intagg_delay = 100;
static u32 dynamic_intagg = 0;

char sxg_driver_name[] = "sxg_nic";
#define DRV_AUTHOR	"Alacritech, Inc. Engineering"
#define DRV_DESCRIPTION \
	"Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver"
#define DRV_COPYRIGHT \
	"Copyright 2000-2008 Alacritech, Inc.  All rights reserved."

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

module_param(dynamic_intagg, int, 0);
MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");

static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
	{0,}
};

MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);

static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
	writel(value, reg);
	if (flush)
		mb();
}

static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
				   u64 value, u32 cpu)
{
	u32 value_high = (u32) (value >> 32);
	u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
	unsigned long flags;

	spin_lock_irqsave(&adapter->Bit64RegLock, flags);
	writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
	writel(value_low, reg);
	spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
}
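
/*
 * Editor's note -- a hedged sketch, not part of the original driver text:
 * the upper 32 bits are staged in the per-CPU "Upper" register and are
 * presumably latched by the hardware when the lower half is written,
 * which is why the two writel() calls above must stay paired under
 * Bit64RegLock.  A typical caller, via the WRITE_REG64 wrapper used
 * later in this file:
 *
 *	dma_addr_t p_addr;	// bus address of a DMA buffer
 *
 *	WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
 */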

static void sxg_init_driver(void)
{
	if (sxg_first_init) {
		DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
			  __func__, jiffies);
		sxg_first_init = 0;
		spin_lock_init(&sxg_global.driver_lock);
	}
}

static void sxg_dbg_macaddrs(struct adapter_t *adapter)
{
	DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);
	DBG_ERROR(" (%s) mac  %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->macaddr[0],
		  adapter->macaddr[1], adapter->macaddr[2],
		  adapter->macaddr[3], adapter->macaddr[4],
		  adapter->macaddr[5]);
	return;
}

/* SXG Globals */
static struct sxg_driver SxgDriver;

#ifdef ATKDBG
static struct sxg_trace_buffer LSxgTraceBuffer;
#endif /* ATKDBG */
static struct sxg_trace_buffer *SxgTraceBuffer = NULL;

/*
 * MSI Related API's
 */
int sxg_register_intr(struct adapter_t *adapter);
int sxg_enable_msi_x(struct adapter_t *adapter);
int sxg_add_msi_isr(struct adapter_t *adapter);
void sxg_remove_msix_isr(struct adapter_t *adapter);
int sxg_set_interrupt_capability(struct adapter_t *adapter);

int sxg_set_interrupt_capability(struct adapter_t *adapter)
{
	int ret;

	ret = sxg_enable_msi_x(adapter);
	if (ret != STATUS_SUCCESS) {
		adapter->msi_enabled = FALSE;
		DBG_ERROR("sxg_set_interrupt_capability MSI-X Disable\n");
	} else {
		adapter->msi_enabled = TRUE;
		DBG_ERROR("sxg_set_interrupt_capability MSI-X Enable\n");
	}
	return ret;
}

int sxg_register_intr(struct adapter_t *adapter)
{
	int ret = 0;

	if (adapter->msi_enabled) {
		ret = sxg_add_msi_isr(adapter);
	} else {
		DBG_ERROR("MSI-X Enable Failed. Using Pin INT\n");
		ret = sxg_register_interrupt(adapter);
		if (ret != STATUS_SUCCESS) {
			DBG_ERROR("sxg_register_interrupt Failed\n");
		}
	}
	return ret;
}

int sxg_enable_msi_x(struct adapter_t *adapter)
{
	int ret;

	adapter->nr_msix_entries = 1;
	adapter->msi_entries = kcalloc(adapter->nr_msix_entries,
				       sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msi_entries) {
		DBG_ERROR("%s:MSI Entries memory allocation Failed\n",
			  __func__);
		return -ENOMEM;
	}

	ret = pci_enable_msix(adapter->pcidev, adapter->msi_entries,
			      adapter->nr_msix_entries);
	if (ret) {
		DBG_ERROR("Enabling MSI-X with %d vectors failed\n",
			  adapter->nr_msix_entries);
		/* Should retry with the lower vector count returned. */
		kfree(adapter->msi_entries);
		return STATUS_FAILURE;	/* MSI-X Enable failed. */
	}
	return STATUS_SUCCESS;
}
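
/*
 * Editor's note -- a hedged sketch, not part of the original driver.
 * pci_enable_msix() of this era returns a positive count when fewer
 * vectors are available than requested, so the retry flagged in the
 * comment above could look roughly like this:
 *
 *	ret = pci_enable_msix(adapter->pcidev, adapter->msi_entries,
 *			      adapter->nr_msix_entries);
 *	while (ret > 0) {
 *		adapter->nr_msix_entries = ret;
 *		ret = pci_enable_msix(adapter->pcidev,
 *				      adapter->msi_entries,
 *				      adapter->nr_msix_entries);
 *	}
 *	if (ret < 0)
 *		return STATUS_FAILURE;
 */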

int sxg_add_msi_isr(struct adapter_t *adapter)
{
	int ret, i;

	if (!adapter->intrregistered) {
		spin_unlock_irqrestore(&sxg_global.driver_lock,
				       sxg_global.flags);
		for (i = 0; i < adapter->nr_msix_entries; i++) {
			ret = request_irq(adapter->msi_entries[i].vector,
					  sxg_isr,
					  IRQF_SHARED,
					  adapter->netdev->name,
					  adapter->netdev);
			if (ret) {
				spin_lock_irqsave(&sxg_global.driver_lock,
						  sxg_global.flags);
				DBG_ERROR("sxg: MSI-X request_irq (%s) "
					  "FAILED [%x]\n",
					  adapter->netdev->name, ret);
				return ret;
			}
		}
	}
	spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
	adapter->msi_enabled = TRUE;
	adapter->intrregistered = 1;
	adapter->IntRegistered = TRUE;
	return STATUS_SUCCESS;
}

void sxg_remove_msix_isr(struct adapter_t *adapter)
{
	int i, vector;
	struct net_device *netdev = adapter->netdev;

	for (i = 0; i < adapter->nr_msix_entries; i++) {
		vector = adapter->msi_entries[i].vector;
		DBG_ERROR("%s : Freeing IRQ vector#%d\n", __func__, vector);
		free_irq(vector, netdev);
	}
}


static void sxg_remove_isr(struct adapter_t *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msi_enabled)
		sxg_remove_msix_isr(adapter);
	else
		free_irq(adapter->netdev->irq, netdev);
}

void sxg_reset_interrupt_capability(struct adapter_t *adapter)
{
	if (adapter->msi_enabled) {
		pci_disable_msix(adapter->pcidev);
		kfree(adapter->msi_entries);
		adapter->msi_entries = NULL;
	}
	return;
}

/*
 * sxg_download_microcode
 *
 * Download microcode to the Sahara adapter, using the Linux firmware
 * module to get the ucode.sys file.
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	UcodeSel	- microcode file selection
 *
 * Return
 *	int
 */
static bool sxg_download_microcode(struct adapter_t *adapter,
				   enum SXG_UCODE_SEL UcodeSel)
{
	const struct firmware *fw;
	const char *file = "";
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	int ret;
	int ucode_start;
	u32 Section;
	u32 ThisSectionSize;
	u32 instruction = 0;
	u32 BaseAddress, AddressOffset, Address;
	/* u32 Failure; */
	u32 ValueRead;
	u32 i;
	u32 index = 0;
	u32 num_sections = 0;
	u32 sectionSize[16];
	u32 sectionStart[16];

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
		  adapter, 0, 0, 0);

	/*
	 * This routine is only implemented to download the microcode
	 * for the Revision B Sahara chip.  Rev A and diagnostic
	 * microcode are not supported at this time.  If Rev A or
	 * diagnostic ucode is required, this routine will obviously
	 * need to change.  Also, we will eventually need to add support
	 * for the Rev B checked version of ucode.  That's easy enough
	 * once the free version of Rev B works.
	 */
	ASSERT(UcodeSel == SXG_UCODE_SYSTEM);
	ASSERT(adapter->asictype == SAHARA_REV_B);
#if SXG_UCODE_DEBUG
	file = "sxg/saharadbgdownloadB.sys";
#else
	file = "sxg/saharadownloadB.sys";
#endif
	ret = request_firmware(&fw, file, &adapter->pcidev->dev);
	if (ret) {
		DBG_ERROR("%s SXG_NIC: Failed to load firmware %s\n",
			  __func__, file);
		return FALSE;
	}

	/*
	 * The microcode .sys file starts with a 4 byte word containing
	 * the number of sections.  That is followed by "num_sections"
	 * 4 byte words containing each section's size, then by
	 * num_sections 4 byte words containing each section's "start"
	 * address.
	 *
	 * Following the above header, the .sys file contains
	 * num_sections runs of newline-delineated 12 byte microcode
	 * instructions, one run per section size.
	 */
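	/*
	 * Editor's sketch of that header, assuming the three-section
	 * maximum asserted below (illustrative only; the code parses
	 * the byte stream directly rather than using such a struct):
	 *
	 *	struct {
	 *		u32 num_sections;	// <= 3
	 *		u32 size[3];		// bytes, a multiple of 12
	 *		u32 start[3];		// ucode load address
	 *		// 12-byte instructions follow, per section
	 *	};
	 */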
	num_sections = *(u32 *)(fw->data + index);
	index += 4;
	ASSERT(num_sections <= 3);
	for (i = 0; i < num_sections; i++) {
		sectionSize[i] = *(u32 *)(fw->data + index);
		index += 4;
	}
	for (i = 0; i < num_sections; i++) {
		sectionStart[i] = *(u32 *)(fw->data + index);
		index += 4;
	}

	/* First, reset the card */
	WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
	udelay(50);
	HwRegs = adapter->HwRegs;

	/*
	 * Download each section of the microcode as specified in
	 * sectionSize[index] to the sectionStart[index] address.  As
	 * described above, the .sys file contains 12 byte word
	 * microcode instructions.  The *download.sys file is generated
	 * using the objtosys.exe utility that was built for Sahara
	 * microcode.
	 */
	/* See usage of this below when we read back for parity */
	ucode_start = index;
	instruction = *(u32 *)(fw->data + index);
	index += 4;

	for (Section = 0; Section < num_sections; Section++) {
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			u32 first_instr = 0;	/* See comment below */

			Address = BaseAddress + AddressOffset;
			ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
			/* Write instruction bits 31 - 0 (low) */
			first_instr = instruction;
			WRITE_REG(HwRegs->UcodeDataLow, instruction, FLUSH);
			instruction = *(u32 *)(fw->data + index);
			index += 4;	/* Advance to the "next" instruction */

			/* Write instruction bits 63-32 (middle) */
			WRITE_REG(HwRegs->UcodeDataMiddle, instruction, FLUSH);
			instruction = *(u32 *)(fw->data + index);
			index += 4;	/* Advance to the "next" instruction */

			/* Write instruction bits 95-64 (high) */
			WRITE_REG(HwRegs->UcodeDataHigh, instruction, FLUSH);
			instruction = *(u32 *)(fw->data + index);
			index += 4;	/* Advance to the "next" instruction */

			/* Write instruction address with the WRITE bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
			/*
			 * Sahara bug in the ucode download logic - the write
			 * to DataLow for the next instruction could get
			 * corrupted.  To avoid this, write to DataLow again
			 * for this instruction (which may get corrupted, but
			 * it doesn't matter), then increment the address and
			 * write the data for the next instruction to DataLow.
			 * That write should succeed.
			 */
			WRITE_REG(HwRegs->UcodeDataLow, first_instr, FLUSH);
		}
	}
	/*
	 * Now repeat the entire operation reading the instruction back
	 * and checking for parity errors
	 */
	index = ucode_start;

	for (Section = 0; Section < num_sections; Section++) {
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			Address = BaseAddress + AddressOffset;
			/* Write the address with the READ bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_READ), FLUSH);
			/* Read it back and check the parity bit. */
			READ_REG(HwRegs->UcodeAddr, ValueRead);
			if (ValueRead & MICROCODE_ADDRESS_PARITY) {
				DBG_ERROR("sxg: %s PARITY ERROR\n",
					  __func__);
				release_firmware(fw);
				return FALSE;	/* Parity error */
			}
			ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
			/* Read the instruction back and compare */
			/* First instruction */
			instruction = *(u32 *)(fw->data + index);
			index += 4;
			READ_REG(HwRegs->UcodeDataLow, ValueRead);
			if (ValueRead != instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
					  __func__);
				release_firmware(fw);
				return FALSE;	/* Miscompare */
			}
			instruction = *(u32 *)(fw->data + index);
			index += 4;
			READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
			if (ValueRead != instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
					  __func__);
				release_firmware(fw);
				return FALSE;	/* Miscompare */
			}
			instruction = *(u32 *)(fw->data + index);
			index += 4;
			READ_REG(HwRegs->UcodeDataHigh, ValueRead);
			if (ValueRead != instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
					  __func__);
				release_firmware(fw);
				return FALSE;	/* Miscompare */
			}
		}
	}

	/* Download finished */
	release_firmware(fw);
	/* Everything OK, Go. */
	WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);

	/*
	 * Poll the CardUp register to wait for microcode to initialize.
	 * Give up after 10,000 attempts (500ms).
	 */
	for (i = 0; i < 10000; i++) {
		udelay(50);
		READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
		if (ValueRead == 0xCAFE) {
			break;
		}
	}
	if (i == 10000) {
		DBG_ERROR("sxg: %s TIMEOUT bringing up card - verify MICROCODE\n",
			  __func__);

		return FALSE;	/* Timeout */
	}
	/*
	 * Now write the LoadSync register.  This is used to
	 * synchronize with the card so it can scribble on the memory
	 * that contained 0xCAFE from the "CardUp" step above
	 */
	if (UcodeSel == SXG_UCODE_SYSTEM) {
		WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
	}

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
		  adapter, 0, 0, 0);
	return TRUE;
}

609
/*
 * sxg_allocate_resources - Allocate memory and locks
 *
 * Arguments -
 *	adapter	- A pointer to our adapter structure
 *
 * Return - int
 */
static int sxg_allocate_resources(struct adapter_t *adapter)
{
	int status = STATUS_SUCCESS;
	u32 RssIds, IsrCount;
	/* struct sxg_xmt_ring *XmtRing; */
	/* struct sxg_rcv_ring *RcvRing; */

	DBG_ERROR("%s ENTER\n", __func__);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
		  adapter, 0, 0, 0);

	/* Windows tells us how many CPUs it plans to use for RSS */
	RssIds = SXG_RSS_CPU_COUNT(adapter);
	IsrCount = adapter->msi_enabled ? RssIds : 1;

	DBG_ERROR("%s Setup the spinlocks\n", __func__);

	/* Allocate spinlocks and initialize listheads first. */
	spin_lock_init(&adapter->RcvQLock);
	spin_lock_init(&adapter->SglQLock);
	spin_lock_init(&adapter->XmtZeroLock);
	spin_lock_init(&adapter->Bit64RegLock);
	spin_lock_init(&adapter->AdapterLock);
	atomic_set(&adapter->pending_allocations, 0);

	DBG_ERROR("%s Setup the lists\n", __func__);

	InitializeListHead(&adapter->FreeRcvBuffers);
	InitializeListHead(&adapter->FreeRcvBlocks);
	InitializeListHead(&adapter->AllRcvBlocks);
	InitializeListHead(&adapter->FreeSglBuffers);
	InitializeListHead(&adapter->AllSglBuffers);

	/*
	 * Mark these basic allocations done.  This flag essentially
	 * tells the SxgFreeResources routine that it can grab spinlocks
	 * and reference listheads.
	 */
	adapter->BasicAllocations = TRUE;
	/*
	 * Main allocation loop.  Start with the maximum supported by
	 * the microcode and back off if memory allocation
	 * fails.  If we hit a minimum, fail.
	 */

	for (;;) {
		DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));

		/*
		 * Start with big items first - receive and transmit rings.
		 * At the moment I'm going to keep the ring size fixed and
		 * adjust the TCBs if we fail.  Later we might
		 * consider reducing the ring size as well..
		 */
		adapter->XmtRings =
		    pci_alloc_consistent(adapter->pcidev,
					 sizeof(struct sxg_xmt_ring) * 1,
					 &adapter->PXmtRings);
		DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);

		if (!adapter->XmtRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);

		DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
		adapter->RcvRings =
		    pci_alloc_consistent(adapter->pcidev,
					 sizeof(struct sxg_rcv_ring) * 1,
					 &adapter->PRcvRings);
		DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
		if (!adapter->RcvRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
		adapter->ucode_stats =
		    kzalloc(sizeof(struct sxg_ucode_stats), GFP_ATOMIC);
		adapter->pucode_stats =
		    pci_map_single(adapter->pcidev, adapter->ucode_stats,
				   sizeof(struct sxg_ucode_stats),
				   PCI_DMA_FROMDEVICE);
		/* memset(adapter->ucode_stats, 0,
		   sizeof(struct sxg_ucode_stats)); */
		break;

	      per_tcb_allocation_failed:
		/* An allocation failed.  Free any successful allocations. */
		if (adapter->XmtRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_xmt_ring) * 1,
					    adapter->XmtRings,
					    adapter->PXmtRings);
			adapter->XmtRings = NULL;
		}
		if (adapter->RcvRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_rcv_ring) * 1,
					    adapter->RcvRings,
					    adapter->PRcvRings);
			adapter->RcvRings = NULL;
		}
		if (adapter->ucode_stats) {
			pci_unmap_single(adapter->pcidev,
					 adapter->pucode_stats,
					 sizeof(struct sxg_ucode_stats),
					 PCI_DMA_FROMDEVICE);
			kfree(adapter->ucode_stats);
			adapter->ucode_stats = NULL;
		}
		/* Loop around and try again.... */
	}

	DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
	/* Initialize rcv zero and xmt zero rings */
	SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
	SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);

	/* Sanity check receive data structure format */
	/* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
	   (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
	ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
	       SXG_RCV_DESCRIPTOR_BLOCK_SIZE);

	DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
		  (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));

	/* Allocate event queues. */
	adapter->EventRings =
	    pci_alloc_consistent(adapter->pcidev,
				 sizeof(struct sxg_event_ring) * RssIds,
				 &adapter->PEventRings);

	if (!adapter->EventRings) {
		/*
		 * Caller will call SxgFreeAdapter to clean up above
		 * allocations
		 */
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);

	DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
	/* Allocate ISR */
	adapter->Isr = pci_alloc_consistent(adapter->pcidev,
					    sizeof(u32) * IsrCount,
					    &adapter->PIsr);
	if (!adapter->Isr) {
		/*
		 * Caller will call SxgFreeAdapter to clean up above
		 * allocations
		 */
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->Isr, 0, sizeof(u32) * IsrCount);

	DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
		  __func__, (unsigned int)sizeof(u32));

	/* Allocate shared XMT ring zero index location */
	adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
							 sizeof(u32),
							 &adapter->
							 PXmtRingZeroIndex);
	if (!adapter->XmtRingZeroIndex) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
		  adapter, SXG_MAX_ENTRIES, 0, 0);

	return status;
}

/*
 * sxg_config_pci -
 *
 * Set up PCI Configuration space
 *
 * Arguments -
 *	pcidev	- A pointer to our PCI device structure
 */
static void sxg_config_pci(struct pci_dev *pcidev)
{
	u16 pci_command;
	u16 new_command;

	pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
	DBG_ERROR("sxg: %s  PCI command[%4.4x]\n", __func__, pci_command);
	/* Set the command register */
	new_command = pci_command | (
				     /* Memory Space Enable */
				     PCI_COMMAND_MEMORY |
				     /* Bus master enable */
				     PCI_COMMAND_MASTER |
				     /* Memory write and invalidate */
				     PCI_COMMAND_INVALIDATE |
				     /* Parity error response */
				     PCI_COMMAND_PARITY |
				     /* System ERR */
				     PCI_COMMAND_SERR |
				     /* Fast back-to-back */
				     PCI_COMMAND_FAST_BACK);
	if (pci_command != new_command) {
		DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
			  __func__, pci_command, new_command);
		pci_write_config_word(pcidev, PCI_COMMAND, new_command);
	}
}

/*
 * sxg_read_config
 * @adapter : Pointer to the adapter structure for the card
 * This function will read the configuration data from EEPROM/FLASH
 */
static inline int sxg_read_config(struct adapter_t *adapter)
{
	/* struct sxg_config data; */
	struct sxg_config *config;
	struct sw_cfg_data *data;
	dma_addr_t p_addr;
	unsigned long status;
	unsigned long i;

	config = pci_alloc_consistent(adapter->pcidev,
				      sizeof(struct sxg_config), &p_addr);

	if (!config) {
		/*
		 * We can't get even this much memory.  Raise hell and
		 * get out of here.
		 */
		printk(KERN_ERR
		       "%s : Could not allocate memory for reading EEPROM\n",
		       __func__);
		return -ENOMEM;
	}

	data = &config->SwCfg;

	/* Initialize (reflective memory) status register */
	WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);

	/* Send request to fetch configuration data */
	WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
	for (i = 0; i < 1000; i++) {
		READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
		if (status != SXG_CFG_TIMEOUT)
			break;
		mdelay(1);	/* Do we really need this */
	}

	switch (status) {
	/* Config read from EEPROM succeeded */
	case SXG_CFG_LOAD_EEPROM:
	/* Config read from Flash succeeded */
	case SXG_CFG_LOAD_FLASH:
		/*
		 * Copy the MAC address to the adapter structure.
		 * TODO: We are not doing the remaining part: FRU, etc.
		 */
		memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
		       sizeof(struct sxg_config_mac));
		break;
	case SXG_CFG_TIMEOUT:
	case SXG_CFG_LOAD_INVALID:
	case SXG_CFG_LOAD_ERROR:
	default:		/* Fix default handler later */
		printk(KERN_WARNING
		       "%s : We could not read the config word. Status = %ld\n",
		       __func__, status);
		break;
	}
	pci_free_consistent(adapter->pcidev, sizeof(struct sxg_config),
			    config, p_addr);
	if (adapter->netdev) {
		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
		memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
	}
	sxg_dbg_macaddrs(adapter);

	return status;
}

static const struct net_device_ops sxg_netdev_ops = {
	.ndo_open		= sxg_entry_open,
	.ndo_stop		= sxg_entry_halt,
	.ndo_start_xmit		= sxg_send_packets,
	.ndo_do_ioctl		= sxg_ioctl,
	.ndo_change_mtu		= sxg_change_mtu,
	.ndo_get_stats		= sxg_get_stats,
	.ndo_set_multicast_list	= sxg_mcast_set_list,
	.ndo_validate_addr	= eth_validate_addr,
#if XXXTODO
	.ndo_set_mac_address	= sxg_mac_set_address,
#else
	.ndo_set_mac_address	= eth_mac_addr,
#endif
};

static int sxg_entry_probe(struct pci_dev *pcidev,
			   const struct pci_device_id *pci_tbl_entry)
{
	static int did_version = 0;
	int err;
	struct net_device *netdev;
	struct adapter_t *adapter;
	void __iomem *memmapped_ioaddr;
	u32 status = 0;
	ulong mmio_start = 0;
	ulong mmio_len = 0;
	unsigned char revision_id;

	DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
		  __func__, jiffies, smp_processor_id());

	/* Initialize trace buffer */
#ifdef ATKDBG
	SxgTraceBuffer = &LSxgTraceBuffer;
	SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
#endif

	sxg_global.dynamic_intagg = dynamic_intagg;

	err = pci_enable_device(pcidev);

	DBG_ERROR("Call pci_enable_device(%p)  status[%x]\n", pcidev, err);
	if (err) {
		return err;
	}

	if (sxg_debug > 0 && did_version++ == 0) {
		printk(KERN_INFO "%s\n", sxg_banner);
		printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
	}

	pci_read_config_byte(pcidev, PCI_REVISION_ID, &revision_id);

	if (!(err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)))) {
		DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(64)) successful\n");
	} else {
		if ((err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)))) {
			DBG_ERROR
			    ("No usable DMA configuration, aborting  err[%x]\n",
			     err);
			return err;
		}
		DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(32)) successful\n");
	}

	DBG_ERROR("Call pci_request_regions\n");

	err = pci_request_regions(pcidev, sxg_driver_name);
	if (err) {
		DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
		return err;
	}

	DBG_ERROR("call pci_set_master\n");
	pci_set_master(pcidev);

	DBG_ERROR("call alloc_etherdev\n");
	netdev = alloc_etherdev(sizeof(struct adapter_t));
	if (!netdev) {
		err = -ENOMEM;
		goto err_out_exit_sxg_probe;
	}
	DBG_ERROR("alloc_etherdev for sxg netdev[%p]\n", netdev);

	SET_NETDEV_DEV(netdev, &pcidev->dev);

	pci_set_drvdata(pcidev, netdev);
	adapter = netdev_priv(netdev);
	if (revision_id == 1) {
		adapter->asictype = SAHARA_REV_A;
	} else if (revision_id == 2) {
		adapter->asictype = SAHARA_REV_B;
	} else {
		ASSERT(0);
		DBG_ERROR("%s Unexpected revision ID %x\n", __func__,
			  revision_id);
		goto err_out_exit_sxg_probe;
	}
	adapter->netdev = netdev;
	adapter->pcidev = pcidev;

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_0;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] "
		  "len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr,
		  mmio_start, mmio_len, pcidev->irq);

	adapter->HwRegs = (void *)memmapped_ioaddr;
	adapter->base_addr = memmapped_ioaddr;

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_2;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
		  "start[%lx] len[%lx], IRQ %d.\n", __func__,
		  memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);

	adapter->UcodeRegs = (void *)memmapped_ioaddr;

	adapter->State = SXG_STATE_INITIALIZING;
	/*
	 * Maintain a list of all adapters anchored by
	 * the global SxgDriver structure.
	 */
	adapter->Next = SxgDriver.Adapters;
	SxgDriver.Adapters = adapter;
	adapter->AdapterID = ++SxgDriver.AdapterID;

	/* Initialize CRC table used to determine multicast hash */
	sxg_mcast_init_crc32();

	adapter->JumboEnabled = FALSE;
	adapter->RssEnabled = FALSE;
	if (adapter->JumboEnabled) {
		adapter->FrameSize = JUMBOMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
	} else {
		adapter->FrameSize = ETHERMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
	}

	/*
	 * status = SXG_READ_EEPROM(adapter);
	 * if (!status) {
	 *	goto sxg_init_bad;
	 * }
	 */

	DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
	sxg_config_pci(pcidev);
	DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);

	DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
	sxg_init_driver();
	DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);

	adapter->vendid = pci_tbl_entry->vendor;
	adapter->devid = pci_tbl_entry->device;
	adapter->subsysid = pci_tbl_entry->subdevice;
	adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
	adapter->functionnumber = (pcidev->devfn & 0x7);
	adapter->memorylength = pci_resource_len(pcidev, 0);
	adapter->irq = pcidev->irq;
	adapter->next_netdevice = head_netdevice;
	head_netdevice = netdev;
	adapter->port = 0;	/* adapter->functionnumber; */

	/* Allocate memory and other resources */
	DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
	status = sxg_allocate_resources(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
		  __func__, status);
	if (status != STATUS_SUCCESS) {
		goto err_out_unmap;
	}

	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
	if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
			  __func__);
		sxg_read_config(adapter);
		status = sxg_adapter_set_hwaddr(adapter);
	} else {
		adapter->state = ADAPT_FAIL;
		adapter->linkstate = LINK_DOWN;
		DBG_ERROR("sxg_download_microcode FAILED status[%x]\n",
			  status);
	}

	netdev->base_addr = (unsigned long)adapter->base_addr;
	netdev->irq = adapter->irq;
	netdev->netdev_ops = &sxg_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	err = sxg_set_interrupt_capability(adapter);
	if (err != STATUS_SUCCESS)
		DBG_ERROR("Cannot enable MSI-X capability\n");

	strcpy(netdev->name, "eth%d");
	/* strcpy(netdev->name, pci_name(pcidev)); */
	if ((err = register_netdev(netdev))) {
		DBG_ERROR("Cannot register net device, aborting. %s\n",
			  netdev->name);
		goto err_out_unmap;
	}

	netif_napi_add(netdev, &adapter->napi,
		       sxg_poll, SXG_NETDEV_WEIGHT);
	netdev->watchdog_timeo = 2 * HZ;
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &sxg_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;
	INIT_WORK(&adapter->update_link_status, sxg_update_link_status);

	DBG_ERROR
	    ("sxg: %s addr 0x%lx, irq %d, MAC addr "
	     "%02X:%02X:%02X:%02X:%02X:%02X\n",
	     netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
	     netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
	     netdev->dev_addr[4], netdev->dev_addr[5]);

	/* sxg_init_bad: */
	ASSERT(status == FALSE);
	/* sxg_free_adapter(adapter); */

	DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
		  status, jiffies, smp_processor_id());
	return status;

 err_out_unmap:
	sxg_free_resources(adapter);

 err_out_free_mmio_region_2:
	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);
	release_mem_region(mmio_start, mmio_len);

 err_out_free_mmio_region_0:
	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);
	release_mem_region(mmio_start, mmio_len);

 err_out_exit_sxg_probe:
	DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
		  smp_processor_id());

	pci_disable_device(pcidev);
	DBG_ERROR("sxg: %s deallocate device\n", __func__);
	if (netdev)
		free_netdev(netdev);
	printk("Exit %s, Sxg driver loading failed..\n", __func__);

	return -ENODEV;
}

/*
 * Line-based interrupt routines.
 *
 * sxg_disable_interrupt
 *
 * DisableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_disable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	/* Turn off interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);

	adapter->InterruptsEnabled = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
}

/*
 * sxg_enable_interrupt
 *
 * EnableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_enable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	/* Turn on interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);

	adapter->InterruptsEnabled = 1;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
		  adapter, 0, 0, 0);
}

/*
 * sxg_isr - Process a line-based interrupt
 *
 * Arguments:
 *	Context		- Our adapter structure
 *	QueueDefault	- Output parameter to queue to default CPU
 *	TargetCpus	- Output bitmap to schedule DPC's
 *
 * Return Value: TRUE if our interrupt
 */
static irqreturn_t sxg_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);

	if (adapter->state != ADAPT_UP)
		return IRQ_NONE;
	adapter->Stats.NumInts++;
	if (adapter->Isr[0] == 0) {
		/*
		 * The SLIC driver used to experience a number of spurious
		 * interrupts due to the delay associated with the masking of
		 * the interrupt (we'd bounce back in here).  If we see that
		 * again with Sahara, add a READ_REG of the Icr register after
		 * the WRITE_REG below.
		 */
		adapter->Stats.FalseInts++;
		return IRQ_NONE;
	}
	/*
	 * Move the Isr contents and clear the value in
	 * shared memory, and mask interrupts
	 */
	/* ASSERT(adapter->IsrDpcsPending == 0); */
#if XXXTODO			/* RSS Stuff */
	/*
	 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
	 * schedule DPC's based on event queues.
	 */
	if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
		for (i = 0;
		     i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
		     i++) {
			struct sxg_event_ring *EventRing =
			    &adapter->EventRings[i];
			struct sxg_event *Event =
			    &EventRing->Ring[adapter->NextEvent[i]];
			unsigned char Cpu =
			    adapter->RssSystemInfo->RssIdToCpu[i];
			if (Event->Status & EVENT_STATUS_VALID) {
				adapter->IsrDpcsPending++;
				CpuMask |= (1 << Cpu);
			}
		}
	}
	/*
	 * Now, either schedule the CPUs specified by the CpuMask,
	 * or queue default
	 */
	if (CpuMask) {
		*QueueDefault = FALSE;
	} else {
		adapter->IsrDpcsPending = 1;
		*QueueDefault = TRUE;
	}
	*TargetCpus = CpuMask;
#endif
	sxg_interrupt(adapter);

	return IRQ_HANDLED;
}

static void sxg_interrupt(struct adapter_t *adapter)
{
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);

	if (napi_schedule_prep(&adapter->napi)) {
		__napi_schedule(&adapter->napi);
	}
}

static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
				 int budget)
{
	/* unsigned char RssId = 0; */
	u32 NewIsr;
	int sxg_napi_continue = 1;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
		  adapter, adapter->IsrCopy[0], 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);

	adapter->IsrCopy[0] = adapter->Isr[0];
	adapter->Isr[0] = 0;

	/* Always process the event queue. */
	while (sxg_napi_continue) {
		sxg_process_event_queue(adapter,
					(adapter->RssEnabled ? /*RssId */ 0 : 0),
					&sxg_napi_continue, work_done, budget);
	}

#if XXXTODO			/* RSS stuff */
	if (--adapter->IsrDpcsPending) {
		/* We're done. */
		ASSERT(adapter->RssEnabled);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
			  adapter, 0, 0, 0);
		return;
	}
#endif
	/* Last (or only) DPC processes the ISR and clears the interrupt. */
	NewIsr = sxg_process_isr(adapter, 0);
	/* Reenable interrupts */
	adapter->IsrCopy[0] = 0;
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
		  adapter, NewIsr, 0, 0);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
		  adapter, 0, 0, 0);
}

static int sxg_poll(struct napi_struct *napi, int budget)
{
	struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
	int work_done = 0;

	sxg_handle_interrupt(adapter, &work_done, budget);

	if (work_done < budget) {
		napi_complete(napi);
		WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
	}
	return work_done;
}
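
/*
 * Editor's sketch of the NAPI flow implemented above (illustrative,
 * derived from the code itself):
 *
 *	sxg_isr()                hard IRQ: mask via the Icr register
 *	  -> sxg_interrupt()     __napi_schedule(&adapter->napi)
 *	    -> sxg_poll()        softirq: drain events up to "budget"
 *	      -> napi_complete() under budget: clear Isr to re-arm
 */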

/*
 * sxg_process_isr - Process an interrupt.  Called from the line-based and
 *		     message-based interrupt DPC routines
 *
 * Arguments:
 *	adapter		- Our adapter structure
 *	Queue		- The ISR that needs processing
 *
 * Return Value:
 *	None
 */
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
{
	u32 Isr = adapter->IsrCopy[MessageId];
	u32 NewIsr = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
		  adapter, Isr, 0, 0);

	/* Error */
	if (Isr & SXG_ISR_ERR) {
		if (Isr & SXG_ISR_PDQF) {
			adapter->Stats.PdqFull++;
			DBG_ERROR("%s: SXG_ISR_ERR  PDQF!!\n", __func__);
		}
		/* No host buffer */
		if (Isr & SXG_ISR_RMISS) {
			/*
			 * There is a bunch of code in the SLIC driver which
			 * attempts to process more receive events per DPC
			 * if we start to fall behind.  We'll probably
			 * need to do something similar here, but hold
			 * off for now.  I don't want to make the code more
			 * complicated than strictly needed.
			 */
			adapter->stats.rx_missed_errors++;
			if (adapter->stats.rx_missed_errors < 5) {
				DBG_ERROR("%s: SXG_ISR_ERR  RMISS!!\n",
					  __func__);
			}
		}
		/* Card crash */
		if (Isr & SXG_ISR_DEAD) {
			/*
			 * Set aside the crash info and set the adapter state
			 * to RESET
			 */
			adapter->CrashCpu = (unsigned char)
			    ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
			adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
			adapter->Dead = TRUE;
			DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
				  adapter->CrashLocation, adapter->CrashCpu);
		}
		/* Event ring full */
		if (Isr & SXG_ISR_ERFULL) {
			/*
			 * Same issue as RMISS, really.  This means the
			 * host is falling behind the card.  Need to increase
			 * event ring size, process more events per interrupt,
			 * and/or reduce/remove interrupt aggregation.
			 */
			adapter->Stats.EventRingFull++;
			DBG_ERROR("%s: SXG_ISR_ERR  EVENT RING FULL!!\n",
				  __func__);
		}
		/* Transmit drop - no DRAM buffers or XMT error */
		if (Isr & SXG_ISR_XDROP) {
			DBG_ERROR("%s: SXG_ISR_ERR  XDROP!!\n", __func__);
		}
	}
	/* Slowpath send completions */
	if (Isr & SXG_ISR_SPSEND) {
		sxg_complete_slow_send(adapter);
	}
	/* Dump */
	if (Isr & SXG_ISR_UPC) {
		/* Maybe change when debug is added.. */
		/* ASSERT(adapter->DumpCmdRunning); */
		adapter->DumpCmdRunning = FALSE;
	}
	/* Link event */
	if (Isr & SXG_ISR_LINK) {
		if (adapter->state != ADAPT_DOWN) {
			adapter->link_status_changed = 1;
			schedule_work(&adapter->update_link_status);
		}
	}
	/* Debug - breakpoint hit */
	if (Isr & SXG_ISR_BREAK) {
		/*
		 * At the moment AGDB isn't written to support interactive
		 * debug sessions.  When it is, this interrupt will be used to
		 * signal AGDB that it has hit a breakpoint.  For now, ASSERT.
		 */
		ASSERT(0);
	}
	/* Heartbeat response */
	if (Isr & SXG_ISR_PING) {
		adapter->PingOutstanding = FALSE;
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
		  adapter, Isr, NewIsr, 0);

	return NewIsr;
}

/*
 * sxg_rcv_checksum - Set the checksum status for a received packet
 *
 * Arguments:
 *	@adapter	- Adapter structure on which packet is received
 *	@skb		- Packet which is received
 *	@Event		- Event read from hardware
 */
void sxg_rcv_checksum(struct adapter_t *adapter, struct sk_buff *skb,
		      struct sxg_event *Event)
{
	skb->ip_summed = CHECKSUM_NONE;
	if (likely(adapter->flags & SXG_RCV_IP_CSUM_ENABLED)) {
		if (likely(adapter->flags & SXG_RCV_TCP_CSUM_ENABLED)
		    && (Event->Status & EVENT_STATUS_TCPIP)) {
			if (!(Event->Status &
			      (EVENT_STATUS_TCPBAD | EVENT_STATUS_IPBAD)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if (Event->Status & EVENT_STATUS_IPONLY) {
			if (!(Event->Status & EVENT_STATUS_IPBAD))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
}
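
/*
 * Editor's note (hedged): CHECKSUM_UNNECESSARY tells the stack that the
 * device already verified the checksum(s), so no software verification
 * is performed; CHECKSUM_NONE makes the stack verify them itself.  The
 * combined TCPBAD|IPBAD test above therefore only claims a verified
 * packet when both the TCP and IP checksums were good.
 */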
1511
5db6b777 1512/*
5db6b777
GKH
1513 * sxg_process_event_queue - Process our event queue
1514 *
1515 * Arguments:
1516 * - adapter - Adapter structure
1517 * - RssId - The event queue requiring processing
1518 *
1519 * Return Value:
1520 * None.
1521 */
b62a294f
MT
1522static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
1523 int *sxg_napi_continue, int *work_done, int budget)
5db6b777 1524{
942798b4
MT
1525 struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
1526 struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
5db6b777 1527 u32 EventsProcessed = 0, Batches = 0;
5db6b777
GKH
1528 struct sk_buff *skb;
1529#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1530 struct sk_buff *prev_skb = NULL;
1531 struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1532 u32 Index;
942798b4 1533 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
5db6b777
GKH
1534#endif
1535 u32 ReturnStatus = 0;
7c66b14b 1536 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
5db6b777
GKH
1537
1538 ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1539 (adapter->State == SXG_STATE_PAUSING) ||
1540 (adapter->State == SXG_STATE_PAUSED) ||
1541 (adapter->State == SXG_STATE_HALTING));
ddd6f0a8
MT
1542 /*
1543 * We may still have unprocessed events on the queue if
1544 * the card crashed. Don't process them.
1545 */
5db6b777
GKH
1546 if (adapter->Dead) {
1547 return (0);
1548 }
ddd6f0a8
MT
1549 /*
1550 * In theory there should only be a single processor that
1551 * accesses this queue, and only at interrupt-DPC time. So/
1552 * we shouldn't need a lock for any of this.
1553 */
5db6b777 1554 while (Event->Status & EVENT_STATUS_VALID) {
b62a294f 1555 (*sxg_napi_continue) = 1;
5db6b777
GKH
1556 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1557 Event, Event->Code, Event->Status,
1558 adapter->NextEvent);
1559 switch (Event->Code) {
1560 case EVENT_CODE_BUFFERS:
cb636fe3
MT
1561 /* struct sxg_ring_info Head & Tail == unsigned char */
1562 ASSERT(!(Event->CommandIndex & 0xFF00));
5db6b777
GKH
1563 sxg_complete_descriptor_blocks(adapter,
1564 Event->CommandIndex);
5db6b777
GKH
1565 break;
1566 case EVENT_CODE_SLOWRCV:
b62a294f 1567 (*work_done)++;
5db6b777
GKH
1568 --adapter->RcvBuffersOnCard;
1569 if ((skb = sxg_slow_receive(adapter, Event))) {
1570 u32 rx_bytes;
1571#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
b243c4aa 1572 /* Add it to our indication list */
5db6b777
GKH
1573 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1574 IndicationList, num_skbs);
ddd6f0a8
MT
1575 /*
1576 * On Linux we just pass each skb up to the
1577 * protocol layer at this point; there is no
1578 * indication-list capability.
1579 */
5db6b777 1580#else
cb636fe3
MT
1581 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1582 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1583 rx_bytes = Event->Length;
5db6b777
GKH
1584 adapter->stats.rx_packets++;
1585 adapter->stats.rx_bytes += rx_bytes;
bbb18b97 1586 sxg_rcv_checksum(adapter, skb, Event);
5db6b777 1587 skb->dev = adapter->netdev;
b62a294f 1588 netif_receive_skb(skb);
5db6b777
GKH
1589#endif
1590 }
1591 break;
1592 default:
1593 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
e88bd231 1594 __func__, Event->Code);
cb636fe3 1595 /* ASSERT(0); */
5db6b777 1596 }
ddd6f0a8
MT
1597 /*
1598 * See if we need to restock card receive buffers.
1599 * There are two things to note here:
1600 * First - This test is not SMP safe. The
1601 * adapter->BuffersOnCard field is protected via atomic
1602 * interlocked calls, but we do not protect it with respect
1603 * to these tests. The only way to do that is with a lock,
1604 * and I don't want to grab a lock every time we adjust the
1605 * BuffersOnCard count. Instead, we allow the buffer
1606 * replenishment to be off once in a while. The worst that
1607 * can happen is the card is given one more or one fewer
1608 * descriptor block than the arbitrary value we've chosen. No big
1609 * deal. In short, DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
1610 * is adjusted.
1611 * Second - We expect this test to rarely
1612 * evaluate to true. We attempt to refill descriptor blocks
1613 * as they are returned to us (sxg_complete_descriptor_blocks)
1614 * so the only time this should evaluate to true is when
1615 * sxg_complete_descriptor_blocks failed to allocate
1616 * receive buffers.
1617 */
7c66b14b
MT
1618 if (adapter->JumboEnabled)
1619 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
1620
1621 if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
5db6b777
GKH
1622 sxg_stock_rcv_buffers(adapter);
1623 }
ddd6f0a8
MT
1624 /*
1625 * It's more efficient to just set this to zero.
1626 * But clearing the top bit saves potential debug info...
1627 */
5db6b777 1628 Event->Status &= ~EVENT_STATUS_VALID;
ddd6f0a8 1629 /* Advance to the next event */
5db6b777
GKH
1630 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1631 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1632 EventsProcessed++;
1633 if (EventsProcessed == EVENT_RING_BATCH) {
b243c4aa 1634 /* Release a batch of events back to the card */
5db6b777
GKH
1635 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1636 EVENT_RING_BATCH, FALSE);
1637 EventsProcessed = 0;
ddd6f0a8
MT
1638 /*
1639 * If we've processed our batch limit, break out of the
1640 * loop and return SXG_ISR_EVENT to arrange for us to
1641 * be called again
1642 */
5db6b777
GKH
1643 if (Batches++ == EVENT_BATCH_LIMIT) {
1644 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1645 TRACE_NOISY, "EvtLimit", Batches,
1646 adapter->NextEvent, 0, 0);
1647 ReturnStatus = SXG_ISR_EVENT;
1648 break;
1649 }
1650 }
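		/*
		 * Below: if the NAPI budget is exhausted, release the events
		 * processed so far and let the poll loop reschedule us.
		 */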
b62a294f
MT
1651 if (*work_done >= budget) {
1652 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1653 EventsProcessed, FALSE);
1654 EventsProcessed = 0;
1655 (*sxg_napi_continue) = 0;
1656 break;
1657 }
5db6b777 1658 }
b62a294f
MT
1659 if (!(Event->Status & EVENT_STATUS_VALID))
1660 (*sxg_napi_continue) = 0;
1661
5db6b777 1662#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
b243c4aa 1663 /* Indicate any received dumb-nic frames */
5db6b777
GKH
1664 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1665#endif
b243c4aa 1666 /* Release events back to the card. */
5db6b777
GKH
1667 if (EventsProcessed) {
1668 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1669 EventsProcessed, FALSE);
1670 }
1671 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1672 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1673
1674 return (ReturnStatus);
1675}
1676
1677/*
1678 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1679 *
1680 * Arguments -
1681 * adapter - A pointer to our adapter structure
5db6b777
GKH
1682 * Return
1683 * None
1684 */
c5e5cf5a 1685static void sxg_complete_slow_send(struct adapter_t *adapter)
5db6b777 1686{
942798b4
MT
1687 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1688 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
5c7514e0 1689 u32 *ContextType;
942798b4 1690 struct sxg_cmd *XmtCmd;
54aed113
MT
1691 unsigned long flags = 0;
1692 unsigned long sgl_flags = 0;
d9d578bf 1693 unsigned int processed_count = 0;
5db6b777 1694
ddd6f0a8
MT
1695 /*
1696 * NOTE - This lock is dropped and regrabbed in this loop.
1697 * This means two different processors can both be running
1698 * through this loop. Be *very* careful.
1699 */
c5e5cf5a 1700 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
d9d578bf 1701
5db6b777
GKH
1702 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1703 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1704
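	/*
	 * *adapter->XmtRingZeroIndex is presumably advanced by the card to
	 * the last completed command; SXG_COMPLETE_SLOW_SEND_LIMIT bounds
	 * how many completions are reaped in one call.
	 */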
d9d578bf
MT
1705 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1706 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
ddd6f0a8
MT
1707 /*
1708 * Locate the current Cmd (ring descriptor entry), and
1709 * associated SGL, and advance the tail
1710 */
5db6b777
GKH
1711 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1712 ASSERT(ContextType);
1713 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1714 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
b243c4aa 1715 /* Clear the SGL field. */
5db6b777
GKH
1716 XmtCmd->Sgl = 0;
1717
1718 switch (*ContextType) {
1719 case SXG_SGL_DUMB:
1720 {
1721 struct sk_buff *skb;
cb636fe3
MT
1722 struct sxg_scatter_gather *SxgSgl =
1723 (struct sxg_scatter_gather *)ContextType;
d9d578bf
MT
1724 dma64_addr_t FirstSgeAddress;
1725 u32 FirstSgeLength;
1323e5f1 1726
b243c4aa 1727 /* Dumb-nic send. Command context is the dumb-nic SGL */
1729 skb = SxgSgl->DumbPacket;
d9d578bf
MT
1730 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1731 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
b243c4aa 1732 /* Complete the send */
5db6b777
GKH
1733 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1734 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1735 0, 0);
1736 ASSERT(adapter->Stats.XmtQLen);
ddd6f0a8 1737 /*
cb636fe3
MT
1738 * Now drop the lock and complete the send
1739 * back to Microsoft. We need to drop the lock
1740 * because Microsoft can come back with a
1741 * chimney send, which results in a double trip
1742 * in SxgTcpOutput
ddd6f0a8 1743 */
c5e5cf5a
MT
1744 spin_unlock_irqrestore(
1745 &adapter->XmtZeroLock, flags);
d9d578bf
MT
1746
1747 SxgSgl->DumbPacket = NULL;
1748 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1749 FirstSgeAddress,
1750 FirstSgeLength);
c5e5cf5a 1751 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
b243c4aa 1752 /* and reacquire.. */
c5e5cf5a 1753 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
5db6b777
GKH
1754 }
1755 break;
1756 default:
1757 ASSERT(0);
1758 }
1759 }
c5e5cf5a 1760 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
5db6b777
GKH
1761 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1762 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1763}
1764
1765/*
1766 * sxg_slow_receive
1767 *
1768 * Arguments -
1769 * adapter - A pointer to our adapter structure
1770 * Event - Receive event
1771 *
cb636fe3 1772 * Return - skb
5db6b777 1773 */
cb636fe3
MT
1774static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1775 struct sxg_event *Event)
5db6b777 1776{
d0128aa9 1777 u32 BufferSize = adapter->ReceiveBufferSize;
942798b4 1778 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
5db6b777 1779 struct sk_buff *Packet;
d9d578bf 1780 static int read_counter = 0;
5db6b777 1781
942798b4 1782 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
d9d578bf
MT
 /* Refresh gathered statistics roughly every 256 receives */
1783 if (read_counter++ & 0x100) {
1785 sxg_collect_statistics(adapter);
1786 read_counter = 0;
1787 }
5db6b777
GKH
1788 ASSERT(RcvDataBufferHdr);
1789 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
5db6b777
GKH
1790 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1791 RcvDataBufferHdr, RcvDataBufferHdr->State,
d0128aa9 1792 /*RcvDataBufferHdr->VirtualAddress*/ 0);
b243c4aa 1793 /* Drop rcv frames in non-running state */
5db6b777
GKH
1794 switch (adapter->State) {
1795 case SXG_STATE_RUNNING:
1796 break;
1797 case SXG_STATE_PAUSING:
1798 case SXG_STATE_PAUSED:
1799 case SXG_STATE_HALTING:
1800 goto drop;
1801 default:
1802 ASSERT(0);
1803 goto drop;
1804 }
1805
cb636fe3
MT
1806 /*
1807 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1808 * RcvDataBufferHdr->VirtualAddress, Event->Length);
1809 */
1323e5f1 1810
b243c4aa 1811 /* Change buffer state to UPSTREAM */
5db6b777
GKH
1812 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1813 if (Event->Status & EVENT_STATUS_RCVERR) {
1814 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1815 Event, Event->Status, Event->HostHandle, 0);
5c7514e0 1816 sxg_process_rcv_error(adapter, *(u32 *)
5db6b777
GKH
1817 SXG_RECEIVE_DATA_LOCATION
1818 (RcvDataBufferHdr));
1819 goto drop;
1820 }
b243c4aa
M
1821#if XXXTODO /* VLAN stuff */
1822 /* If there's a VLAN tag, extract it and validate it */
cb636fe3
MT
1823 if (((struct ether_header *)
1824 (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
1825 == ETHERTYPE_VLAN) {
5db6b777
GKH
1826 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1827 STATUS_SUCCESS) {
1828 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1829 "BadVlan", Event,
1830 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1831 Event->Length, 0);
1832 goto drop;
1833 }
1834 }
1835#endif
b243c4aa 1836 /* Dumb-nic frame. See if it passes our mac filter and update stats */
ddd6f0a8 1837
b040b07b
MT
1838 if (!sxg_mac_filter(adapter,
1839 (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
1840 Event->Length)) {
1841 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1842 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1843 Event->Length, 0);
1844 goto drop;
1845 }
5db6b777
GKH
1846
1847 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
1323e5f1
MT
1848 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1849 Packet->protocol = eth_type_trans(Packet, adapter->netdev);
5db6b777
GKH
1850
1851 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1852 RcvDataBufferHdr, Packet, Event->Length, 0);
b243c4aa 1853 /* Lastly adjust the receive packet length. */
1323e5f1 1854 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
54aed113 1855 RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
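	/*
	 * The skb just handed upstream is detached from the buffer header;
	 * a fresh skb is allocated below so the header can go back on the
	 * free list for reuse by the card.
	 */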
d0128aa9
MT
1856 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1857 if (RcvDataBufferHdr->skb)
1858 {
1859 spin_lock(&adapter->RcvQLock);
1860 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
d9d578bf 1861 /* adapter->RcvBuffersOnCard++; */
d0128aa9
MT
1862 spin_unlock(&adapter->RcvQLock);
1863 }
5db6b777
GKH
1864 return (Packet);
1865
1866 drop:
1867 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1868 RcvDataBufferHdr, Event->Length, 0, 0);
54aed113
MT
1869 adapter->stats.rx_dropped++;
1870 /* adapter->Stats.RcvDiscards++; */
5db6b777
GKH
1871 spin_lock(&adapter->RcvQLock);
1872 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1873 spin_unlock(&adapter->RcvQLock);
1874 return (NULL);
1875}
1876
1877/*
1878 * sxg_process_rcv_error - process receive error and update
1879 * stats
1880 *
1881 * Arguments:
1882 * adapter - Adapter structure
1883 * ErrorStatus - 4-byte receive error status
1884 *
cb636fe3 1885 * Return Value : None
5db6b777 1886 */
73b07065 1887static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
5db6b777
GKH
1888{
1889 u32 Error;
1890
54aed113 1891 adapter->stats.rx_errors++;
5db6b777
GKH
1892
1893 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1894 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1895 switch (Error) {
1896 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1897 adapter->Stats.TransportCsum++;
1898 break;
1899 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1900 adapter->Stats.TransportUflow++;
1901 break;
1902 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1903 adapter->Stats.TransportHdrLen++;
1904 break;
1905 }
1906 }
1907 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1908 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1909 switch (Error) {
1910 case SXG_RCV_STATUS_NETWORK_CSUM:
1911 adapter->Stats.NetworkCsum++;
1912 break;
1913 case SXG_RCV_STATUS_NETWORK_UFLOW:
1914 adapter->Stats.NetworkUflow++;
1915 break;
1916 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1917 adapter->Stats.NetworkHdrLen++;
1918 break;
1919 }
1920 }
1921 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1922 adapter->Stats.Parity++;
1923 }
1924 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1925 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1926 switch (Error) {
1927 case SXG_RCV_STATUS_LINK_PARITY:
1928 adapter->Stats.LinkParity++;
1929 break;
1930 case SXG_RCV_STATUS_LINK_EARLY:
1931 adapter->Stats.LinkEarly++;
1932 break;
1933 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1934 adapter->Stats.LinkBufOflow++;
1935 break;
1936 case SXG_RCV_STATUS_LINK_CODE:
1937 adapter->Stats.LinkCode++;
1938 break;
1939 case SXG_RCV_STATUS_LINK_DRIBBLE:
1940 adapter->Stats.LinkDribble++;
1941 break;
1942 case SXG_RCV_STATUS_LINK_CRC:
1943 adapter->Stats.LinkCrc++;
1944 break;
1945 case SXG_RCV_STATUS_LINK_OFLOW:
1946 adapter->Stats.LinkOflow++;
1947 break;
1948 case SXG_RCV_STATUS_LINK_UFLOW:
1949 adapter->Stats.LinkUflow++;
1950 break;
1951 }
1952 }
1953}
1954
1955/*
1956 * sxg_mac_filter
1957 *
1958 * Arguments:
1959 * adapter - Adapter structure
1960 * pether - Ethernet header
1961 * length - Frame length
1962 *
cb636fe3 1963 * Return Value : TRUE if the frame is to be allowed
5db6b777 1964 */
cb636fe3
MT
1965static bool sxg_mac_filter(struct adapter_t *adapter,
1966 struct ether_header *EtherHdr, ushort length)
5db6b777
GKH
1967{
1968 bool EqualAddr;
b040b07b 1969 struct net_device *dev = adapter->netdev;
5db6b777
GKH
1970
1971 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1972 if (SXG_BROADCAST_PACKET(EtherHdr)) {
b243c4aa 1973 /* broadcast */
5db6b777
GKH
1974 if (adapter->MacFilter & MAC_BCAST) {
1975 adapter->Stats.DumbRcvBcastPkts++;
1976 adapter->Stats.DumbRcvBcastBytes += length;
5db6b777
GKH
1977 return (TRUE);
1978 }
1979 } else {
b243c4aa 1980 /* multicast */
5db6b777
GKH
1981 if (adapter->MacFilter & MAC_ALLMCAST) {
1982 adapter->Stats.DumbRcvMcastPkts++;
1983 adapter->Stats.DumbRcvMcastBytes += length;
5db6b777
GKH
1984 return (TRUE);
1985 }
1986 if (adapter->MacFilter & MAC_MCAST) {
b040b07b
MT
1987 struct dev_mc_list *mclist = dev->mc_list;
1988 while (mclist) {
1989 ETHER_EQ_ADDR(mclist->da_addr,
5db6b777
GKH
1990 EtherHdr->ether_dhost,
1991 EqualAddr);
1992 if (EqualAddr) {
1993 adapter->Stats.
1994 DumbRcvMcastPkts++;
1995 adapter->Stats.
1996 DumbRcvMcastBytes += length;
5db6b777
GKH
1997 return (TRUE);
1998 }
b040b07b 1999 mclist = mclist->next;
5db6b777
GKH
2000 }
2001 }
2002 }
2003 } else if (adapter->MacFilter & MAC_DIRECTED) {
ddd6f0a8
MT
2004 /*
2005 * Not broadcast or multicast. Must be directed at us or
2006 * the card is in promiscuous mode. Either way, consider it
2007 * ours if MAC_DIRECTED is set
2008 */
5db6b777
GKH
2009 adapter->Stats.DumbRcvUcastPkts++;
2010 adapter->Stats.DumbRcvUcastBytes += length;
5db6b777
GKH
2011 return (TRUE);
2012 }
2013 if (adapter->MacFilter & MAC_PROMISC) {
b243c4aa 2014 /* Whatever it is, keep it. */
5db6b777
GKH
2015 return (TRUE);
2016 }
5db6b777
GKH
2017 return (FALSE);
2018}
b040b07b 2019
73b07065 2020static int sxg_register_interrupt(struct adapter_t *adapter)
5db6b777
GKH
2021{
2022 if (!adapter->intrregistered) {
2023 int retval;
2024
2025 DBG_ERROR
2026 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
e88bd231 2027 __func__, adapter, adapter->netdev->irq, NR_IRQS);
5db6b777 2028
5c7514e0
M
2029 spin_unlock_irqrestore(&sxg_global.driver_lock,
2030 sxg_global.flags);
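		/* The global lock must be dropped here: request_irq() can sleep */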
5db6b777
GKH
2031
2032 retval = request_irq(adapter->netdev->irq,
2033 &sxg_isr,
2034 IRQF_SHARED,
2035 adapter->netdev->name, adapter->netdev);
2036
2037 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2038
2039 if (retval) {
2040 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
2041 adapter->netdev->name, retval);
2042 return (retval);
2043 }
2044 adapter->intrregistered = 1;
2045 adapter->IntRegistered = TRUE;
b243c4aa 2046 /* Disable RSS with line-based interrupts */
5db6b777
GKH
2047 adapter->RssEnabled = FALSE;
2048 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
e88bd231 2049 __func__, adapter, adapter->netdev->irq);
5db6b777
GKH
2050 }
2051 return (STATUS_SUCCESS);
2052}
2053
73b07065 2054static void sxg_deregister_interrupt(struct adapter_t *adapter)
5db6b777 2055{
e88bd231 2056 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
5db6b777
GKH
2057#if XXXTODO
2058 slic_init_cleanup(adapter);
2059#endif
2060 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
2061 adapter->error_interrupts = 0;
2062 adapter->rcv_interrupts = 0;
2063 adapter->xmit_interrupts = 0;
2064 adapter->linkevent_interrupts = 0;
2065 adapter->upr_interrupts = 0;
2066 adapter->num_isrs = 0;
2067 adapter->xmit_completes = 0;
2068 adapter->rcv_broadcasts = 0;
2069 adapter->rcv_multicasts = 0;
2070 adapter->rcv_unicasts = 0;
e88bd231 2071 DBG_ERROR("sxg: %s EXIT\n", __func__);
5db6b777
GKH
2072}
2073
2074/*
2075 * sxg_if_init
2076 *
2077 * Perform initialization of our slic interface.
2078 *
2079 */
73b07065 2080static int sxg_if_init(struct adapter_t *adapter)
5db6b777 2081{
942798b4 2082 struct net_device *dev = adapter->netdev;
5db6b777
GKH
2083 int status = 0;
2084
1323e5f1 2085 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
e88bd231 2086 __func__, adapter->netdev->name,
1323e5f1 2087 adapter->state,
5db6b777
GKH
2088 adapter->linkstate, dev->flags);
2089
2090 /* adapter should be down at this point */
2091 if (adapter->state != ADAPT_DOWN) {
2092 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
2093 return (-EIO);
2094 }
2095 ASSERT(adapter->linkstate == LINK_DOWN);
2096
2097 adapter->devflags_prev = dev->flags;
b040b07b 2098 adapter->MacFilter = MAC_DIRECTED;
5db6b777 2099 if (dev->flags) {
e88bd231 2100 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
5db6b777
GKH
2101 adapter->netdev->name);
2102 if (dev->flags & IFF_BROADCAST) {
b040b07b 2103 adapter->MacFilter |= MAC_BCAST;
5db6b777
GKH
2104 DBG_ERROR("BCAST ");
2105 }
2106 if (dev->flags & IFF_PROMISC) {
b040b07b 2107 adapter->MacFilter |= MAC_PROMISC;
5db6b777
GKH
2108 DBG_ERROR("PROMISC ");
2109 }
2110 if (dev->flags & IFF_ALLMULTI) {
b040b07b 2111 adapter->MacFilter |= MAC_ALLMCAST;
5db6b777
GKH
2112 DBG_ERROR("ALL_MCAST ");
2113 }
2114 if (dev->flags & IFF_MULTICAST) {
b040b07b 2115 adapter->MacFilter |= MAC_MCAST;
5db6b777
GKH
2116 DBG_ERROR("MCAST ");
2117 }
2118 DBG_ERROR("\n");
2119 }
1782199f 2120 status = sxg_register_intr(adapter);
5db6b777 2121 if (status != STATUS_SUCCESS) {
1782199f 2122 DBG_ERROR("sxg_if_init: sxg_register_intr FAILED %x\n",
5db6b777
GKH
2123 status);
2124 sxg_deregister_interrupt(adapter);
2125 return (status);
2126 }
2127
2128 adapter->state = ADAPT_UP;
2129
ddd6f0a8 2130 /* clear any pending events, then enable interrupts */
e88bd231 2131 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
5db6b777
GKH
2132
2133 return (STATUS_SUCCESS);
2134}
2135
b62a294f
MT
2136void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
2137{
2138 /*
2139 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
2140 * Make sure Max is less than 0x8000.
2141 */
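	/*
	 * The register presumably packs the max delay in the upper half
	 * (shifted up by SXG_MAX_AGG_SHIFT) and the min in the low bits, so
	 * a max below 0x8000 keeps the SXG_AGG_XMT_DISABLE bit clear.
	 */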
2142 adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
2143 adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
2144 WRITE_REG(adapter->UcodeRegs[0].Aggregation,
2145 ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
2146 adapter->min_aggregation),
2147 TRUE);
2148}
2149
942798b4 2150static int sxg_entry_open(struct net_device *dev)
5db6b777 2151{
73b07065 2152 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
5db6b777 2153 int status;
0d414727 2154 static int turn;
7c66b14b
MT
2155 int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
2156 int i;
2157
2158 if (adapter->JumboEnabled == TRUE) {
2159 sxg_initial_rcv_data_buffers =
2160 SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;
2161 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo,
2162 SXG_JUMBO_RCV_RING_SIZE);
2163 }
2164
2165 /*
2166 * Allocate receive data buffers. We allocate a block of buffers and
2167 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
2168 */
2169
2170 for (i = 0; i < sxg_initial_rcv_data_buffers;
2171 i += SXG_RCV_DESCRIPTORS_PER_BLOCK)
2172 {
2173 status = sxg_allocate_buffer_memory(adapter,
2174 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
2175 SXG_BUFFER_TYPE_RCV);
2176 if (status != STATUS_SUCCESS)
2177 return status;
2178 }
2179 /*
2180 * NBL resource allocation can fail in the 'AllocateComplete' routine,
2181 * which doesn't return status. Make sure we got the number of buffers
2182 * we requested
2183 */
2184
2185 if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) {
2186 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
2187 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
2188 0);
2189 return (STATUS_RESOURCES);
2190 }
2191 /*
2192 * The microcode must be downloaded on every open.
2193 */
2194 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
a536efcc 2195 if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
7c66b14b
MT
2196 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
2197 __func__);
2198 sxg_read_config(adapter);
2199 } else {
2200 adapter->state = ADAPT_FAIL;
2201 adapter->linkstate = LINK_DOWN;
2202 /* 'status' was stale here, so don't print it as a failure code */
2203 DBG_ERROR("sxg_download_microcode FAILED\n");
2204 }
2205 msleep(5);
0d414727
MT
2206
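	/*
	 * The static 'turn' flag means only the first open performs the
	 * full bring-up below; subsequent opens take the lighter
	 * sxg_second_open() path.
	 */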
2207 if (turn) {
2208 sxg_second_open(adapter->netdev);
2209
2210 return STATUS_SUCCESS;
2211 }
2212
2213 turn++;
5db6b777
GKH
2214
2215 ASSERT(adapter);
e88bd231 2216 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
5db6b777
GKH
2217 adapter->activated);
2218 DBG_ERROR
2219 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
e88bd231 2220 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
5db6b777
GKH
2221 adapter->netdev, adapter, adapter->port);
2222
2223 netif_stop_queue(adapter->netdev);
2224
2225 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2226 if (!adapter->activated) {
2227 sxg_global.num_sxg_ports_active++;
2228 adapter->activated = 1;
2229 }
b243c4aa 2230 /* Initialize the adapter */
e88bd231 2231 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
5db6b777
GKH
2232 status = sxg_initialize_adapter(adapter);
2233 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
e88bd231 2234 __func__, status);
5db6b777
GKH
2235
2236 if (status == STATUS_SUCCESS) {
e88bd231 2237 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
5db6b777 2238 status = sxg_if_init(adapter);
e88bd231 2239 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
5db6b777
GKH
2240 status);
2241 }
2242
2243 if (status != STATUS_SUCCESS) {
2244 if (adapter->activated) {
2245 sxg_global.num_sxg_ports_active--;
2246 adapter->activated = 0;
2247 }
2248 spin_unlock_irqrestore(&sxg_global.driver_lock,
2249 sxg_global.flags);
2250 return (status);
2251 }
e88bd231 2252 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
b62a294f
MT
2253 sxg_set_interrupt_aggregation(adapter);
2254 napi_enable(&adapter->napi);
5db6b777 2255
b243c4aa 2256 /* Enable interrupts */
5db6b777
GKH
2257 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2258
e88bd231 2259 DBG_ERROR("sxg: %s EXIT\n", __func__);
5db6b777
GKH
2260
2261 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1f895130
MT
2262 mod_timer(&adapter->watchdog_timer, jiffies);
2263
5db6b777
GKH
2264 return STATUS_SUCCESS;
2265}
2266
0d414727
MT
2267int sxg_second_open(struct net_device * dev)
2268{
2269 struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev);
b62a294f 2270 int status = 0;
0d414727
MT
2271
2272 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2273 netif_start_queue(adapter->netdev);
2274 adapter->state = ADAPT_UP;
2275 adapter->linkstate = LINK_UP;
2276
b62a294f
MT
2277 status = sxg_initialize_adapter(adapter);
2278 sxg_set_interrupt_aggregation(adapter);
2279 napi_enable(&adapter->napi);
0d414727
MT
2280 /* Re-enable interrupts */
2281 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2282
544ed364 2283 sxg_register_intr(adapter);
1782199f 2284 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
e5ea8da0 2285 mod_timer(&adapter->watchdog_timer, jiffies);
0d414727
MT
2286 return (STATUS_SUCCESS);
2287
2288}
2289
5db6b777
GKH
2290static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
2291{
0d414727
MT
2292 u32 mmio_start = 0;
2293 u32 mmio_len = 0;
2294
942798b4 2295 struct net_device *dev = pci_get_drvdata(pcidev);
73b07065 2296 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
6a2946ba
MT
2297
2298 flush_scheduled_work();
d0128aa9
MT
2299
2300 /* Deallocate Resources */
d9d578bf 2301 unregister_netdev(dev);
1782199f 2302 sxg_reset_interrupt_capability(adapter);
d9d578bf 2303 sxg_free_resources(adapter);
d0128aa9 2304
5db6b777 2305 ASSERT(adapter);
5db6b777 2306
0d414727
MT
2307 mmio_start = pci_resource_start(pcidev, 0);
2308 mmio_len = pci_resource_len(pcidev, 0);
5db6b777 2309
0d414727
MT
2310 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
2311 mmio_start, mmio_len);
2312 release_mem_region(mmio_start, mmio_len);
5db6b777 2313
d0128aa9
MT
2314 mmio_start = pci_resource_start(pcidev, 2);
2315 mmio_len = pci_resource_len(pcidev, 2);
2316
2317 DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __func__,
2318 mmio_start, mmio_len);
2319 release_mem_region(mmio_start, mmio_len);
2320
d0128aa9 2321 pci_disable_device(pcidev);
5db6b777 2322
e88bd231 2323 DBG_ERROR("sxg: %s deallocate device\n", __func__);
5db6b777 2324 kfree(dev);
e88bd231 2325 DBG_ERROR("sxg: %s EXIT\n", __func__);
5db6b777
GKH
2326}
2327
942798b4 2328static int sxg_entry_halt(struct net_device *dev)
5db6b777 2329{
73b07065 2330 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
7c66b14b
MT
2331 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2332 int i;
2333 u32 RssIds, IsrCount;
2334 unsigned long flags;
2335
2336 RssIds = SXG_RSS_CPU_COUNT(adapter);
1782199f 2337 IsrCount = adapter->msi_enabled ? RssIds : 1;
e5ea8da0 2338 /* Disable interrupts */
5db6b777 2339 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
e5ea8da0 2340 SXG_DISABLE_ALL_INTERRUPTS(adapter);
5db6b777
GKH
2341 adapter->state = ADAPT_DOWN;
2342 adapter->linkstate = LINK_DOWN;
d0128aa9 2343
5db6b777 2344 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
d9d578bf 2345 sxg_deregister_interrupt(adapter);
7c66b14b
MT
2346 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
2347 mdelay(5000);
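	/*
	 * Assumption: writing the 0xDEAD magic resets the card, and the
	 * long delay lets in-flight DMA quiesce before the receive and
	 * transmit resources are torn down below.
	 */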
e5ea8da0
MT
2348
2349 del_timer_sync(&adapter->watchdog_timer);
2350 netif_stop_queue(dev);
2351 netif_carrier_off(dev);
2352
2353 napi_disable(&adapter->napi);
2354
2355 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, true);
2356 adapter->devflags_prev = 0;
2357 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
2358 __func__, dev->name, adapter, adapter->state);
2359
7c66b14b
MT
2360 spin_lock(&adapter->RcvQLock);
2361 /* Free all the blocks and the buffers, moved from remove() routine */
2362 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
2363 sxg_free_rcvblocks(adapter);
2364 }
2365
2366
2367 InitializeListHead(&adapter->FreeRcvBuffers);
2368 InitializeListHead(&adapter->FreeRcvBlocks);
2369 InitializeListHead(&adapter->AllRcvBlocks);
2370 InitializeListHead(&adapter->FreeSglBuffers);
2371 InitializeListHead(&adapter->AllSglBuffers);
2372
2373 adapter->FreeRcvBufferCount = 0;
2374 adapter->FreeRcvBlockCount = 0;
2375 adapter->AllRcvBlockCount = 0;
2376 adapter->RcvBuffersOnCard = 0;
2377 adapter->PendingRcvCount = 0;
2378
2379 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
2380 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
2381 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
2382 for (i = 0; i < SXG_MAX_RING_SIZE; i++)
2383 adapter->RcvRingZeroInfo.Context[i] = NULL;
2384 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
2385 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
2386
2387 spin_unlock(&adapter->RcvQLock);
2388
2389 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2390 adapter->AllSglBufferCount = 0;
2391 adapter->FreeSglBufferCount = 0;
2392 adapter->PendingXmtCount = 0;
2393 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
2394 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
2395 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2396
7c66b14b
MT
2397 for (i = 0; i < SXG_MAX_RSS; i++) {
2398 adapter->NextEvent[i] = 0;
2399 }
2400 atomic_set(&adapter->pending_allocations, 0);
1782199f
MT
2401 adapter->intrregistered = 0;
2402 sxg_remove_isr(adapter);
2403 DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
5db6b777
GKH
2404 return (STATUS_SUCCESS);
2405}
2406
942798b4 2407static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5db6b777
GKH
2408{
2409 ASSERT(rq);
cb636fe3 2410/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
5db6b777
GKH
2411 switch (cmd) {
2412 case SIOCSLICSETINTAGG:
2413 {
cb636fe3
MT
2414 /* struct adapter_t *adapter = (struct adapter_t *)
2415 * netdev_priv(dev);
2416 */
5db6b777
GKH
2417 u32 data[7];
2418 u32 intagg;
2419
2420 if (copy_from_user(data, rq->ifr_data, 28)) {
cb636fe3
MT
2421 DBG_ERROR("copy_from_user FAILED getting "
2422 "initial params\n");
5db6b777
GKH
2423 return -EFAULT;
2424 }
2425 intagg = data[0];
2426 printk(KERN_EMERG
2427 "%s: set interrupt aggregation to %d\n",
e88bd231 2428 __func__, intagg);
5db6b777
GKH
2429 return 0;
2430 }
2431
2432 default:
cb636fe3 2433 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
5db6b777
GKH
2434 return -EOPNOTSUPP;
2435 }
2436 return 0;
2437}
2438
2439#define NORMAL_ETHFRAME 0
2440
2441/*
5db6b777
GKH
2442 * sxg_send_packets - Send a skb packet
2443 *
2444 * Arguments:
cb636fe3
MT
2445 * skb - The packet to send
2446 * dev - Our linux net device that refs our adapter
5db6b777
GKH
2447 *
2448 * Return:
2449 * NETDEV_TX_OK in all cases; packets that cannot be sent are dropped
2450 */
942798b4 2451static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
5db6b777 2452{
73b07065 2453 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
5db6b777
GKH
2454 u32 status = STATUS_SUCCESS;
2455
ddd6f0a8
MT
2456 /*
2457 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2458 * skb);
2459 */
1323e5f1 2460
b243c4aa 2461 /* Check the adapter state */
5db6b777
GKH
2462 switch (adapter->State) {
2463 case SXG_STATE_INITIALIZING:
2464 case SXG_STATE_HALTED:
2465 case SXG_STATE_SHUTDOWN:
b243c4aa
M
2466 ASSERT(0); /* unexpected */
2467 /* fall through */
5db6b777
GKH
2468 case SXG_STATE_RESETTING:
2469 case SXG_STATE_SLEEP:
2470 case SXG_STATE_BOOTDIAG:
2471 case SXG_STATE_DIAG:
2472 case SXG_STATE_HALTING:
2473 status = STATUS_FAILURE;
2474 break;
2475 case SXG_STATE_RUNNING:
2476 if (adapter->LinkState != SXG_LINK_UP) {
2477 status = STATUS_FAILURE;
2478 }
2479 break;
2480 default:
2481 ASSERT(0);
2482 status = STATUS_FAILURE;
2483 }
2484 if (status != STATUS_SUCCESS) {
2485 goto xmit_fail;
2486 }
b243c4aa 2487 /* send a packet */
5db6b777
GKH
2488 status = sxg_transmit_packet(adapter, skb);
2489 if (status == STATUS_SUCCESS) {
2490 goto xmit_done;
2491 }
2492
2493 xmit_fail:
b243c4aa 2494 /* reject & complete all the packets if they cant be sent */
5db6b777
GKH
2495 if (status != STATUS_SUCCESS) {
2496#if XXXTODO
cb636fe3 2497 /* sxg_send_packets_fail(adapter, skb, status); */
5db6b777
GKH
2498#else
2499 SXG_DROP_DUMB_SEND(adapter, skb);
2500 adapter->stats.tx_dropped++;
 /* The skb was freed by the drop above; returning NETDEV_TX_BUSY
 * would make the stack retry a freed skb, so report it consumed. */
2501 return NETDEV_TX_OK;
5db6b777
GKH
2502#endif
2503 }
e88bd231 2504 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
5db6b777
GKH
2505 status);
2506
2507 xmit_done:
d9d578bf 2508 return NETDEV_TX_OK;
5db6b777
GKH
2509}
2510
2511/*
2512 * sxg_transmit_packet
2513 *
2514 * This function transmits a single packet.
2515 *
2516 * Arguments -
2517 * adapter - Pointer to our adapter structure
2518 * skb - The packet to be sent
2519 *
cb636fe3 2520 * Return - STATUS of send
5db6b777 2521 */
73b07065 2522static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
5db6b777 2523{
942798b4
MT
2524 struct sxg_x64_sgl *pSgl;
2525 struct sxg_scatter_gather *SxgSgl;
d9d578bf 2526 unsigned long sgl_flags;
d0128aa9
MT
2527 /* void *SglBuffer; */
2528 /* u32 SglBufferLength; */
5db6b777 2529
ddd6f0a8
MT
2530 /*
2531 * The vast majority of work is done in the shared
2532 * sxg_dumb_sgl routine.
2533 */
5db6b777
GKH
2534 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2535 adapter, skb, 0, 0);
2536
b243c4aa 2537 /* Allocate a SGL buffer */
d9d578bf 2538 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
5db6b777
GKH
2539 if (!SxgSgl) {
2540 adapter->Stats.NoSglBuf++;
54aed113 2541 adapter->stats.tx_errors++;
5db6b777
GKH
2542 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2543 adapter, skb, 0, 0);
2544 return (STATUS_RESOURCES);
2545 }
2546 ASSERT(SxgSgl->adapter == adapter);
d0128aa9
MT
2547 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2548 SglBufferLength = SXG_SGL_BUF_SIZE; */
5db6b777
GKH
2549 SxgSgl->VlanTag.VlanTci = 0;
2550 SxgSgl->VlanTag.VlanTpid = 0;
2551 SxgSgl->Type = SXG_SGL_DUMB;
2552 SxgSgl->DumbPacket = skb;
2553 pSgl = NULL;
2554
b243c4aa 2555 /* Call the common sxg_dumb_sgl routine to complete the send. */
d9d578bf 2556 return (sxg_dumb_sgl(pSgl, SxgSgl));
5db6b777
GKH
2557}
2558
2559/*
2560 * sxg_dumb_sgl
2561 *
2562 * Arguments:
2563 * pSgl -
942798b4 2564 * SxgSgl - struct sxg_scatter_gather
5db6b777
GKH
2565 *
2566 * Return Value:
d9d578bf 2567 * Status of send operation.
5db6b777 2568 */
d9d578bf 2569static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
cb636fe3 2570 struct sxg_scatter_gather *SxgSgl)
5db6b777 2571{
73b07065 2572 struct adapter_t *adapter = SxgSgl->adapter;
5db6b777 2573 struct sk_buff *skb = SxgSgl->DumbPacket;
b243c4aa 2574 /* For now, all dumb-nic sends go on RSS queue zero */
942798b4
MT
2575 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2576 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2577 struct sxg_cmd *XmtCmd = NULL;
cb636fe3 2578 /* u32 Index = 0; */
5db6b777 2579 u32 DataLength = skb->len;
cb636fe3
MT
2580 /* unsigned int BufLen; */
2581 /* u32 SglOffset; */
5db6b777 2582 u64 phys_addr;
d9d578bf 2583 unsigned long flags;
0d414727 2584 unsigned long queue_id=0;
b824adc9 2585 int offload_cksum = 0;
5db6b777
GKH
2586
2587 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2588 pSgl, SxgSgl, 0, 0);
2589
b243c4aa 2590 /* Set aside a pointer to the sgl */
5db6b777
GKH
2591 SxgSgl->pSgl = pSgl;
2592
b243c4aa 2593 /* Sanity check that our SGL format is as we expect. */
942798b4 2594 ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
b243c4aa 2595 /* Shouldn't be a vlan tag on this frame */
5db6b777
GKH
2596 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2597 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2598
ddd6f0a8
MT
2599 /*
2600 * From here below we work with the SGL placed in our
2601 * buffer.
2602 */
5db6b777
GKH
2603
2604 SxgSgl->Sgl.NumberOfElements = 1;
0d414727
MT
2605 /*
2606 * Set ucode Queue ID based on bottom bits of destination TCP port.
2607 * This Queue ID splits slowpath/dumb-nic packet processing across
2608 * multiple threads on the card to improve performance. It is split
2609 * using the TCP port to avoid out-of-order packets that can result
2610 * from multithreaded processing. We use the destination port because
2611 * we expect to be run on a server, so in nearly all cases the local
2612 * port is likely to be constant (well-known server port) and the
2613 * remote port is likely to be random. The exception to this is iSCSI,
2614 * in which case we use the sport instead. Note
2615 * that the original attempt at XOR'ing source and dest port resulted in
2616 * poor balance on NTTTCP/iometer applications since they tend to
2617 * line up (even-even, odd-odd..).
2618 */
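	/*
	 * Illustrative example, assuming SXG_LARGE_SEND_QUEUE_MASK is 0x3
	 * (four queues, per the note at the XmtCmd write below): a packet
	 * to dest port 80 (0x0050) maps to queue 0x0050 & 0x3 = 0, while
	 * an iSCSI packet with source port 51515 (0xC93B) maps to queue
	 * 0xC93B & 0x3 = 3.
	 */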
2619
2620 if (skb->protocol == htons(ETH_P_IP)) {
2621 struct iphdr *ip;
2622
2623 ip = ip_hdr(skb);
b824adc9
MT
2624 if (ip->protocol == IPPROTO_TCP)
2625 offload_cksum = 1;
2626 if (!offload_cksum || !tcp_hdr(skb))
8d17e6ad 2627 queue_id = 0;
b824adc9 2628 else if (offload_cksum && (DataLength >= sizeof(
0d414727
MT
2629 struct tcphdr))){
2630 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2631 (ntohs (tcp_hdr(skb)->source) &
2632 SXG_LARGE_SEND_QUEUE_MASK):
2633 (ntohs(tcp_hdr(skb)->dest) &
2634 SXG_LARGE_SEND_QUEUE_MASK));
2635 }
2636 } else if (skb->protocol == htons(ETH_P_IPV6)) {
b824adc9
MT
2637 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2638 offload_cksum = 1;
2639 if (!offload_cksum || !tcp_hdr(skb))
8d17e6ad 2640 queue_id = 0;
b824adc9 2641 else if (offload_cksum && (DataLength>=sizeof(struct tcphdr))){
0d414727
MT
2642 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2643 (ntohs (tcp_hdr(skb)->source) &
2644 SXG_LARGE_SEND_QUEUE_MASK):
2645 (ntohs(tcp_hdr(skb)->dest) &
2646 SXG_LARGE_SEND_QUEUE_MASK));
2647 }
2648 }
5db6b777 2649
b243c4aa 2650 /* Grab the spinlock and acquire a command */
d9d578bf 2651 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
5db6b777
GKH
2652 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2653 if (XmtCmd == NULL) {
ddd6f0a8
MT
2654 /*
2655 * Call sxg_complete_slow_send to see if we can
2656 * free up any XmtRingZero entries and then try again
2657 */
d9d578bf
MT
2658
2659 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
c5e5cf5a 2660 sxg_complete_slow_send(adapter);
d9d578bf 2661 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
5db6b777
GKH
2662 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2663 if (XmtCmd == NULL) {
2664 adapter->Stats.XmtZeroFull++;
2665 goto abortcmd;
2666 }
2667 }
2668 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2669 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
b824adc9
MT
2670 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2671 XmtCmd->SgEntries = 1;
2672 XmtCmd->Flags = 0;
2673 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2674 /*
2675 * We need to set the checksum in the IP header to 0. This
2676 * is required by the hardware.
2677 */
2678 if (offload_cksum) {
2679 ip_hdr(skb)->check = 0x0;
2680 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP;
2681 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP;
2682 /*
2683 * Don't know if the length will require a change
2684 * in the case of VLAN
2685 */
2686 XmtCmd->CsumFlags.MacLen = ETH_HLEN;
2687 XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >>
2688 SXG_NW_HDR_LEN_SHIFT;
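			/*
			 * E.g. a standard 20-byte IPv4 header gives
			 * IpHl = 20 >> 2 = 5, assuming SXG_NW_HDR_LEN_SHIFT
			 * is 2 (header length in 32-bit words).
			 */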
5db6b777 2689 } else {
b824adc9
MT
2690 if (skb_checksum_help(skb)){
2691 printk(KERN_EMERG "Dropped UDP packet for"
2692 " incorrect checksum calculation\n");
2693 if (XmtCmd)
2694 SXG_ABORT_CMD(XmtRingInfo);
2695 spin_unlock_irqrestore(&adapter->XmtZeroLock,
2696 flags);
2697 return STATUS_SUCCESS;
2698 }
5db6b777 2699 }
5db6b777 2700 }
b824adc9 2701
ddd6f0a8
MT
2702 /*
2703 * Fill in the command
2704 * Copy out the first SGE to the command and adjust for offset
2705 */
cb636fe3 2706 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
5c7514e0 2707 PCI_DMA_TODEVICE);
7c66b14b
MT
2708
2709 /*
2710 * SAHARA SGL WORKAROUND
2711 * See if the SGL straddles a 64k boundary. If so, skip to
2712 * the start of the next 64k boundary and continue
2713 */
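	/*
	 * Worked example: a buffer mapped at 0xFFF0 with length 0x40 spans
	 * 0xFFF0..0x1002F, crossing the 64 KB boundary at 0x10000; this is
	 * presumably the case SXG_INVALID_SGL flags on Rev A parts.
	 */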
2714
a536efcc
MT
 /* Check the full mapped length: skb->len was mapped above;
 * skb->data_len is only the paged portion. */
2715 if ((adapter->asictype == SAHARA_REV_A) &&
2716 (SXG_INVALID_SGL(phys_addr, skb->len)))
7c66b14b
MT
2717 {
2718 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
b824adc9
MT
2719 if (XmtCmd)
2720 SXG_ABORT_CMD(XmtRingInfo);
7c66b14b
MT
2721 /* Silently drop this packet */
2722 printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n");
2723 return STATUS_SUCCESS;
2724 }
1323e5f1 2725 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
5db6b777 2726 XmtCmd->Buffer.FirstSgeLength = DataLength;
5db6b777 2727 XmtCmd->Buffer.SgeOffset = 0;
5db6b777 2728 XmtCmd->Buffer.TotalLength = DataLength;
9914f053 2729
ddd6f0a8
MT
2730 /*
2731 * Advance the transmit cmd descriptor by 1.
2732 * NOTE - See comments in SxgTcpOutput where we write
2733 * to the XmtCmd register regarding CPU ID values and/or
2734 * multiple commands.
0d414727 2735 * Top 16 bits specify queue_id. See comments about queue_id above
ddd6f0a8 2736 */
0d414727
MT
2737 /* Four queues at the moment */
2738 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
2739 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
b243c4aa 2740 adapter->Stats.XmtQLen++; /* Stats within lock */
b824adc9
MT
2741 /* Update stats */
2742 adapter->stats.tx_packets++;
2743 adapter->stats.tx_bytes += DataLength;
2744#if XXXTODO /* Stats stuff */
2745 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2746 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2747 adapter->Stats.DumbXmtBcastPkts++;
2748 adapter->Stats.DumbXmtBcastBytes += DataLength;
2749 } else {
2750 adapter->Stats.DumbXmtMcastPkts++;
2751 adapter->Stats.DumbXmtMcastBytes += DataLength;
2752 }
2753 } else {
2754 adapter->Stats.DumbXmtUcastPkts++;
2755 adapter->Stats.DumbXmtUcastBytes += DataLength;
2756 }
2757#endif
2758
d9d578bf 2759 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
5db6b777
GKH
2760 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2761 XmtCmd, pSgl, SxgSgl, 0);
d9d578bf 2762 return STATUS_SUCCESS;
5db6b777
GKH
2763
2764 abortcmd:
ddd6f0a8
MT
2765 /*
2766 * NOTE - Only jump to this label AFTER grabbing the
2767 * XmtZeroLock, and DO NOT DROP IT between the
2768 * command allocation and the following abort.
2769 */
5db6b777
GKH
2770 if (XmtCmd) {
2771 SXG_ABORT_CMD(XmtRingInfo);
2772 }
d9d578bf 2773 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
5db6b777 2774
ddd6f0a8
MT
2775/*
2776 * failsgl:
2777 * Jump to this label if failure occurs before the
2778 * XmtZeroLock is grabbed
2779 */
6a2946ba 2780 adapter->stats.tx_errors++;
5db6b777
GKH
2781 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2782 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
cb636fe3 2783 /* SxgSgl->DumbPacket is the skb */
d9d578bf 2784 /* SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); */
54aed113
MT
2785
2786 return STATUS_FAILURE;
5db6b777
GKH
2787}
2788
5db6b777 2789/*
ddd6f0a8
MT
2790 * Link management functions
2791 *
5db6b777
GKH
2792 * sxg_initialize_link - Initialize the link stuff
2793 *
2794 * Arguments -
2795 * adapter - A pointer to our adapter structure
2796 *
2797 * Return
2798 * status
2799 */
73b07065 2800static int sxg_initialize_link(struct adapter_t *adapter)
5db6b777 2801{
942798b4 2802 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
5db6b777
GKH
2803 u32 Value;
2804 u32 ConfigData;
2805 u32 MaxFrame;
a536efcc 2806 u32 AxgMacReg1;
5db6b777
GKH
2807 int status;
2808
2809 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2810 adapter, 0, 0, 0);
2811
b243c4aa 2812 /* Reset PHY and XGXS module */
5db6b777
GKH
2813 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2814
b243c4aa 2815 /* Reset transmit configuration register */
5db6b777
GKH
2816 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2817
b243c4aa 2818 /* Reset receive configuration register */
5db6b777
GKH
2819 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2820
b243c4aa 2821 /* Reset all MAC modules */
5db6b777
GKH
2822 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2823
ddd6f0a8
MT
2824 /*
2825 * Link address 0
2826 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
2827 * is stored with the first octet (0a) in byte 0
2828 * of the Mac address. Possibly reverse?
2829 */
1323e5f1 2830 Value = *(u32 *) adapter->macaddr;
5db6b777 2831 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
b243c4aa 2832 /* also write the MAC address to the MAC. Endian is reversed. */
5db6b777 2833 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
1323e5f1 2834 Value = (*(u16 *) & adapter->macaddr[4] & 0x0000FFFF);
5db6b777 2835 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
b243c4aa 2836 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
5db6b777
GKH
2837 Value = ntohl(Value);
2838 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
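	/*
	 * Worked example, assuming a little-endian host and the MAC address
	 * 0a:0b:0c:0d:0e:0f: *(u32 *)macaddr reads 0x0d0c0b0a, written to
	 * LinkAddress0Low as-is, while ntohl() swaps it to 0x0a0b0c0d for
	 * the MacAddress registers.
	 */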
b243c4aa 2839 /* Link address 1 */
5db6b777
GKH
2840 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2841 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
b243c4aa 2842 /* Link address 2 */
5db6b777
GKH
2843 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2844 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
b243c4aa 2845 /* Link address 3 */
5db6b777
GKH
2846 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2847 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2848
b243c4aa 2849 /* Enable MAC modules */
5db6b777
GKH
2850 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2851
b243c4aa 2852 /* Configure MAC */
a536efcc
MT
2853 AxgMacReg1 = ( /* Enable XMT */
2854 AXGMAC_CFG1_XMT_EN |
2855 /* Enable receive */
2856 AXGMAC_CFG1_RCV_EN |
2857 /* short frame detection */
2858 AXGMAC_CFG1_SHORT_ASSERT |
2859 /* Verify frame length */
2860 AXGMAC_CFG1_CHECK_LEN |
2861 /* Generate FCS */
2862 AXGMAC_CFG1_GEN_FCS |
2863 /* Pad frames to 64 bytes */
2864 AXGMAC_CFG1_PAD_64);
2865
2866 if (adapter->XmtFcEnabled) {
2867 AxgMacReg1 |= AXGMAC_CFG1_XMT_PAUSE; /* Allow sending of pause */
2868 }
2869 if (adapter->RcvFcEnabled) {
2870 AxgMacReg1 |= AXGMAC_CFG1_RCV_PAUSE; /* Enable detection of pause */
2871 }
2872
2873 WRITE_REG(HwRegs->MacConfig1, AxgMacReg1, TRUE);
5db6b777 2874
b243c4aa 2875 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
5db6b777
GKH
2876 if (adapter->JumboEnabled) {
2877 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2878 }
ddd6f0a8
MT
2879 /*
2880 * AMIIM Configuration Register -
2881 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2882 * (bottom bits) of this register is used to determine the MDC frequency
2883 * as specified in the A-XGMAC Design Document. This value must not be
2884 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2885 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2886 * frequency of 2.5 MHz (see the PHY spec), we get:
2887 * 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2888 * This value happens to be the default value for this register, so we
2889 * really don't have to do this.
2890 */
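	/*
	 * Arithmetic check: with X = 62 (0x3E), MDC = 312.5 MHz / (2 * 63)
	 * = ~2.48 MHz, just under the 2.5 MHz limit.
	 */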
a536efcc
MT
2891 if (adapter->asictype == SAHARA_REV_B) {
2892 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000001F, TRUE);
2893 } else {
2894 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2895 }
5db6b777 2896
b243c4aa 2897 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
5db6b777 2898 WRITE_REG(HwRegs->LinkStatus,
a536efcc
MT
2899 (LS_PHY_CLR_RESET |
2900 LS_XGXS_ENABLE |
2901 LS_XGXS_CTL |
2902 LS_PHY_CLK_EN |
2903 LS_ATTN_ALARM),
2904 TRUE);
5db6b777
GKH
2905 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2906
ddd6f0a8
MT
2907 /*
2908 * Per information given by Aeluros, wait 100 ms after removing reset.
cb636fe3
MT
2909 * It's not enough to wait for the self-clearing reset bit in reg 0 to
2910 * clear.
ddd6f0a8 2911 */
5db6b777
GKH
2912 mdelay(100);
2913
cb636fe3
MT
2914 /* Verify the PHY has come up by checking that the Reset bit has
2915 * cleared.
2916 */
2917 status = sxg_read_mdio_reg(adapter,
2918 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2919 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2920 &Value);
2921 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2922 (Value & PMA_CONTROL1_RESET));
5db6b777
GKH
2923 if (status != STATUS_SUCCESS)
2924 return (STATUS_FAILURE);
b243c4aa 2925 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
5db6b777
GKH
2926 return (STATUS_FAILURE);
2927
b243c4aa 2928 /* The SERDES should be initialized by now - confirm */
5db6b777 2929 READ_REG(HwRegs->LinkStatus, Value);
b243c4aa 2930 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
5db6b777
GKH
2931 return (STATUS_FAILURE);
2932
b243c4aa
M
2933 /* The XAUI link should also be up - confirm */
2934 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
5db6b777
GKH
2935 return (STATUS_FAILURE);
2936
b243c4aa 2937 /* Initialize the PHY */
5db6b777
GKH
2938 status = sxg_phy_init(adapter);
2939 if (status != STATUS_SUCCESS)
2940 return (STATUS_FAILURE);
2941
b243c4aa 2942 /* Enable the Link Alarm */
cb636fe3
MT
2943
2944 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2945 * LASI_CONTROL - LASI control register
2946 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2947 */
2948 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2949 LASI_CONTROL,
2950 LASI_CTL_LS_ALARM_ENABLE);
5db6b777
GKH
2951 if (status != STATUS_SUCCESS)
2952 return (STATUS_FAILURE);
2953
b243c4aa 2954 /* XXXTODO - temporary - verify bit is set */
cb636fe3
MT
2955
2956 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2957 * LASI_CONTROL - LASI control register
2958 */
2959 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2960 LASI_CONTROL,
5db6b777 2961 &Value);
cb636fe3 2962
5db6b777
GKH
2963 if (status != STATUS_SUCCESS)
2964 return (STATUS_FAILURE);
2965 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2966 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2967 }
b243c4aa 2968 /* Enable receive */
5db6b777
GKH
2969 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2970 ConfigData = (RCV_CONFIG_ENABLE |
2971 RCV_CONFIG_ENPARSE |
2972 RCV_CONFIG_RCVBAD |
2973 RCV_CONFIG_RCVPAUSE |
2974 RCV_CONFIG_TZIPV6 |
2975 RCV_CONFIG_TZIPV4 |
2976 RCV_CONFIG_HASH_16 |
2977 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
a536efcc
MT
2978
2979 if (adapter->asictype == SAHARA_REV_B) {
2980 ConfigData |= (RCV_CONFIG_HIPRICTL |
2981 RCV_CONFIG_NEWSTATUSFMT);
2982 }
5db6b777
GKH
2983 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2984
2985 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2986
b243c4aa 2987 /* Mark the link as down. We'll get a link event when it comes up. */
5db6b777
GKH
2988 sxg_link_state(adapter, SXG_LINK_DOWN);
2989
2990 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2991 adapter, 0, 0, 0);
2992 return (STATUS_SUCCESS);
2993}
2994
2995/*
2996 * sxg_phy_init - Initialize the PHY
2997 *
2998 * Arguments -
2999 * adapter - A pointer to our adapter structure
3000 *
3001 * Return
3002 * status
3003 */
73b07065 3004static int sxg_phy_init(struct adapter_t *adapter)
5db6b777
GKH
3005{
3006 u32 Value;
942798b4 3007 struct phy_ucode *p;
5db6b777
GKH
3008 int status;
3009
e88bd231 3010 DBG_ERROR("ENTER %s\n", __func__);
5db6b777 3011
cb636fe3
MT
3012 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
3013 * 0xC205 - PHY ID register (?)
3014 * &Value - XXXTODO - add def
3015 */
3016 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3017 0xC205,
3018 &Value);
5db6b777
GKH
3019 if (status != STATUS_SUCCESS)
3020 return (STATUS_FAILURE);
3021
cb636fe3
MT
3022 if (Value == 0x0012) {
3023 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
3024 DBG_ERROR("AEL2005C PHY detected. "
3025 "Downloading PHY microcode.\n");
5db6b777 3026
b243c4aa 3027 /* Initialize AEL2005C PHY and download PHY microcode */
5db6b777
GKH
3028 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
3029 if (p->Addr == 0) {
b243c4aa 3030 /* if address == 0, data == sleep time in ms */
5db6b777
GKH
3031 mdelay(p->Data);
3032 } else {
cb636fe3
MT
3033 /* write the given data to the specified address */
3034 status = sxg_write_mdio_reg(adapter,
3035 MIIM_DEV_PHY_PMA,
3036 /* PHY address */
3037 p->Addr,
3038 /* PHY data */
3039 p->Data);
5db6b777
GKH
3040 if (status != STATUS_SUCCESS)
3041 return (STATUS_FAILURE);
3042 }
3043 }
3044 }
e88bd231 3045 DBG_ERROR("EXIT %s\n", __func__);
5db6b777
GKH
3046
3047 return (STATUS_SUCCESS);
3048}
3049
3050/*
3051 * sxg_link_event - Process a link event notification from the card
3052 *
3053 * Arguments -
3054 * adapter - A pointer to our adapter structure
3055 *
3056 * Return
3057 * None
3058 */
73b07065 3059static void sxg_link_event(struct adapter_t *adapter)
5db6b777 3060{
942798b4 3061 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
0d414727 3062 struct net_device *netdev = adapter->netdev;
73b07065 3063 enum SXG_LINK_STATE LinkState;
5db6b777
GKH
3064 int status;
3065 u32 Value;
3066
e5ea8da0
MT
3067 if (adapter->state == ADAPT_DOWN)
3068 return;
5db6b777
GKH
3069 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
3070 adapter, 0, 0, 0);
e88bd231 3071 DBG_ERROR("ENTER %s\n", __func__);
5db6b777 3072
b243c4aa 3073 /* Check the Link Status register. We should have a Link Alarm. */
5db6b777
GKH
3074 READ_REG(HwRegs->LinkStatus, Value);
3075 if (Value & LS_LINK_ALARM) {
ddd6f0a8
MT
3076 /*
3077 * We got a Link Status alarm. First, pause to let the
3078 * link state settle (it can bounce a number of times)
3079 */
5db6b777
GKH
3080 mdelay(10);
3081
b243c4aa 3082 /* Now clear the alarm by reading the LASI status register. */
cb636fe3
MT
3083 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3084 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3085 /* LASI status register */
3086 LASI_STATUS,
5db6b777
GKH
3087 &Value);
3088 if (status != STATUS_SUCCESS) {
3089 DBG_ERROR("Error reading LASI Status MDIO register!\n");
3090 sxg_link_state(adapter, SXG_LINK_DOWN);
cb636fe3 3091 /* ASSERT(0); */
5db6b777 3092 }
a536efcc
MT
3093 /*
3094 * We used to assert that the LASI_LS_ALARM bit was set, as
3095 * it should be. But there appears to be cases during
3096 * initialization (when the PHY is reset and re-initialized)
3097 * when we get a link alarm, but the status bit is 0 when we
3098 * read it. Rather than trying to ensure this never happens
3099 * (and never being certain), just ignore it.
3100 *
3101 * ASSERT(Value & LASI_STATUS_LS_ALARM);
3102 */
5db6b777 3103
b243c4aa 3104 /* Now get and set the link state */
5db6b777
GKH
3105 LinkState = sxg_get_link_state(adapter);
3106 sxg_link_state(adapter, LinkState);
3107 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
3108 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
e5ea8da0 3109 if (LinkState == SXG_LINK_UP) {
0d414727 3110 netif_carrier_on(netdev);
e5ea8da0
MT
3111 netif_tx_start_all_queues(netdev);
3112 } else {
3113 netif_tx_stop_all_queues(netdev);
0d414727 3114 netif_carrier_off(netdev);
e5ea8da0 3115 }
5db6b777 3116 } else {
ddd6f0a8
MT
3117 /*
3118 * XXXTODO - Assuming Link Attention is only being generated
3119 * for the Link Alarm pin (and not for a XAUI Link Status change),
3120 * then it's impossible to get here. Yet we've gotten here
3121 * twice (under extreme conditions - bouncing the link up and
3122 * down many times a second). Needs further investigation.
3123 */
5db6b777
GKH
3124 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
3125 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
cb636fe3 3126 /* ASSERT(0); */
5db6b777 3127 }
e88bd231 3128 DBG_ERROR("EXIT %s\n", __func__);
5db6b777
GKH
3129
3130}
3131
3132/*
3133 * sxg_get_link_state - Determine if the link is up or down
3134 *
3135 * Arguments -
3136 * adapter - A pointer to our adapter structure
3137 *
3138 * Return
3139 * Link State
3140 */
73b07065 3141static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
5db6b777
GKH
3142{
3143 int status;
3144 u32 Value;
3145
e88bd231 3146 DBG_ERROR("ENTER %s\n", __func__);
5db6b777
GKH
3147
3148 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
3149 adapter, 0, 0, 0);
3150
ddd6f0a8
MT
3151 /*
3152 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
3153 * the following 3 bits (from 3 different MDIO registers) are all true.
3154 */
cb636fe3
MT
3155
3156 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3157 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3158 /* PMA/PMD Receive Signal Detect register */
3159 PHY_PMA_RCV_DET,
5db6b777
GKH
3160 &Value);
3161 if (status != STATUS_SUCCESS)
3162 goto bad;
3163
b243c4aa 3164 /* If PMA/PMD receive signal detect is 0, then the link is down */
5db6b777
GKH
3165 if (!(Value & PMA_RCV_DETECT))
3166 return (SXG_LINK_DOWN);
3167
cb636fe3
MT
3168 /* MIIM_DEV_PHY_PCS - PHY PCS module */
3169 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
3170 /* PCS 10GBASE-R Status 1 register */
3171 PHY_PCS_10G_STATUS1,
5db6b777
GKH
3172 &Value);
3173 if (status != STATUS_SUCCESS)
3174 goto bad;
3175
b243c4aa 3176 /* If PCS is not locked to receive blocks, then the link is down */
5db6b777
GKH
3177 if (!(Value & PCS_10B_BLOCK_LOCK))
3178 return (SXG_LINK_DOWN);
3179
cb636fe3
MT
3180 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
3181 /* XS Lane Status register */
3182 PHY_XS_LANE_STATUS,
5db6b777
GKH
3183 &Value);
3184 if (status != STATUS_SUCCESS)
3185 goto bad;
3186
b243c4aa 3187 /* If XS transmit lanes are not aligned, then the link is down */
5db6b777
GKH
3188 if (!(Value & XS_LANE_ALIGN))
3189 return (SXG_LINK_DOWN);
3190
b243c4aa 3191 /* All 3 bits are true, so the link is up */
e88bd231 3192 DBG_ERROR("EXIT %s\n", __func__);
5db6b777
GKH
3193
3194 return (SXG_LINK_UP);
3195
3196 bad:
cb636fe3 3197 /* An error occurred reading an MDIO register. This shouldn't happen. */
5db6b777
GKH
3198 DBG_ERROR("Error reading an MDIO register!\n");
3199 ASSERT(0);
3200 return (SXG_LINK_DOWN);
3201}
3202
73b07065
M
3203static void sxg_indicate_link_state(struct adapter_t *adapter,
3204 enum SXG_LINK_STATE LinkState)
5db6b777
GKH
3205{
3206 if (adapter->LinkState == SXG_LINK_UP) {
3207 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
e88bd231 3208 __func__);
5db6b777
GKH
3209 netif_start_queue(adapter->netdev);
3210 } else {
3211 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
e88bd231 3212 __func__);
5db6b777
GKH
3213 netif_stop_queue(adapter->netdev);
3214 }
3215}
3216
7c66b14b
MT
3217/*
3218 * sxg_change_mtu - Change the Maximum Transmission Unit
3219 * @returns 0 on success, negative on failure
3220 */
3221 int sxg_change_mtu(struct net_device *netdev, int new_mtu)
3222{
3223 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev);
3224
3225 if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU)))
3226 return -EINVAL;
3227
3228 if(new_mtu == netdev->mtu)
3229 return 0;
3230
3231 netdev->mtu = new_mtu;
3232
3233 if (new_mtu == SXG_JUMBO_MTU) {
3234 adapter->JumboEnabled = TRUE;
3235 adapter->FrameSize = JUMBOMAXFRAME;
3236 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
3237 } else {
3238 adapter->JumboEnabled = FALSE;
3239 adapter->FrameSize = ETHERMAXFRAME;
3240 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
3241 }
3242
3243 sxg_entry_halt(netdev);
3244 sxg_entry_open(netdev);
3245 return 0;
3246}
3247
5db6b777
GKH
3248/*
3249 * sxg_link_state - Set the link state and if necessary, indicate.
3250 * This routine is the central point of processing for all link state changes.
3251 * Nothing else in the driver should alter the link state or perform
3252 * link state indications
3253 *
3254 * Arguments -
3255 * adapter - A pointer to our adapter structure
3256 * LinkState - The link state
3257 *
3258 * Return
3259 * None
3260 */
cb636fe3
MT
3261static void sxg_link_state(struct adapter_t *adapter,
3262 enum SXG_LINK_STATE LinkState)
5db6b777
GKH
3263{
3264 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
3265 adapter, LinkState, adapter->LinkState, adapter->State);
3266
e88bd231 3267 DBG_ERROR("ENTER %s\n", __func__);
5db6b777 3268
ddd6f0a8
MT
3269 /*
3270 * Hold the adapter lock during this routine. Maybe move
3271 * the lock to the caller.
3272 */
6a2946ba
MT
3273 /* IMP TODO : Check if we can survive without taking this lock */
3274 /* spin_lock(&adapter->AdapterLock); */
5db6b777 3275 if (LinkState == adapter->LinkState) {
b243c4aa 3276 /* Nothing changed.. */
6a2946ba 3277 /* spin_unlock(&adapter->AdapterLock); */
cb636fe3
MT
3278 DBG_ERROR("EXIT #0 %s. Link status = %d\n",
3279 __func__, LinkState);
5db6b777
GKH
3280 return;
3281 }
b243c4aa 3282 /* Save the adapter state */
5db6b777
GKH
3283 adapter->LinkState = LinkState;
3284
b243c4aa 3285 /* Drop the lock and indicate link state */
6a2946ba 3286 /* spin_unlock(&adapter->AdapterLock); */
e88bd231 3287 DBG_ERROR("EXIT #1 %s\n", __func__);
5db6b777
GKH
3288
3289 sxg_indicate_link_state(adapter, LinkState);
3290}
3291
3292/*
3293 * sxg_write_mdio_reg - Write to a register on the MDIO bus
3294 *
3295 * Arguments -
3296 * adapter - A pointer to our adapter structure
3297 * DevAddr - MDIO device number being addressed
3298 * RegAddr - register address for the specified MDIO device
3299 * Value - value to write to the MDIO register
3300 *
3301 * Return
3302 * status
3303 */
73b07065 3304static int sxg_write_mdio_reg(struct adapter_t *adapter,
5c7514e0 3305 u32 DevAddr, u32 RegAddr, u32 Value)
5db6b777 3306{
942798b4 3307 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
cb636fe3
MT
3308 /* Address operation (written to MIIM field reg) */
3309 u32 AddrOp;
3310 /* Write operation (written to MIIM field reg) */
3311 u32 WriteOp;
3312 u32 Cmd;/* Command (written to MIIM command reg) */
5db6b777
GKH
3313 u32 ValueRead;
3314 u32 Timeout;
3315
cb636fe3 3316 /* DBG_ERROR("ENTER %s\n", __func__); */
5db6b777
GKH
3317
3318 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
3319 adapter, 0, 0, 0);
3320
b243c4aa
M
3321 /* Ensure values don't exceed field width */
3322 DevAddr &= 0x001F; /* 5-bit field */
3323 RegAddr &= 0xFFFF; /* 16-bit field */
3324 Value &= 0xFFFF; /* 16-bit field */
5db6b777 3325
b243c4aa 3326 /* Set MIIM field register bits for an MIIM address operation */
5db6b777
GKH
3327 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3328 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3329 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3330 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3331
b243c4aa 3332 /* Set MIIM field register bits for an MIIM write operation */
5db6b777
GKH
3333 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3334 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3335 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3336 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
3337
b243c4aa 3338 /* Set MIIM command register bits to execute an MIIM command */
5db6b777
GKH
3339 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3340
b243c4aa 3341 /* Reset the command register command bit (in case it's not 0) */
5db6b777
GKH
3342 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3343
b243c4aa 3344 /* MIIM write to set the address of the specified MDIO register */
5db6b777
GKH
3345 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3346
b243c4aa 3347 /* Write to MIIM Command Register to execute the address operation */
5db6b777
GKH
3348 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3349
b243c4aa 3350 /* Poll AMIIM Indicator register to wait for completion */
5db6b777
GKH
3351 Timeout = SXG_LINK_TIMEOUT;
3352 do {
b243c4aa 3353 udelay(100); /* Timeout in 100us units */
5db6b777
GKH
3354 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3355 if (--Timeout == 0) {
3356 return (STATUS_FAILURE);
3357 }
3358 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3359
b243c4aa 3360 /* Reset the command register command bit */
5db6b777
GKH
3361 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3362
b243c4aa 3363 /* MIIM write to set up an MDIO write operation */
5db6b777
GKH
3364 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
3365
b243c4aa 3366 /* Write to MIIM Command Register to execute the write operation */
5db6b777
GKH
3367 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3368
b243c4aa 3369 /* Poll AMIIM Indicator register to wait for completion */
5db6b777
GKH
3370 Timeout = SXG_LINK_TIMEOUT;
3371 do {
b243c4aa 3372 udelay(100); /* Timeout in 100us units */
5db6b777
GKH
3373 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3374 if (--Timeout == 0) {
3375 return (STATUS_FAILURE);
3376 }
3377 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3378
cb636fe3 3379 /* DBG_ERROR("EXIT %s\n", __func__); */
5db6b777
GKH
3380
3381 return (STATUS_SUCCESS);
3382}
3383
3384/*
3385 * sxg_read_mdio_reg - Read a register on the MDIO bus
3386 *
3387 * Arguments -
3388 * adapter - A pointer to our adapter structure
3389 * DevAddr - MDIO device number being addressed
3390 * RegAddr - register address for the specified MDIO device
cb636fe3 3391 * pValue - pointer to where to put data read from the MDIO register
5db6b777
GKH
3392 *
3393 * Return
3394 * status
3395 */
73b07065 3396static int sxg_read_mdio_reg(struct adapter_t *adapter,
5c7514e0 3397 u32 DevAddr, u32 RegAddr, u32 *pValue)
5db6b777 3398{
942798b4 3399 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
cb636fe3
MT
3400 u32 AddrOp; /* Address operation (written to MIIM field reg) */
3401 u32 ReadOp; /* Read operation (written to MIIM field reg) */
3402 u32 Cmd; /* Command (written to MIIM command reg) */
5db6b777
GKH
3403 u32 ValueRead;
3404 u32 Timeout;
3405
3406 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RdMDIO",
3407 adapter, 0, 0, 0);
cb636fe3 3408 DBG_ERROR("ENTER %s\n", __func__);
5db6b777 3409
b243c4aa
M
3410 /* Ensure values don't exceed field width */
3411 DevAddr &= 0x001F; /* 5-bit field */
3412 RegAddr &= 0xFFFF; /* 16-bit field */
5db6b777 3413
b243c4aa 3414 /* Set MIIM field register bits for an MIIM address operation */
5db6b777
GKH
3415 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3416 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3417 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3418 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3419
b243c4aa 3420 /* Set MIIM field register bits for an MIIM read operation */
5db6b777
GKH
3421 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3422 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3423 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3424 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
3425
b243c4aa 3426 /* Set MIIM command register bits to execute an MIIM command */
5db6b777
GKH
3427 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3428
b243c4aa 3429 /* Reset the command register command bit (in case it's not 0) */
5db6b777
GKH
3430 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3431
b243c4aa 3432 /* MIIM write to set the address of the specified MDIO register */
5db6b777
GKH
3433 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3434
b243c4aa 3435 /* Write to MIIM Command Register to execute the address operation */
5db6b777
GKH
3436 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3437
b243c4aa 3438 /* Poll AMIIM Indicator register to wait for completion */
5db6b777
GKH
3439 Timeout = SXG_LINK_TIMEOUT;
3440 do {
b243c4aa 3441 udelay(100); /* Timeout in 100us units */
5db6b777
GKH
3442 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3443 if (--Timeout == 0) {
1323e5f1
MT
3444 DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __func__);
3445
5db6b777
GKH
3446 return (STATUS_FAILURE);
3447 }
3448 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3449
b243c4aa 3450 /* Reset the command register command bit */
5db6b777
GKH
3451 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3452
b243c4aa 3453 /* MIIM write to set up an MDIO register read operation */
5db6b777
GKH
3454 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
3455
b243c4aa 3456 /* Write to MIIM Command Register to execute the read operation */
5db6b777
GKH
3457 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3458
b243c4aa 3459 /* Poll AMIIM Indicator register to wait for completion */
5db6b777
GKH
3460 Timeout = SXG_LINK_TIMEOUT;
3461 do {
b243c4aa 3462 udelay(100); /* Timeout in 100us units */
5db6b777
GKH
3463 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3464 if (--Timeout == 0) {
1323e5f1
MT
3465 DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __func__);
3466
5db6b777
GKH
3467 return (STATUS_FAILURE);
3468 }
3469 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3470
b243c4aa 3471 /* Read the MDIO register data back from the field register */
5db6b777 3472 READ_REG(HwRegs->MacAmiimField, *pValue);
b243c4aa 3473 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
5db6b777 3474
cb636fe3 3475 DBG_ERROR("EXIT %s\n", __func__);
5db6b777
GKH
3476
3477 return (STATUS_SUCCESS);
3478}
3479
5db6b777
GKH
3480/*
3481 * Functions to obtain the CRC corresponding to the destination mac address.
3482 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
3483 * the polynomial:
cb636fe3
MT
3484 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
3485 * + x^4 + x^2 + x^1 + 1.
5db6b777 3486 *
cb636fe3
MT
3487 * After the CRC for the 6 bytes is generated (but before the value is
3488 * complemented), we must then transpose the value and return bits 30-23.
5db6b777 3489 */
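/*
 * For reference: the initialization loop below ORs together
 * 1 << (31 - p[i]) for each exponent in p[], which works out to the
 * familiar reflected CRC-32 polynomial constant 0xEDB88320 (so, for
 * example, sxg_crc_table[1] ends up as 0x77073096, matching the
 * standard zlib-style table).
 */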
cb636fe3
MT
3490 static u32 sxg_crc_table[256]; /* Table of CRCs for all possible byte values */
3491static u32 sxg_crc_init; /* Is table initialized */
5db6b777 3492
cb636fe3 3493/* Contruct the CRC32 table */
5db6b777
GKH
3494static void sxg_mcast_init_crc32(void)
3495{
cb636fe3
MT
3496 u32 c; /* CRC shift reg */
3497 u32 e = 0; /* Poly XOR pattern */
3498 int i; /* counter */
5db6b777
GKH
3499 int k; /* byte being shifted into crc */
3500
3501 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
3502
3503 for (i = 0; i < sizeof(p) / sizeof(int); i++) {
3504 e |= 1L << (31 - p[i]);
3505 }
3506
3507 for (i = 1; i < 256; i++) {
3508 c = i;
3509 for (k = 8; k; k--) {
3510 c = c & 1 ? (c >> 1) ^ e : c >> 1;
3511 }
3512 sxg_crc_table[i] = c;
3513 }
3514}
3515
3516/*
3517 * Return the MAC hash as described above.
3518 */
3519static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
3520{
3521 u32 crc;
3522 char *p;
3523 int i;
3524 unsigned char machash = 0;
3525
3526 if (!sxg_crc_init) {
3527 sxg_mcast_init_crc32();
3528 sxg_crc_init = 1;
3529 }
3530
3531 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
3532 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
3533 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
3534 }
3535
3536 /* Return bits 1-8, transposed */
3537 for (i = 1; i < 9; i++) {
3538 machash |= (((crc >> i) & 1) << (8 - i));
3539 }
3540
3541 return (machash);
3542}
3543
73b07065 3544static void sxg_mcast_set_mask(struct adapter_t *adapter)
c6c25ed0 3545{
942798b4 3546 struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
c6c25ed0 3547
b040b07b 3548 DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __func__,
c6c25ed0
GKH
3549 adapter->netdev->name, (unsigned int)adapter->MacFilter,
3550 adapter->MulticastMask);
3551
3552 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
ddd6f0a8 3553 /*
cb636fe3
MT
3554 * Turn on all multicast addresses. We have to do this for
3555 * promiscuous mode as well as ALLMCAST mode. It saves the
3556 * Microcode from having to keep state about the MAC configuration.
3557 */
b040b07b 3558 /* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n \
cb636fe3 3559 * SLUT MODE!!!\n",__func__);
c6c25ed0 3560 */
c6c25ed0
GKH
3561 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3562 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
cb636fe3
MT
3563 /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
3564 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
3565 */
c6c25ed0
GKH
3566
3567 } else {
ddd6f0a8 3568 /*
cb636fe3
MT
3569 * Commit our multicast mask to the SLIC by writing to the
3570 * multicast address mask registers
c6c25ed0
GKH
3571 */
3572 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3573 __func__, adapter->netdev->name,
3574 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3575 ((ulong)
3576 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3577
3578 WRITE_REG(sxg_regs->McastLow,
3579 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
3580 WRITE_REG(sxg_regs->McastHigh,
3581 (u32) ((adapter->
3582 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
3583 }
3584}
3585
73b07065 3586static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
5db6b777
GKH
3587{
3588 unsigned char crcpoly;
3589
3590 /* Get the CRC polynomial for the mac address */
3591 crcpoly = sxg_mcast_get_mac_hash(address);
3592
ddd6f0a8
MT
3593 /*
3594 * We only have space on the SLIC for 64 entries. Lop
5db6b777
GKH
3595 * off the top two bits. (2^6 = 64)
3596 */
3597 crcpoly &= 0x3F;
3598
3599 /* OR in the new bit into our 64 bit mask. */
3600 adapter->MulticastMask |= (u64) 1 << crcpoly;
3601}
b040b07b
MT
3602
3603/*
3604 * Function takes MAC addresses from dev_mc_list and generates the Mask
3605 */
3606
3607static void sxg_set_mcast_addr(struct adapter_t *adapter)
3608{
3609 struct dev_mc_list *mclist;
3610 struct net_device *dev = adapter->netdev;
3611 int i;
3612
3613 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
3614 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3615 i++, mclist = mclist->next) {
3616 sxg_mcast_set_bit(adapter, mclist->da_addr);
3617 }
3618 }
3619 sxg_mcast_set_mask(adapter);
3620}
5db6b777 3621
942798b4 3622static void sxg_mcast_set_list(struct net_device *dev)
5db6b777 3623{
73b07065 3624 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
5db6b777
GKH
3625
3626 ASSERT(adapter);
559990c6 3627 if (dev->flags & IFF_PROMISC)
1323e5f1 3628 adapter->MacFilter |= MAC_PROMISC;
b040b07b
MT
3629 if (dev->flags & IFF_MULTICAST)
3630 adapter->MacFilter |= MAC_MCAST;
559990c6 3631 if (dev->flags & IFF_ALLMULTI)
b040b07b 3632 adapter->MacFilter |= MAC_ALLMCAST;
b040b07b 3633
1323e5f1 3634 /* XXX handle other flags as well */
b040b07b 3635 sxg_set_mcast_addr(adapter);
1323e5f1 3636}
5db6b777 3637
d9d578bf 3638void sxg_free_sgl_buffers(struct adapter_t *adapter)
d0128aa9 3639{
d0128aa9 3640 struct list_entry *ple;
d9d578bf 3641 struct sxg_scatter_gather *Sgl;
d0128aa9 3642
d9d578bf 3643 while(!(IsListEmpty(&adapter->AllSglBuffers))) {
6a2946ba
MT
3644 ple = RemoveHeadList(&adapter->AllSglBuffers);
3645 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3646 kfree(Sgl);
d9d578bf
MT
3647 adapter->AllSglBufferCount--;
3648 }
3649}
3650
3651void sxg_free_rcvblocks(struct adapter_t *adapter)
3652{
3653 u32 i;
3654 void *temp_RcvBlock;
3655 struct list_entry *ple;
3656 struct sxg_rcv_block_hdr *RcvBlockHdr;
3657 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3658 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3659 (adapter->state == SXG_STATE_HALTING));
3660 while(!(IsListEmpty(&adapter->AllRcvBlocks))) {
3661
3662 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3663 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3664
3665 if(RcvBlockHdr->VirtualAddress) {
3666 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3667
3668 for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
3669 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3670 RcvDataBufferHdr =
3671 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3672 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3673 }
3674 }
d0128aa9 3675
d9d578bf
MT
3676 pci_free_consistent(adapter->pcidev,
3677 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3678 RcvBlockHdr->VirtualAddress,
3679 RcvBlockHdr->PhysicalAddress);
3680 adapter->AllRcvBlockCount--;
3681 }
3682 ASSERT(adapter->AllRcvBlockCount == 0);
3683 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3684 adapter, 0, 0, 0);
3685}
3686void sxg_free_mcast_addrs(struct adapter_t *adapter)
3687{
3688 struct sxg_multicast_address *address;
3689 while(adapter->MulticastAddrs) {
3690 address = adapter->MulticastAddrs;
3691 adapter->MulticastAddrs = address->Next;
3692 kfree(address);
3693 }
3694
3695 adapter->MulticastMask = 0;
3696}
d0128aa9 3697
d9d578bf
MT
3698void sxg_unmap_resources(struct adapter_t *adapter)
3699{
3700 if(adapter->HwRegs) {
3701 iounmap((void *)adapter->HwRegs);
3702 }
3703 if(adapter->UcodeRegs) {
3704 iounmap((void *)adapter->UcodeRegs);
d0128aa9 3705 }
d9d578bf
MT
3706
3707 ASSERT(adapter->AllRcvBlockCount == 0);
3708 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3709 adapter, 0, 0, 0);
d0128aa9 3710}
d9d578bf
MT
3711
3712
5db6b777 3713
5db6b777 3714/*
d9d578bf 3715 * sxg_free_resources - Free everything allocated in SxgAllocateResources
5db6b777
GKH
3716 *
3717 * Arguments -
3718 * adapter - A pointer to our adapter structure
3719 *
3720 * Return
3721 * none
3722 */
d9d578bf 3723void sxg_free_resources(struct adapter_t *adapter)
5db6b777
GKH
3724{
3725 u32 RssIds, IsrCount;
5db6b777 3726 RssIds = SXG_RSS_CPU_COUNT(adapter);
1782199f 3727 IsrCount = adapter->msi_enabled ? RssIds : 1;
5db6b777
GKH
3728
3729 if (adapter->BasicAllocations == FALSE) {
ddd6f0a8
MT
3730 /*
3731 * No allocations have been made, including spinlocks,
3732 * or listhead initializations. Return.
3733 */
5db6b777
GKH
3734 return;
3735 }
d9d578bf 3736
5db6b777 3737 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
d9d578bf 3738 sxg_free_rcvblocks(adapter);
5db6b777
GKH
3739 }
3740 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
d9d578bf 3741 sxg_free_sgl_buffers(adapter);
5db6b777 3742 }
d0128aa9 3743
5db6b777
GKH
3744 if (adapter->XmtRingZeroIndex) {
3745 pci_free_consistent(adapter->pcidev,
3746 sizeof(u32),
3747 adapter->XmtRingZeroIndex,
3748 adapter->PXmtRingZeroIndex);
3749 }
d0128aa9
MT
3750 if (adapter->Isr) {
3751 pci_free_consistent(adapter->pcidev,
3752 sizeof(u32) * IsrCount,
3753 adapter->Isr, adapter->PIsr);
3754 }
3755
d0128aa9
MT
3756 if (adapter->EventRings) {
3757 pci_free_consistent(adapter->pcidev,
3758 sizeof(struct sxg_event_ring) * RssIds,
3759 adapter->EventRings, adapter->PEventRings);
3760 }
d0128aa9
MT
3761 if (adapter->RcvRings) {
3762 pci_free_consistent(adapter->pcidev,
d9d578bf 3763 sizeof(struct sxg_rcv_ring) * 1,
d0128aa9
MT
3764 adapter->RcvRings,
3765 adapter->PRcvRings);
3766 adapter->RcvRings = NULL;
3767 }
3768
d0128aa9
MT
3769 if(adapter->XmtRings) {
3770 pci_free_consistent(adapter->pcidev,
d9d578bf 3771 sizeof(struct sxg_xmt_ring) * 1,
d0128aa9
MT
3772 adapter->XmtRings,
3773 adapter->PXmtRings);
3774 adapter->XmtRings = NULL;
3775 }
3776
d9d578bf
MT
3777 if (adapter->ucode_stats) {
3778 pci_unmap_single(adapter->pcidev,
3779 sizeof(struct sxg_ucode_stats),
3780 adapter->pucode_stats, PCI_DMA_FROMDEVICE);
3781 adapter->ucode_stats = NULL;
3782 }
d0128aa9 3783
5db6b777 3784
b243c4aa 3785 /* Unmap register spaces */
d9d578bf 3786 sxg_unmap_resources(adapter);
5db6b777 3787
d9d578bf 3788 sxg_free_mcast_addrs(adapter);
5db6b777 3789
5db6b777
GKH
3790 adapter->BasicAllocations = FALSE;
3791
5db6b777 3792}
5db6b777
GKH
3793
3794/*
3795 * sxg_allocate_complete -
3796 *
3797 * This routine is called when a memory allocation has completed.
3798 *
3799 * Arguments -
73b07065 3800 * struct adapter_t * - Our adapter structure
5db6b777
GKH
3801 * VirtualAddress - Memory virtual address
3802 * PhysicalAddress - Memory physical address
3803 * Length - Length of memory allocated (or 0)
3804 * Context - The type of buffer allocated
3805 *
3806 * Return
3807 * int status
3808 */
0d414727 3809static int sxg_allocate_complete(struct adapter_t *adapter,
5c7514e0
M
3810 void *VirtualAddress,
3811 dma_addr_t PhysicalAddress,
942798b4 3812 u32 Length, enum sxg_buffer_type Context)
5db6b777 3813{
0d414727 3814 int status = 0;
5db6b777
GKH
3815 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3816 adapter, VirtualAddress, Length, Context);
6a2946ba
MT
3817 ASSERT(atomic_read(&adapter->pending_allocations));
3818 atomic_dec(&adapter->pending_allocations);
5db6b777
GKH
3819
3820 switch (Context) {
3821
3822 case SXG_BUFFER_TYPE_RCV:
0d414727 3823 status = sxg_allocate_rcvblock_complete(adapter,
5db6b777
GKH
3824 VirtualAddress,
3825 PhysicalAddress, Length);
3826 break;
3827 case SXG_BUFFER_TYPE_SGL:
942798b4 3828 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
5db6b777
GKH
3829 VirtualAddress,
3830 PhysicalAddress, Length);
3831 break;
3832 }
3833 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3834 adapter, VirtualAddress, Length, Context);
0d414727
MT
3835
3836 return status;
5db6b777
GKH
3837}
3838
3839/*
3840 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3841 * synchronous and asynchronous buffer allocations
3842 *
3843 * Arguments -
3844 * adapter - A pointer to our adapter structure
3845 * Size - block size to allocate
3846 * BufferType - Type of buffer to allocate
3847 *
3848 * Return
3849 * int
3850 */
73b07065 3851static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
942798b4 3852 u32 Size, enum sxg_buffer_type BufferType)
5db6b777
GKH
3853{
3854 int status;
5c7514e0 3855 void *Buffer;
5db6b777
GKH
3856 dma_addr_t pBuffer;
3857
3858 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3859 adapter, Size, BufferType, 0);
ddd6f0a8
MT
3860 /*
3861 * Grab the adapter lock and check the state. If we're in anything other
3862 * than INITIALIZING or RUNNING state, fail. This is to prevent
3863 * allocations in an improper driver state
3864 */
5db6b777 3865
6a2946ba 3866 atomic_inc(&adapter->pending_allocations);
5db6b777 3867
d9d578bf
MT
3868 if(BufferType != SXG_BUFFER_TYPE_SGL)
3869 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3870 else {
3871 Buffer = kzalloc(Size, GFP_ATOMIC);
54aed113 3872 pBuffer = (dma_addr_t)NULL;
d9d578bf 3873 }
5db6b777 3874 if (Buffer == NULL) {
ddd6f0a8
MT
3875 /*
3876 * Decrement the AllocationsPending count while holding
3877 * the lock. Pause processing relies on this
3878 */
6a2946ba 3879 atomic_dec(&adapter->pending_allocations);
5db6b777
GKH
3880 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3881 adapter, Size, BufferType, 0);
3882 return (STATUS_RESOURCES);
3883 }
0d414727 3884 status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
5db6b777
GKH
3885
3886 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3887 adapter, Size, BufferType, status);
0d414727 3888 return status;
5db6b777
GKH
3889}
3890
3891/*
cb636fe3
MT
3892 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3893 * block allocation
5db6b777
GKH
3894 *
3895 * Arguments -
3896 * adapter - A pointer to our adapter structure
3897 * RcvBlock - receive block virtual address
3898 * PhysicalAddress - Physical address
3899 * Length - Memory length
3900 *
3901 * Return - int status
3902 */
0d414727 3903static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
5c7514e0
M
3904 void *RcvBlock,
3905 dma_addr_t PhysicalAddress,
3906 u32 Length)
5db6b777
GKH
3907{
3908 u32 i;
3909 u32 BufferSize = adapter->ReceiveBufferSize;
3910 u64 Paddr;
d0128aa9 3911 void *temp_RcvBlock;
942798b4 3912 struct sxg_rcv_block_hdr *RcvBlockHdr;
942798b4
MT
3913 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3914 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3915 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
5db6b777
GKH
3916
3917 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3918 adapter, RcvBlock, Length, 0);
3919 if (RcvBlock == NULL) {
3920 goto fail;
3921 }
3922 memset(RcvBlock, 0, Length);
3923 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3924 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
d0128aa9 3925 ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
ddd6f0a8
MT
3926 /*
3927 * First, initialize the contained pool of receive data buffers.
3928 * This initialization requires NBL/NB/MDL allocations, if any of them
3929 * fail, free the block and return without queueing the shared memory
3930 */
d0128aa9
MT
3931 /* RcvDataBuffer = RcvBlock; */
3932 temp_RcvBlock = RcvBlock;
3933 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3934 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3935 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
3936 temp_RcvBlock;
3937 /* For FREE macro assertion */
3938 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
3939 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
3940 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
3941 goto fail;
5db6b777 3942
d0128aa9 3943 }
5db6b777 3944
ddd6f0a8
MT
3945 /*
3946 * Place this entire block of memory on the AllRcvBlocks queue so it
3947 * can be freed later.
3948 */
d0128aa9
MT
3949
3950 RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
3951 SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
5db6b777
GKH
3952 RcvBlockHdr->VirtualAddress = RcvBlock;
3953 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3954 spin_lock(&adapter->RcvQLock);
3955 adapter->AllRcvBlockCount++;
3956 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3957 spin_unlock(&adapter->RcvQLock);
3958
cb636fe3
MT
3959 /* Now free the contained receive data buffers that we
3960 * initialized above */
d0128aa9 3961 temp_RcvBlock = RcvBlock;
5db6b777
GKH
3962 for (i = 0, Paddr = PhysicalAddress;
3963 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
d0128aa9
MT
3964 i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
3965 temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3966 RcvDataBufferHdr =
3967 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
5db6b777
GKH
3968 spin_lock(&adapter->RcvQLock);
3969 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3970 spin_unlock(&adapter->RcvQLock);
3971 }
3972
b243c4aa 3973 /* Locate the descriptor block and put it on a separate free queue */
5c7514e0 3974 RcvDescriptorBlock =
942798b4 3975 (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
5c7514e0 3976 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
d0128aa9 3977 (SXG_RCV_DATA_HDR_SIZE));
5db6b777 3978 RcvDescriptorBlockHdr =
942798b4 3979 (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
5db6b777 3980 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
d0128aa9 3981 (SXG_RCV_DATA_HDR_SIZE));
5db6b777
GKH
3982 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3983 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3984 spin_lock(&adapter->RcvQLock);
3985 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3986 spin_unlock(&adapter->RcvQLock);
3987 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3988 adapter, RcvBlock, Length, 0);
0d414727 3989 return STATUS_SUCCESS;
cb636fe3 3990fail:
b243c4aa 3991 /* Free any allocated resources */
5db6b777 3992 if (RcvBlock) {
d0128aa9 3993 temp_RcvBlock = RcvBlock;
5db6b777 3994 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
d0128aa9 3995 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
5db6b777 3996 RcvDataBufferHdr =
d0128aa9 3997 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
5db6b777
GKH
3998 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3999 }
4000 pci_free_consistent(adapter->pcidev,
4001 Length, RcvBlock, PhysicalAddress);
4002 }
e88bd231 4003 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
5db6b777
GKH
4004 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
4005 adapter, adapter->FreeRcvBufferCount,
4006 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
4007 adapter->Stats.NoMem++;
0d414727
MT
4008 /* As allocation failed, free all previously allocated blocks..*/
4009 /* sxg_free_rcvblocks(adapter); */
4010
4011 return STATUS_RESOURCES;
5db6b777
GKH
4012}
4013
4014/*
4015 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
4016 *
4017 * Arguments -
4018 * adapter - A pointer to our adapter structure
942798b4 4019 * SxgSgl - struct sxg_scatter_gather buffer
5db6b777
GKH
4020 * PhysicalAddress - Physical address
4021 * Length - Memory length
4022 *
4023 * Return - none
5db6b777 4024 */
73b07065 4025static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
942798b4 4026 struct sxg_scatter_gather *SxgSgl,
5c7514e0
M
4027 dma_addr_t PhysicalAddress,
4028 u32 Length)
5db6b777 4029{
d9d578bf 4030 unsigned long sgl_flags;
5db6b777
GKH
4031 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
4032 adapter, SxgSgl, Length, 0);
c5e5cf5a 4033 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
5db6b777 4034 adapter->AllSglBufferCount++;
d9d578bf 4035 /* PhysicalAddress; */
cb636fe3
MT
4036 SxgSgl->PhysicalAddress = PhysicalAddress;
4037 /* Initialize backpointer once */
4038 SxgSgl->adapter = adapter;
5db6b777 4039 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
c5e5cf5a 4040 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
5db6b777 4041 SxgSgl->State = SXG_BUFFER_BUSY;
c5e5cf5a 4042 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
5db6b777
GKH
4043 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
4044 adapter, SxgSgl, Length, 0);
4045}
4046
5db6b777 4047
54aed113 4048static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
5db6b777 4049{
cb636fe3
MT
4050 /*
4051 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
4052 * funct#[%d]\n", __func__, card->config_set,
4053 * adapter->port, adapter->physport, adapter->functionnumber);
4054 *
4055 * sxg_dbg_macaddrs(adapter);
4056 */
cb636fe3
MT
4057 /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
4058 * __FUNCTION__);
4059 */
4060
4061 /* sxg_dbg_macaddrs(adapter); */
5db6b777 4062
6a2946ba
MT
4063 struct net_device *dev = adapter->netdev;
4064
4065 if (!dev) {
4066 printk(KERN_ERR "sxg: Dev is Null\n");
4067 return -ENODEV;
4068 }
4069
4070 DBG_ERROR("%s ENTER (%s)\n", __func__, dev->name);
4071
4072 if (netif_running(dev)) {
4073 return -EBUSY;
4074 }
4077
5db6b777
GKH
4078 if (!(adapter->currmacaddr[0] ||
4079 adapter->currmacaddr[1] ||
4080 adapter->currmacaddr[2] ||
4081 adapter->currmacaddr[3] ||
4082 adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
4083 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
4084 }
4085 if (adapter->netdev) {
4086 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
1323e5f1 4087 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
5db6b777 4088 }
cb636fe3 4089 /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
5db6b777
GKH
4090 sxg_dbg_macaddrs(adapter);
4091
54aed113 4092 return 0;
5db6b777
GKH
4093}
4094
c6c25ed0 4095#if XXXTODO
942798b4 4096static int sxg_mac_set_address(struct net_device *dev, void *ptr)
5db6b777 4097{
73b07065 4098 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
5db6b777
GKH
4099 struct sockaddr *addr = ptr;
4100
e88bd231 4101 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
5db6b777
GKH
4102
4103 if (netif_running(dev)) {
4104 return -EBUSY;
4105 }
4106 if (!adapter) {
4107 return -EBUSY;
4108 }
4109 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
e88bd231 4110 __func__, adapter->netdev->name, adapter->currmacaddr[0],
5db6b777
GKH
4111 adapter->currmacaddr[1], adapter->currmacaddr[2],
4112 adapter->currmacaddr[3], adapter->currmacaddr[4],
4113 adapter->currmacaddr[5]);
4114 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4115 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
4116 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
e88bd231 4117 __func__, adapter->netdev->name, adapter->currmacaddr[0],
5db6b777
GKH
4118 adapter->currmacaddr[1], adapter->currmacaddr[2],
4119 adapter->currmacaddr[3], adapter->currmacaddr[4],
4120 adapter->currmacaddr[5]);
4121
4122 sxg_config_set(adapter, TRUE);
5db6b777
GKH
4123 return 0;
4124}
c6c25ed0 4125#endif
5db6b777 4126
5db6b777 4127/*
ddd6f0a8
MT
4128 * SXG DRIVER FUNCTIONS (below)
4129 *
5db6b777
GKH
4130 * sxg_initialize_adapter - Initialize adapter
4131 *
4132 * Arguments -
4133 * adapter - A pointer to our adapter structure
4134 *
ddd6f0a8 4135 * Return - int
5db6b777 4136 */
73b07065 4137static int sxg_initialize_adapter(struct adapter_t *adapter)
5db6b777
GKH
4138{
4139 u32 RssIds, IsrCount;
4140 u32 i;
4141 int status;
7c66b14b 4142 int sxg_rcv_ring_size = SXG_RCV_RING_SIZE;
5db6b777
GKH
4143
4144 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
4145 adapter, 0, 0, 0);
4146
b243c4aa 4147 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
1782199f 4148 IsrCount = adapter->msi_enabled ? RssIds : 1;
5db6b777 4149
ddd6f0a8
MT
4150 /*
4151 * Sanity check SXG_UCODE_REGS structure definition to
4152 * make sure the length is correct
4153 */
942798b4 4154 ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
5db6b777 4155
b243c4aa 4156 /* Disable interrupts */
5db6b777
GKH
4157 SXG_DISABLE_ALL_INTERRUPTS(adapter);
4158
b243c4aa 4159 /* Set MTU */
5db6b777
GKH
4160 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
4161 (adapter->FrameSize == JUMBOMAXFRAME));
4162 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
4163
b243c4aa 4164 /* Set event ring base address and size */
5db6b777
GKH
4165 WRITE_REG64(adapter,
4166 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
4167 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
4168
b243c4aa 4169 /* Per-ISR initialization */
5db6b777
GKH
4170 for (i = 0; i < IsrCount; i++) {
4171 u64 Addr;
b243c4aa 4172 /* Set interrupt status pointer */
5db6b777
GKH
4173 Addr = adapter->PIsr + (i * sizeof(u32));
4174 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
4175 }
4176
b243c4aa 4177 /* XMT ring zero index */
5db6b777
GKH
4178 WRITE_REG64(adapter,
4179 adapter->UcodeRegs[0].SPSendIndex,
4180 adapter->PXmtRingZeroIndex, 0);
4181
b243c4aa 4182 /* Per-RSS initialization */
5db6b777 4183 for (i = 0; i < RssIds; i++) {
b243c4aa 4184 /* Release all event ring entries to the Microcode */
5db6b777
GKH
4185 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
4186 TRUE);
4187 }
4188
b243c4aa 4189 /* Transmit ring base and size */
5db6b777
GKH
4190 WRITE_REG64(adapter,
4191 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
4192 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
4193
b243c4aa 4194 /* Receive ring base and size */
5db6b777
GKH
4195 WRITE_REG64(adapter,
4196 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
7c66b14b
MT
4197 if (adapter->JumboEnabled == TRUE)
4198 sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE;
4199 WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE);
5db6b777 4200
b243c4aa 4201 /* Populate the card with receive buffers */
5db6b777
GKH
4202 sxg_stock_rcv_buffers(adapter);
4203
ddd6f0a8
MT
4204 /*
4205 * Initialize checksum offload capabilities. At the moment we always
4206 * enable IP and TCP receive checksums on the card. Depending on the
4207 * checksum configuration specified by the user, we can choose to
4208 * report or ignore the checksum information provided by the card.
4209 */
5db6b777
GKH
4210 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
4211 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
4212
9914f053
MT
4213 adapter->flags |= (SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED );
4214
b243c4aa 4215 /* Initialize the MAC, XAUI */
e88bd231 4216 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
5db6b777 4217 status = sxg_initialize_link(adapter);
e88bd231 4218 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
5db6b777
GKH
4219 status);
4220 if (status != STATUS_SUCCESS) {
4221 return (status);
4222 }
ddd6f0a8
MT
4223 /*
4224 * Initialize Dead to FALSE.
4225 * SlicCheckForHang or SlicDumpThread will take it from here.
4226 */
5db6b777
GKH
4227 adapter->Dead = FALSE;
4228 adapter->PingOutstanding = FALSE;
a536efcc
MT
4229 adapter->XmtFcEnabled = TRUE;
4230 adapter->RcvFcEnabled = TRUE;
4231
1323e5f1 4232 adapter->State = SXG_STATE_RUNNING;
5db6b777
GKH
4233
4234 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
4235 adapter, 0, 0, 0);
4236 return (STATUS_SUCCESS);
4237}
4238
4239/*
4240 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
4241 * the card. The caller should hold the RcvQLock
4242 *
4243 * Arguments -
4244 * adapter - A pointer to our adapter structure
4245 * RcvDescriptorBlockHdr - Descriptor block to fill
4246 *
4247 * Return
4248 * status
4249 */
73b07065 4250static int sxg_fill_descriptor_block(struct adapter_t *adapter,
cb636fe3 4251 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
5db6b777
GKH
4252{
4253 int i;
942798b4
MT
4254 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4255 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
4256 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
4257 struct sxg_cmd *RingDescriptorCmd;
4258 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
5db6b777
GKH
4259
4260 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
4261 adapter, adapter->RcvBuffersOnCard,
4262 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4263
4264 ASSERT(RcvDescriptorBlockHdr);
4265
ddd6f0a8
MT
4266 /*
4267 * If we don't have the resources to fill the descriptor block,
4268 * return failure
4269 */
5db6b777
GKH
4270 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
4271 SXG_RING_FULL(RcvRingInfo)) {
4272 adapter->Stats.NoMem++;
4273 return (STATUS_FAILURE);
4274 }
b243c4aa 4275 /* Get a ring descriptor command */
5db6b777
GKH
4276 SXG_GET_CMD(RingZero,
4277 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
4278 ASSERT(RingDescriptorCmd);
4279 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
cb636fe3
MT
4280 RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
4281 RcvDescriptorBlockHdr->VirtualAddress;
5db6b777 4282
b243c4aa 4283 /* Fill in the descriptor block */
5db6b777
GKH
4284 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
4285 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
4286 ASSERT(RcvDataBufferHdr);
6a2946ba 4287 /* ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket); */
d9d578bf
MT
4288 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
4289 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
4290 adapter->ReceiveBufferSize);
4291 if(RcvDataBufferHdr->skb)
4292 RcvDataBufferHdr->SxgDumbRcvPacket =
4293 RcvDataBufferHdr->skb;
4294 else
4295 goto no_memory;
4296 }
5db6b777
GKH
4297 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
4298 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
5c7514e0 4299 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
cb636fe3 4300 (void *)RcvDataBufferHdr;
1323e5f1 4301
5db6b777
GKH
4302 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
4303 RcvDataBufferHdr->PhysicalAddress;
4304 }
b243c4aa 4305 /* Add the descriptor block to receive descriptor ring 0 */
5db6b777
GKH
4306 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
4307
ddd6f0a8
MT
4308 /*
4309 * RcvBuffersOnCard is not protected via the receive lock (see
4310 * sxg_process_event_queue). We don't want to grab a lock every time a
4311 * buffer is returned to us, so we use atomic interlocked functions
4312 * instead.
4313 */
5db6b777
GKH
4314 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
4315
4316 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
4317 RcvDescriptorBlockHdr,
4318 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
4319
4320 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
4321 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
4322 adapter, adapter->RcvBuffersOnCard,
4323 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4324 return (STATUS_SUCCESS);
d9d578bf 4325no_memory:
b9d1081a
MT
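	/*
	 * Unwind: walk back down the descriptors filled above, detach
	 * each data buffer and return it to the free pool (i must be
	 * signed so this loop can terminate after index 0).
	 */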
4326 for (; i >= 0 ; i--) {
4327 if (RcvDescriptorBlock->Descriptors[i].VirtualAddress) {
4328 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
4329 RcvDescriptorBlock->Descriptors[i].
4330 VirtualAddress;
4331 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
4332 (dma_addr_t)NULL;
4333 RcvDescriptorBlock->Descriptors[i].VirtualAddress=NULL;
4334 }
4335 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
4336 }
4337 RcvDescriptorBlockHdr->State = SXG_BUFFER_FREE;
4338 SXG_RETURN_CMD(RingZero, RcvRingInfo, RingDescriptorCmd,
4339 RcvDescriptorBlockHdr);
4340
d9d578bf 4341 return (-ENOMEM);
5db6b777
GKH
4342}
4343
4344/*
4345 * sxg_stock_rcv_buffers - Stock the card with receive buffers
4346 *
4347 * Arguments -
4348 * adapter - A pointer to our adapter structure
4349 *
4350 * Return
4351 * None
4352 */
73b07065 4353static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
5db6b777 4354{
942798b4 4355 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
7c66b14b
MT
4356 int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
4357 int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS;
5db6b777
GKH
4358
4359 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
4360 adapter, adapter->RcvBuffersOnCard,
4361 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
ddd6f0a8
MT
4362 /*
4363 * First, see if we have less than our minimum threshold of
4364 * receive buffers, there isn't an allocation in progress, and
4365 * we haven't exceeded our maximum: if so, get another block of
4366 * buffers. None of this needs to be SMP safe. It's round numbers.
4367 */
7c66b14b
MT
4368 if (adapter->JumboEnabled == TRUE)
4369 sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;
4370 if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) &&
5db6b777 4371 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
6a2946ba 4372 (atomic_read(&adapter->pending_allocations) == 0)) {
5db6b777 4373 sxg_allocate_buffer_memory(adapter,
d0128aa9
MT
4374 SXG_RCV_BLOCK_SIZE
4375 (SXG_RCV_DATA_HDR_SIZE),
5db6b777
GKH
4376 SXG_BUFFER_TYPE_RCV);
4377 }
b243c4aa 4378 /* Now grab the RcvQLock lock and proceed */
5db6b777 4379 spin_lock(&adapter->RcvQLock);
7c66b14b
MT
4380 if (adapter->JumboEnabled)
4381 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
4382 while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
942798b4 4383 struct list_entry *_ple;
5db6b777 4384
b243c4aa 4385 /* Get a descriptor block */
5db6b777
GKH
4386 RcvDescriptorBlockHdr = NULL;
4387 if (adapter->FreeRcvBlockCount) {
4388 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
5c7514e0 4389 RcvDescriptorBlockHdr =
942798b4 4390 container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
5c7514e0 4391 FreeList);
5db6b777
GKH
4392 adapter->FreeRcvBlockCount--;
4393 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
4394 }
4395
4396 if (RcvDescriptorBlockHdr == NULL) {
b243c4aa 4397 /* Bail out.. */
5db6b777
GKH
4398 adapter->Stats.NoMem++;
4399 break;
4400 }
b243c4aa 4401 /* Fill in the descriptor block and give it to the card */
5db6b777
GKH
4402 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
4403 STATUS_FAILURE) {
b243c4aa 4404 /* Free the descriptor block */
5db6b777
GKH
4405 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4406 RcvDescriptorBlockHdr);
4407 break;
4408 }
4409 }
4410 spin_unlock(&adapter->RcvQLock);
4411 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
4412 adapter, adapter->RcvBuffersOnCard,
4413 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4414}
4415
4416/*
4417 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
4418 * completed by the microcode
4419 *
4420 * Arguments -
4421 * adapter - A pointer to our adapter structure
4422 * Index - Where the microcode is up to
4423 *
4424 * Return
4425 * None
4426 */
73b07065 4427static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
5c7514e0 4428 unsigned char Index)
5db6b777 4429{
942798b4
MT
4430 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4431 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4432 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4433 struct sxg_cmd *RingDescriptorCmd;
5db6b777
GKH
4434
4435 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
4436 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4437
b243c4aa 4438 /* Now grab the RcvQLock lock and proceed */
5db6b777
GKH
4439 spin_lock(&adapter->RcvQLock);
4440 ASSERT(Index != RcvRingInfo->Tail);
d9d578bf
MT
4441 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
4442 RcvRingInfo->Tail) > 3) {
ddd6f0a8
MT
4443 /*
4444 * Locate the current Cmd (ring descriptor entry), and
4445 * associated receive descriptor block, and advance
4446 * the tail
4447 */
5db6b777
GKH
4448 SXG_RETURN_CMD(RingZero,
4449 RcvRingInfo,
4450 RingDescriptorCmd, RcvDescriptorBlockHdr);
4451 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
4452 RcvRingInfo->Head, RcvRingInfo->Tail,
4453 RingDescriptorCmd, RcvDescriptorBlockHdr);
4454
b243c4aa 4455 /* Clear the SGL field */
5db6b777 4456 RingDescriptorCmd->Sgl = 0;
ddd6f0a8
MT
4457 /*
4458 * Attempt to refill it and hand it right back to the
4459 * card. If we fail to refill it, free the descriptor block
4460 * header. The card will be restocked later via the
4461 * RcvBuffersOnCard test
4462 */
cb636fe3
MT
4463 if (sxg_fill_descriptor_block(adapter,
4464 RcvDescriptorBlockHdr) == STATUS_FAILURE)
5db6b777
GKH
4465 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4466 RcvDescriptorBlockHdr);
5db6b777
GKH
4467 }
4468 spin_unlock(&adapter->RcvQLock);
4469 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
4470 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4471}
4472
d9d578bf
MT
4473/*
4474 * Read the statistics which the card has been maintaining.
4475 */
4476void sxg_collect_statistics(struct adapter_t *adapter)
4477{
4478 if (!adapter->ucode_stats)
4479 return;
4480 WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
4481 adapter->pucode_stats, 0);
4482 adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
4483 adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
4484 adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
4485 }
4485
4486static struct net_device_stats *sxg_get_stats(struct net_device * dev)
4487{
4488 struct adapter_t *adapter = netdev_priv(dev);
4489
4490 sxg_collect_statistics(adapter);
4491 return (&adapter->stats);
d9d578bf
MT
4492}
4493
e5ea8da0
MT
4494static void sxg_watchdog(unsigned long data)
4495{
4496 struct adapter_t *adapter = (struct adapter_t *) data;
4497
4498 if (adapter->state != ADAPT_DOWN) {
4499 sxg_link_event(adapter);
4500 /* Reset the timer */
4501 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
4502 }
4503}
4504
4505 static void sxg_update_link_status(struct work_struct *work)
4506{
4507 struct adapter_t *adapter = (struct adapter_t *)container_of
4508 (work, struct adapter_t, update_link_status);
4509 if (likely(adapter->link_status_changed)) {
4510 sxg_link_event(adapter);
4511 adapter->link_status_changed = 0;
4512 }
4513}
4514
5db6b777 4515static struct pci_driver sxg_driver = {
371d7a9e 4516 .name = sxg_driver_name,
5db6b777
GKH
4517 .id_table = sxg_pci_tbl,
4518 .probe = sxg_entry_probe,
4519 .remove = sxg_entry_remove,
4520#if SXG_POWER_MANAGEMENT_ENABLED
4521 .suspend = sxgpm_suspend,
4522 .resume = sxgpm_resume,
4523#endif
cb636fe3 4524 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
5db6b777
GKH
4525};
4526
4527static int __init sxg_module_init(void)
4528{
4529 sxg_init_driver();
4530
4531 if (debug >= 0)
4532 sxg_debug = debug;
4533
4534 return pci_register_driver(&sxg_driver);
4535}
4536
4537static void __exit sxg_module_cleanup(void)
4538{
4539 pci_unregister_driver(&sxg_driver);
4540}
4541
4542module_init(sxg_module_init);
4543module_exit(sxg_module_cleanup);
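/*
 * Loading sketch (assumes the "debug" module parameter is declared
 * elsewhere in this file, as the test in sxg_module_init() implies):
 *
 *   insmod sxg.ko debug=1
 */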