[PATCH] S2io: New link handling scheme for Xframe II
drivers/net/s2io.c
1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722 14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
 18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code parts that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
 27 * The module loadable parameters that are supported by the driver and a brief
 28 * explanation of all the variables.
20346722 29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
 32 * is also an array of size 8.
 33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
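/*
 * Illustrative note (not part of the original sources): the variables
 * documented above are plain module parameters, so a hypothetical load
 * line could look roughly like
 *
 *     modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 \
 *                   rx_ring_num=2 rx_ring_sz=8,8
 *
 * The values shown are placeholders only; exact parameter spellings follow
 * the variables declared further down in this file, and anything not
 * supplied falls back to the driver defaults.
 */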
37
38#include <linux/config.h>
39#include <linux/module.h>
40#include <linux/types.h>
41#include <linux/errno.h>
42#include <linux/ioport.h>
43#include <linux/pci.h>
1e7f0bd8 44#include <linux/dma-mapping.h>
45#include <linux/kernel.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/skbuff.h>
49#include <linux/init.h>
50#include <linux/delay.h>
51#include <linux/stddef.h>
52#include <linux/ioctl.h>
53#include <linux/timex.h>
54#include <linux/sched.h>
55#include <linux/ethtool.h>
56#include <linux/version.h>
57#include <linux/workqueue.h>
be3a6b02 58#include <linux/if_vlan.h>
1da177e4 59
60#include <asm/system.h>
61#include <asm/uaccess.h>
20346722 62#include <asm/io.h>
63
64/* local include */
65#include "s2io.h"
66#include "s2io-regs.h"
67
68/* S2io Driver name & version. */
20346722 69static char s2io_driver_name[] = "Neterion";
a371a07d 70static char s2io_driver_version[] = "Version 2.0.2.0";
1da177e4 71
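/*
 * Clarifying note (added): a receive descriptor is treated as "up to date"
 * when the adapter has released ownership (RXD_OWN_XENA cleared in
 * Control_1) and the marker the host wrote into Control_2 has been
 * overwritten, i.e. the NIC has actually filled the descriptor with a
 * received frame.
 */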
5e25b9dd 72static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{
74 int ret;
75
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79 return ret;
80}
81
20346722 82/*
 83 * Cards with the following subsystem_ids have a link state indication
 84 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 85 * The macro below identifies these cards given the subsystem_id.
86 */
541ae68f 87#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88 (dev_type == XFRAME_I_DEVICE) ? \
89 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
91
92#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
94#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
95#define PANIC 1
96#define LOW 2
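/*
 * Clarifying note (added): rx_buffer_level() reports how urgently a ring
 * needs replenishing.  LOW is returned once more than 16 descriptors are
 * outstanding, and PANIC once no more than one block's worth
 * (MAX_RXDS_PER_BLOCK) of buffers remains, so callers can refill before
 * the ring underruns.
 */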
97static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98{
99 int level = 0;
20346722 100 mac_info_t *mac_control;
101
102 mac_control = &sp->mac_control;
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
1da177e4 104 level = LOW;
fe113638 105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
106 level = PANIC;
107 }
108 }
109
110 return level;
111}
112
113/* Ethtool related variables and Macros. */
114static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
120};
121
122static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123 {"tmac_frms"},
124 {"tmac_data_octets"},
125 {"tmac_drop_frms"},
126 {"tmac_mcst_frms"},
127 {"tmac_bcst_frms"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
131 {"tmac_vld_ip"},
132 {"tmac_drop_ip"},
133 {"tmac_icmp"},
134 {"tmac_rst_tcp"},
135 {"tmac_tcp"},
136 {"tmac_udp"},
137 {"rmac_vld_frms"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
140 {"rmac_drop_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
144 {"rmac_long_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
149 {"rmac_frag_frms"},
150 {"rmac_jabber_frms"},
151 {"rmac_ip"},
152 {"rmac_ip_octets"},
153 {"rmac_hdr_err_ip"},
154 {"rmac_drop_ip"},
155 {"rmac_icmp"},
156 {"rmac_tcp"},
157 {"rmac_udp"},
158 {"rmac_err_drp_udp"},
159 {"rmac_pause_cnt"},
160 {"rmac_accepted_ip"},
161 {"rmac_err_tcp"},
7ba013ac 162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
165};
166
167#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
168#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
169
170#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
171#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
172
25fff88e 173#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
174 init_timer(&timer); \
175 timer.function = handle; \
176 timer.data = (unsigned long) arg; \
177 mod_timer(&timer, (jiffies + exp)) \
178
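/*
 * Hypothetical usage sketch (illustration only, not a call site from this
 * excerpt): the macro initializes, binds and arms a kernel timer in one
 * statement, e.g.
 *
 *     S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle,
 *                     (unsigned long)dev, (HZ / 2));
 *
 * where sp->alarm_timer and s2io_alarm_handle() are assumed to be defined
 * elsewhere in the driver; the timer would then fire about twice a second.
 */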
be3a6b02 179/* Add the vlan */
180static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
182{
183 nic_t *nic = dev->priv;
184 unsigned long flags;
185
186 spin_lock_irqsave(&nic->tx_lock, flags);
187 nic->vlgrp = grp;
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
189}
190
191/* Unregister the vlan */
192static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193{
194 nic_t *nic = dev->priv;
195 unsigned long flags;
196
197 spin_lock_irqsave(&nic->tx_lock, flags);
198 if (nic->vlgrp)
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
201}
202
20346722 203/*
204 * Constants to be programmed into the Xena's registers, to configure
205 * the XAUI.
206 */
207
208#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
209#define END_SIGN 0x0
210
541ae68f 211static u64 herc_act_dtx_cfg[] = {
212 /* Set address */
213 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
214 /* Write data */
215 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
216 /* Set address */
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218 /* Write data */
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220 /* Set address */
221 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
222 /* Write data */
223 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
224 /* Done */
225 END_SIGN
226};
227
228static u64 xena_mdio_cfg[] = {
229 /* Reset PMA PLL */
230 0xC001010000000000ULL, 0xC0010100000000E0ULL,
231 0xC0010100008000E4ULL,
232 /* Remove Reset from PMA PLL */
233 0xC001010000000000ULL, 0xC0010100000000E0ULL,
234 0xC0010100000000E4ULL,
235 END_SIGN
236};
237
541ae68f 238static u64 xena_dtx_cfg[] = {
239 0x8000051500000000ULL, 0x80000515000000E0ULL,
240 0x80000515D93500E4ULL, 0x8001051500000000ULL,
241 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
242 0x8002051500000000ULL, 0x80020515000000E0ULL,
243 0x80020515F21000E4ULL,
244 /* Set PADLOOPBACKN */
245 0x8002051500000000ULL, 0x80020515000000E0ULL,
246 0x80020515B20000E4ULL, 0x8003051500000000ULL,
247 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
248 0x8004051500000000ULL, 0x80040515000000E0ULL,
249 0x80040515B20000E4ULL, 0x8005051500000000ULL,
250 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
251 SWITCH_SIGN,
252 /* Remove PADLOOPBACKN */
253 0x8002051500000000ULL, 0x80020515000000E0ULL,
254 0x80020515F20000E4ULL, 0x8003051500000000ULL,
255 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
256 0x8004051500000000ULL, 0x80040515000000E0ULL,
257 0x80040515F20000E4ULL, 0x8005051500000000ULL,
258 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
259 END_SIGN
260};
261
20346722 262/*
263 * Constants for Fixing the MacAddress problem seen mostly on
264 * Alpha machines.
265 */
266static u64 fix_mac[] = {
267 0x0060000000000000ULL, 0x0060600000000000ULL,
268 0x0040600000000000ULL, 0x0000600000000000ULL,
269 0x0020600000000000ULL, 0x0060600000000000ULL,
270 0x0020600000000000ULL, 0x0060600000000000ULL,
271 0x0020600000000000ULL, 0x0060600000000000ULL,
272 0x0020600000000000ULL, 0x0060600000000000ULL,
273 0x0020600000000000ULL, 0x0060600000000000ULL,
274 0x0020600000000000ULL, 0x0060600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0000600000000000ULL,
280 0x0040600000000000ULL, 0x0060600000000000ULL,
281 END_SIGN
282};
283
284/* Module Loadable parameters. */
285static unsigned int tx_fifo_num = 1;
286static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
287 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
288static unsigned int rx_ring_num = 1;
289static unsigned int rx_ring_sz[MAX_RX_RINGS] =
290 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
20346722 291static unsigned int rts_frm_len[MAX_RX_RINGS] =
292 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
5e25b9dd 293static unsigned int use_continuous_tx_intrs = 1;
294static unsigned int rmac_pause_time = 65535;
295static unsigned int mc_pause_threshold_q0q3 = 187;
296static unsigned int mc_pause_threshold_q4q7 = 187;
297static unsigned int shared_splits;
298static unsigned int tmac_util_period = 5;
299static unsigned int rmac_util_period = 5;
b6e3f982 300static unsigned int bimodal = 0;
301#ifndef CONFIG_S2IO_NAPI
302static unsigned int indicate_max_pkts;
303#endif
304
20346722 305/*
1da177e4 306 * S2IO device table.
20346722 307 * This table lists all the devices that this driver supports.
308 */
309static struct pci_device_id s2io_tbl[] __devinitdata = {
310 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
311 PCI_ANY_ID, PCI_ANY_ID},
312 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
313 PCI_ANY_ID, PCI_ANY_ID},
314 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
20346722 315 PCI_ANY_ID, PCI_ANY_ID},
316 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
317 PCI_ANY_ID, PCI_ANY_ID},
318 {0,}
319};
320
321MODULE_DEVICE_TABLE(pci, s2io_tbl);
322
323static struct pci_driver s2io_driver = {
324 .name = "S2IO",
325 .id_table = s2io_tbl,
326 .probe = s2io_init_nic,
327 .remove = __devexit_p(s2io_rem_nic),
328};
329
330/* A simplifier macro used both by init and free shared_mem Fns(). */
331#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
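/* Worked example (illustrative): if lst_size came to 256 bytes on a 4K page,
 * lst_per_page would be 16, and a FIFO of 100 TxD lists would need
 * TXD_MEM_PAGE_CNT(100, 16) = (100 + 16 - 1) / 16 = 7 pages.
 */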
332
333/**
334 * init_shared_mem - Allocation and Initialization of Memory
335 * @nic: Device private variable.
20346722 336 * Description: The function allocates all the memory areas shared
337 * between the NIC and the driver. This includes Tx descriptors,
338 * Rx descriptors and the statistics block.
339 */
340
341static int init_shared_mem(struct s2io_nic *nic)
342{
343 u32 size;
344 void *tmp_v_addr, *tmp_v_addr_next;
345 dma_addr_t tmp_p_addr, tmp_p_addr_next;
346 RxD_block_t *pre_rxd_blk = NULL;
20346722 347 int i, j, blk_cnt, rx_sz, tx_sz;
348 int lst_size, lst_per_page;
349 struct net_device *dev = nic->dev;
350#ifdef CONFIG_2BUFF_MODE
20346722 351 u64 tmp;
352 buffAdd_t *ba;
353#endif
354
355 mac_info_t *mac_control;
356 struct config_param *config;
357
358 mac_control = &nic->mac_control;
359 config = &nic->config;
360
361
 362 /* Allocation and initialization of TXDLs in FIFOs */
363 size = 0;
364 for (i = 0; i < config->tx_fifo_num; i++) {
365 size += config->tx_cfg[i].fifo_len;
366 }
367 if (size > MAX_AVAILABLE_TXDS) {
368 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
369 dev->name);
370 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
371 DBG_PRINT(ERR_DBG, "that can be used\n");
372 return FAILURE;
373 }
374
375 lst_size = (sizeof(TxD_t) * config->max_txds);
20346722 376 tx_sz = lst_size * size;
377 lst_per_page = PAGE_SIZE / lst_size;
378
379 for (i = 0; i < config->tx_fifo_num; i++) {
380 int fifo_len = config->tx_cfg[i].fifo_len;
381 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
20346722 382 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
383 GFP_KERNEL);
384 if (!mac_control->fifos[i].list_info) {
385 DBG_PRINT(ERR_DBG,
386 "Malloc failed for list_info\n");
387 return -ENOMEM;
388 }
20346722 389 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
390 }
391 for (i = 0; i < config->tx_fifo_num; i++) {
392 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
393 lst_per_page);
20346722 394 mac_control->fifos[i].tx_curr_put_info.offset = 0;
395 mac_control->fifos[i].tx_curr_put_info.fifo_len =
1da177e4 396 config->tx_cfg[i].fifo_len - 1;
20346722 397 mac_control->fifos[i].tx_curr_get_info.offset = 0;
398 mac_control->fifos[i].tx_curr_get_info.fifo_len =
1da177e4 399 config->tx_cfg[i].fifo_len - 1;
20346722 400 mac_control->fifos[i].fifo_no = i;
401 mac_control->fifos[i].nic = nic;
402 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
403
404 for (j = 0; j < page_num; j++) {
405 int k = 0;
406 dma_addr_t tmp_p;
407 void *tmp_v;
408 tmp_v = pci_alloc_consistent(nic->pdev,
409 PAGE_SIZE, &tmp_p);
410 if (!tmp_v) {
411 DBG_PRINT(ERR_DBG,
412 "pci_alloc_consistent ");
413 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
414 return -ENOMEM;
415 }
416 while (k < lst_per_page) {
417 int l = (j * lst_per_page) + k;
418 if (l == config->tx_cfg[i].fifo_len)
20346722 419 break;
420 mac_control->fifos[i].list_info[l].list_virt_addr =
1da177e4 421 tmp_v + (k * lst_size);
20346722 422 mac_control->fifos[i].list_info[l].list_phy_addr =
423 tmp_p + (k * lst_size);
424 k++;
425 }
426 }
427 }
428
429 /* Allocation and initialization of RXDs in Rings */
430 size = 0;
431 for (i = 0; i < config->rx_ring_num; i++) {
432 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
433 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
434 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
435 i);
436 DBG_PRINT(ERR_DBG, "RxDs per Block");
437 return FAILURE;
438 }
439 size += config->rx_cfg[i].num_rxd;
20346722 440 mac_control->rings[i].block_count =
1da177e4 441 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
20346722 442 mac_control->rings[i].pkt_cnt =
443 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
1da177e4 444 }
20346722 445 size = (size * (sizeof(RxD_t)));
446 rx_sz = size;
447
448 for (i = 0; i < config->rx_ring_num; i++) {
20346722 449 mac_control->rings[i].rx_curr_get_info.block_index = 0;
450 mac_control->rings[i].rx_curr_get_info.offset = 0;
451 mac_control->rings[i].rx_curr_get_info.ring_len =
1da177e4 452 config->rx_cfg[i].num_rxd - 1;
20346722 453 mac_control->rings[i].rx_curr_put_info.block_index = 0;
454 mac_control->rings[i].rx_curr_put_info.offset = 0;
455 mac_control->rings[i].rx_curr_put_info.ring_len =
1da177e4 456 config->rx_cfg[i].num_rxd - 1;
20346722 457 mac_control->rings[i].nic = nic;
458 mac_control->rings[i].ring_no = i;
459
460 blk_cnt =
461 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
462 /* Allocating all the Rx blocks */
463 for (j = 0; j < blk_cnt; j++) {
464#ifndef CONFIG_2BUFF_MODE
465 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
466#else
467 size = SIZE_OF_BLOCK;
468#endif
469 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
470 &tmp_p_addr);
471 if (tmp_v_addr == NULL) {
472 /*
20346722 473 * In case of failure, free_shared_mem()
474 * is called, which should free any
 475 * memory that was allocated till the
 476 * failure happened.
477 */
20346722 478 mac_control->rings[i].rx_blocks[j].block_virt_addr =
479 tmp_v_addr;
480 return -ENOMEM;
481 }
482 memset(tmp_v_addr, 0, size);
20346722 483 mac_control->rings[i].rx_blocks[j].block_virt_addr =
484 tmp_v_addr;
485 mac_control->rings[i].rx_blocks[j].block_dma_addr =
486 tmp_p_addr;
487 }
488 /* Interlinking all Rx Blocks */
489 for (j = 0; j < blk_cnt; j++) {
20346722 490 tmp_v_addr =
491 mac_control->rings[i].rx_blocks[j].block_virt_addr;
1da177e4 492 tmp_v_addr_next =
20346722 493 mac_control->rings[i].rx_blocks[(j + 1) %
1da177e4 494 blk_cnt].block_virt_addr;
20346722 495 tmp_p_addr =
496 mac_control->rings[i].rx_blocks[j].block_dma_addr;
1da177e4 497 tmp_p_addr_next =
20346722 498 mac_control->rings[i].rx_blocks[(j + 1) %
499 blk_cnt].block_dma_addr;
500
501 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
20346722 502 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
503 * marker.
504 */
505#ifndef CONFIG_2BUFF_MODE
506 pre_rxd_blk->reserved_2_pNext_RxD_block =
507 (unsigned long) tmp_v_addr_next;
508#endif
509 pre_rxd_blk->pNext_RxD_Blk_physical =
510 (u64) tmp_p_addr_next;
511 }
512 }
513
514#ifdef CONFIG_2BUFF_MODE
20346722 515 /*
516 * Allocation of Storages for buffer addresses in 2BUFF mode
517 * and the buffers as well.
518 */
519 for (i = 0; i < config->rx_ring_num; i++) {
520 blk_cnt =
521 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
20346722 522 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
1da177e4 523 GFP_KERNEL);
20346722 524 if (!mac_control->rings[i].ba)
525 return -ENOMEM;
526 for (j = 0; j < blk_cnt; j++) {
527 int k = 0;
20346722 528 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
529 (MAX_RXDS_PER_BLOCK + 1)),
530 GFP_KERNEL);
20346722 531 if (!mac_control->rings[i].ba[j])
532 return -ENOMEM;
533 while (k != MAX_RXDS_PER_BLOCK) {
20346722 534 ba = &mac_control->rings[i].ba[j][k];
1da177e4 535
20346722 536 ba->ba_0_org = (void *) kmalloc
537 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
538 if (!ba->ba_0_org)
539 return -ENOMEM;
20346722 540 tmp = (u64) ba->ba_0_org;
1da177e4 541 tmp += ALIGN_SIZE;
20346722 542 tmp &= ~((u64) ALIGN_SIZE);
543 ba->ba_0 = (void *) tmp;
544
20346722 545 ba->ba_1_org = (void *) kmalloc
546 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
547 if (!ba->ba_1_org)
548 return -ENOMEM;
20346722 549 tmp = (u64) ba->ba_1_org;
1da177e4 550 tmp += ALIGN_SIZE;
20346722 551 tmp &= ~((u64) ALIGN_SIZE);
552 ba->ba_1 = (void *) tmp;
553 k++;
554 }
555 }
556 }
557#endif
558
559 /* Allocation and initialization of Statistics block */
560 size = sizeof(StatInfo_t);
561 mac_control->stats_mem = pci_alloc_consistent
562 (nic->pdev, size, &mac_control->stats_mem_phy);
563
564 if (!mac_control->stats_mem) {
20346722 565 /*
566 * In case of failure, free_shared_mem() is called, which
 567 * should free any memory that was allocated till the
 568 * failure happened.
569 */
570 return -ENOMEM;
571 }
572 mac_control->stats_mem_sz = size;
573
574 tmp_v_addr = mac_control->stats_mem;
575 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
576 memset(tmp_v_addr, 0, size);
577 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
578 (unsigned long long) tmp_p_addr);
579
580 return SUCCESS;
581}
582
20346722 583/**
584 * free_shared_mem - Free the allocated Memory
585 * @nic: Device private variable.
 586 * Description: This function frees all memory locations allocated by
 587 * the init_shared_mem() function and returns them to the kernel.
588 */
589
590static void free_shared_mem(struct s2io_nic *nic)
591{
592 int i, j, blk_cnt, size;
593 void *tmp_v_addr;
594 dma_addr_t tmp_p_addr;
595 mac_info_t *mac_control;
596 struct config_param *config;
597 int lst_size, lst_per_page;
598
599
600 if (!nic)
601 return;
602
603 mac_control = &nic->mac_control;
604 config = &nic->config;
605
606 lst_size = (sizeof(TxD_t) * config->max_txds);
607 lst_per_page = PAGE_SIZE / lst_size;
608
609 for (i = 0; i < config->tx_fifo_num; i++) {
610 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
611 lst_per_page);
612 for (j = 0; j < page_num; j++) {
613 int mem_blks = (j * lst_per_page);
20346722 614 if (!mac_control->fifos[i].list_info[mem_blks].
615 list_virt_addr)
616 break;
617 pci_free_consistent(nic->pdev, PAGE_SIZE,
20346722 618 mac_control->fifos[i].
619 list_info[mem_blks].
1da177e4 620 list_virt_addr,
20346722 621 mac_control->fifos[i].
622 list_info[mem_blks].
623 list_phy_addr);
624 }
20346722 625 kfree(mac_control->fifos[i].list_info);
626 }
627
628#ifndef CONFIG_2BUFF_MODE
629 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
630#else
631 size = SIZE_OF_BLOCK;
632#endif
633 for (i = 0; i < config->rx_ring_num; i++) {
20346722 634 blk_cnt = mac_control->rings[i].block_count;
1da177e4 635 for (j = 0; j < blk_cnt; j++) {
20346722 636 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
637 block_virt_addr;
638 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
639 block_dma_addr;
640 if (tmp_v_addr == NULL)
641 break;
642 pci_free_consistent(nic->pdev, size,
643 tmp_v_addr, tmp_p_addr);
644 }
645 }
646
647#ifdef CONFIG_2BUFF_MODE
648 /* Freeing buffer storage addresses in 2BUFF mode. */
649 for (i = 0; i < config->rx_ring_num; i++) {
650 blk_cnt =
651 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
652 for (j = 0; j < blk_cnt; j++) {
653 int k = 0;
20346722 654 if (!mac_control->rings[i].ba[j])
655 continue;
1da177e4 656 while (k != MAX_RXDS_PER_BLOCK) {
20346722 657 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
658 kfree(ba->ba_0_org);
659 kfree(ba->ba_1_org);
660 k++;
661 }
20346722 662 kfree(mac_control->rings[i].ba[j]);
1da177e4 663 }
20346722 664 if (mac_control->rings[i].ba)
665 kfree(mac_control->rings[i].ba);
1da177e4 666 }
667#endif
668
669 if (mac_control->stats_mem) {
670 pci_free_consistent(nic->pdev,
671 mac_control->stats_mem_sz,
672 mac_control->stats_mem,
673 mac_control->stats_mem_phy);
674 }
675}
676
541ae68f 677/**
 678 * s2io_verify_pci_mode - Detect the PCI/PCI-X mode the adapter is operating in
679 */
680
681static int s2io_verify_pci_mode(nic_t *nic)
682{
683 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
684 register u64 val64 = 0;
685 int mode;
686
687 val64 = readq(&bar0->pci_mode);
688 mode = (u8)GET_PCI_MODE(val64);
689
690 if ( val64 & PCI_MODE_UNKNOWN_MODE)
691 return -1; /* Unknown PCI mode */
692 return mode;
693}
694
695
696/**
 697 * s2io_print_pci_mode - Report the PCI/PCI-X bus mode and record the bus speed
698 */
699static int s2io_print_pci_mode(nic_t *nic)
700{
701 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
702 register u64 val64 = 0;
703 int mode;
704 struct config_param *config = &nic->config;
705
706 val64 = readq(&bar0->pci_mode);
707 mode = (u8)GET_PCI_MODE(val64);
708
709 if ( val64 & PCI_MODE_UNKNOWN_MODE)
710 return -1; /* Unknown PCI mode */
711
712 if (val64 & PCI_MODE_32_BITS) {
713 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
714 } else {
715 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
716 }
717
718 switch(mode) {
719 case PCI_MODE_PCI_33:
720 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
721 config->bus_speed = 33;
722 break;
723 case PCI_MODE_PCI_66:
724 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
725 config->bus_speed = 133;
726 break;
727 case PCI_MODE_PCIX_M1_66:
728 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
729 config->bus_speed = 133; /* Herc doubles the clock rate */
730 break;
731 case PCI_MODE_PCIX_M1_100:
732 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
733 config->bus_speed = 200;
734 break;
735 case PCI_MODE_PCIX_M1_133:
736 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
737 config->bus_speed = 266;
738 break;
739 case PCI_MODE_PCIX_M2_66:
740 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
741 config->bus_speed = 133;
742 break;
743 case PCI_MODE_PCIX_M2_100:
744 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
745 config->bus_speed = 200;
746 break;
747 case PCI_MODE_PCIX_M2_133:
748 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
749 config->bus_speed = 266;
750 break;
751 default:
752 return -1; /* Unsupported bus speed */
753 }
754
755 return mode;
756}
757
20346722 758/**
759 * init_nic - Initialization of hardware
1da177e4 760 * @nic: device private variable
20346722 761 * Description: The function sequentially configures every block
762 * of the H/W from their reset values.
763 * Return Value: SUCCESS on success and
764 * '-1' on failure (endian settings incorrect).
765 */
766
767static int init_nic(struct s2io_nic *nic)
768{
769 XENA_dev_config_t __iomem *bar0 = nic->bar0;
770 struct net_device *dev = nic->dev;
771 register u64 val64 = 0;
772 void __iomem *add;
773 u32 time;
774 int i, j;
775 mac_info_t *mac_control;
776 struct config_param *config;
777 int mdio_cnt = 0, dtx_cnt = 0;
778 unsigned long long mem_share;
20346722 779 int mem_size;
780
781 mac_control = &nic->mac_control;
782 config = &nic->config;
783
5e25b9dd 784 /* Set the swapper control on the card */
20346722 785 if(s2io_set_swapper(nic)) {
786 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
787 return -1;
788 }
789
541ae68f 790 /*
791 * Herc requires EOI to be removed from reset before XGXS, so..
792 */
793 if (nic->device_type & XFRAME_II_DEVICE) {
794 val64 = 0xA500000000ULL;
795 writeq(val64, &bar0->sw_reset);
796 msleep(500);
797 val64 = readq(&bar0->sw_reset);
798 }
799
800 /* Remove XGXS from reset state */
801 val64 = 0;
802 writeq(val64, &bar0->sw_reset);
1da177e4 803 msleep(500);
20346722 804 val64 = readq(&bar0->sw_reset);
805
806 /* Enable Receiving broadcasts */
807 add = &bar0->mac_cfg;
808 val64 = readq(&bar0->mac_cfg);
809 val64 |= MAC_RMAC_BCAST_ENABLE;
810 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
811 writel((u32) val64, add);
812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
813 writel((u32) (val64 >> 32), (add + 4));
814
815 /* Read registers in all blocks */
816 val64 = readq(&bar0->mac_int_mask);
817 val64 = readq(&bar0->mc_int_mask);
818 val64 = readq(&bar0->xgxs_int_mask);
819
820 /* Set MTU */
821 val64 = dev->mtu;
822 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
823
20346722 824 /*
825 * Configuring the XAUI Interface of Xena.
1da177e4 826 * ***************************************
20346722 827 * To Configure the Xena's XAUI, one has to write a series
828 * of 64 bit values into two registers in a particular
829 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
830 * which will be defined in the array of configuration values
541ae68f 831 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
20346722 832 * to switch writing from one register to another. We continue
1da177e4 833 * writing these values until we encounter the 'END_SIGN' macro.
20346722 834 * For example, after making a series of 21 writes into
 835 * dtx_control register the 'SWITCH_SIGN' appears and hence we
 836 * start writing into mdio_control until we encounter END_SIGN.
837 */
541ae68f 838 if (nic->device_type & XFRAME_II_DEVICE) {
839 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
 840 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 841 &bar0->dtx_control, UF);
541ae68f 842 if (dtx_cnt & 0x1)
843 msleep(1); /* Necessary!! */
844 dtx_cnt++;
845 }
541ae68f 846 } else {
847 while (1) {
848 dtx_cfg:
849 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
850 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
851 dtx_cnt++;
852 goto mdio_cfg;
853 }
854 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
855 &bar0->dtx_control, UF);
856 val64 = readq(&bar0->dtx_control);
857 dtx_cnt++;
858 }
859 mdio_cfg:
860 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
861 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
862 mdio_cnt++;
863 goto dtx_cfg;
864 }
865 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
866 &bar0->mdio_control, UF);
867 val64 = readq(&bar0->mdio_control);
1da177e4 868 mdio_cnt++;
541ae68f 869 }
870 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
871 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
872 break;
873 } else {
874 goto dtx_cfg;
875 }
876 }
877 }
878
879 /* Tx DMA Initialization */
880 val64 = 0;
881 writeq(val64, &bar0->tx_fifo_partition_0);
882 writeq(val64, &bar0->tx_fifo_partition_1);
883 writeq(val64, &bar0->tx_fifo_partition_2);
884 writeq(val64, &bar0->tx_fifo_partition_3);
885
886
887 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
888 val64 |=
889 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
890 13) | vBIT(config->tx_cfg[i].fifo_priority,
891 ((i * 32) + 5), 3);
892
893 if (i == (config->tx_fifo_num - 1)) {
894 if (i % 2 == 0)
895 i++;
896 }
897
898 switch (i) {
899 case 1:
900 writeq(val64, &bar0->tx_fifo_partition_0);
901 val64 = 0;
902 break;
903 case 3:
904 writeq(val64, &bar0->tx_fifo_partition_1);
905 val64 = 0;
906 break;
907 case 5:
908 writeq(val64, &bar0->tx_fifo_partition_2);
909 val64 = 0;
910 break;
911 case 7:
912 writeq(val64, &bar0->tx_fifo_partition_3);
913 break;
914 }
915 }
916
917 /* Enable Tx FIFO partition 0. */
918 val64 = readq(&bar0->tx_fifo_partition_0);
919 val64 |= BIT(0); /* To enable the FIFO partition. */
920 writeq(val64, &bar0->tx_fifo_partition_0);
921
5e25b9dd 922 /*
923 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
924 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
925 */
541ae68f 926 if ((nic->device_type == XFRAME_I_DEVICE) &&
927 (get_xena_rev_id(nic->pdev) < 4))
5e25b9dd 928 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
929
930 val64 = readq(&bar0->tx_fifo_partition_0);
931 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
932 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
933
20346722 934 /*
935 * Initialization of Tx_PA_CONFIG register to ignore packet
936 * integrity checking.
937 */
938 val64 = readq(&bar0->tx_pa_cfg);
939 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
940 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
941 writeq(val64, &bar0->tx_pa_cfg);
942
 943 /* Rx DMA initialization. */
944 val64 = 0;
945 for (i = 0; i < config->rx_ring_num; i++) {
946 val64 |=
947 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
948 3);
949 }
950 writeq(val64, &bar0->rx_queue_priority);
951
20346722 952 /*
953 * Allocating equal share of memory to all the
954 * configured Rings.
955 */
956 val64 = 0;
541ae68f 957 if (nic->device_type & XFRAME_II_DEVICE)
958 mem_size = 32;
959 else
960 mem_size = 64;
961
962 for (i = 0; i < config->rx_ring_num; i++) {
963 switch (i) {
964 case 0:
20346722 965 mem_share = (mem_size / config->rx_ring_num +
966 mem_size % config->rx_ring_num);
967 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
968 continue;
969 case 1:
20346722 970 mem_share = (mem_size / config->rx_ring_num);
971 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
972 continue;
973 case 2:
20346722 974 mem_share = (mem_size / config->rx_ring_num);
975 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
976 continue;
977 case 3:
20346722 978 mem_share = (mem_size / config->rx_ring_num);
979 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
980 continue;
981 case 4:
20346722 982 mem_share = (mem_size / config->rx_ring_num);
983 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
984 continue;
985 case 5:
20346722 986 mem_share = (mem_size / config->rx_ring_num);
987 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
988 continue;
989 case 6:
20346722 990 mem_share = (mem_size / config->rx_ring_num);
991 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
992 continue;
993 case 7:
20346722 994 mem_share = (mem_size / config->rx_ring_num);
995 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
996 continue;
997 }
998 }
999 writeq(val64, &bar0->rx_queue_cfg);
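	/*
	 * Worked example (illustrative): on a Xena card (mem_size = 64)
	 * with three rings, ring 0 receives 64/3 + 64%3 = 21 + 1 = 22
	 * shares while rings 1 and 2 receive 21 each, so all 64 shares
	 * are handed out and the remainder always goes to queue 0.
	 */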
1000
20346722 1001 /*
5e25b9dd 1002 * Filling Tx round robin registers
1003 * as per the number of FIFOs
1da177e4 1004 */
5e25b9dd 1005 switch (config->tx_fifo_num) {
1006 case 1:
1007 val64 = 0x0000000000000000ULL;
1008 writeq(val64, &bar0->tx_w_round_robin_0);
1009 writeq(val64, &bar0->tx_w_round_robin_1);
1010 writeq(val64, &bar0->tx_w_round_robin_2);
1011 writeq(val64, &bar0->tx_w_round_robin_3);
1012 writeq(val64, &bar0->tx_w_round_robin_4);
1013 break;
1014 case 2:
1015 val64 = 0x0000010000010000ULL;
1016 writeq(val64, &bar0->tx_w_round_robin_0);
1017 val64 = 0x0100000100000100ULL;
1018 writeq(val64, &bar0->tx_w_round_robin_1);
1019 val64 = 0x0001000001000001ULL;
1020 writeq(val64, &bar0->tx_w_round_robin_2);
1021 val64 = 0x0000010000010000ULL;
1022 writeq(val64, &bar0->tx_w_round_robin_3);
1023 val64 = 0x0100000000000000ULL;
1024 writeq(val64, &bar0->tx_w_round_robin_4);
1025 break;
1026 case 3:
1027 val64 = 0x0001000102000001ULL;
1028 writeq(val64, &bar0->tx_w_round_robin_0);
1029 val64 = 0x0001020000010001ULL;
1030 writeq(val64, &bar0->tx_w_round_robin_1);
1031 val64 = 0x0200000100010200ULL;
1032 writeq(val64, &bar0->tx_w_round_robin_2);
1033 val64 = 0x0001000102000001ULL;
1034 writeq(val64, &bar0->tx_w_round_robin_3);
1035 val64 = 0x0001020000000000ULL;
1036 writeq(val64, &bar0->tx_w_round_robin_4);
1037 break;
1038 case 4:
1039 val64 = 0x0001020300010200ULL;
1040 writeq(val64, &bar0->tx_w_round_robin_0);
1041 val64 = 0x0100000102030001ULL;
1042 writeq(val64, &bar0->tx_w_round_robin_1);
1043 val64 = 0x0200010000010203ULL;
1044 writeq(val64, &bar0->tx_w_round_robin_2);
1045 val64 = 0x0001020001000001ULL;
1046 writeq(val64, &bar0->tx_w_round_robin_3);
1047 val64 = 0x0203000100000000ULL;
1048 writeq(val64, &bar0->tx_w_round_robin_4);
1049 break;
1050 case 5:
1051 val64 = 0x0001000203000102ULL;
1052 writeq(val64, &bar0->tx_w_round_robin_0);
1053 val64 = 0x0001020001030004ULL;
1054 writeq(val64, &bar0->tx_w_round_robin_1);
1055 val64 = 0x0001000203000102ULL;
1056 writeq(val64, &bar0->tx_w_round_robin_2);
1057 val64 = 0x0001020001030004ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_3);
1059 val64 = 0x0001000000000000ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_4);
1061 break;
1062 case 6:
1063 val64 = 0x0001020304000102ULL;
1064 writeq(val64, &bar0->tx_w_round_robin_0);
1065 val64 = 0x0304050001020001ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_1);
1067 val64 = 0x0203000100000102ULL;
1068 writeq(val64, &bar0->tx_w_round_robin_2);
1069 val64 = 0x0304000102030405ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_3);
1071 val64 = 0x0001000200000000ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_4);
1073 break;
1074 case 7:
1075 val64 = 0x0001020001020300ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_0);
1077 val64 = 0x0102030400010203ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_1);
1079 val64 = 0x0405060001020001ULL;
1080 writeq(val64, &bar0->tx_w_round_robin_2);
1081 val64 = 0x0304050000010200ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_3);
1083 val64 = 0x0102030000000000ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_4);
1085 break;
1086 case 8:
1087 val64 = 0x0001020300040105ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_0);
1089 val64 = 0x0200030106000204ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_1);
1091 val64 = 0x0103000502010007ULL;
1092 writeq(val64, &bar0->tx_w_round_robin_2);
1093 val64 = 0x0304010002060500ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_3);
1095 val64 = 0x0103020400000000ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_4);
1097 break;
1098 }
1099
1100 /* Filling the Rx round robin registers as per the
1101 * number of Rings and steering based on QoS.
1102 */
1103 switch (config->rx_ring_num) {
1104 case 1:
1105 val64 = 0x8080808080808080ULL;
1106 writeq(val64, &bar0->rts_qos_steering);
1107 break;
1108 case 2:
1109 val64 = 0x0000010000010000ULL;
1110 writeq(val64, &bar0->rx_w_round_robin_0);
1111 val64 = 0x0100000100000100ULL;
1112 writeq(val64, &bar0->rx_w_round_robin_1);
1113 val64 = 0x0001000001000001ULL;
1114 writeq(val64, &bar0->rx_w_round_robin_2);
1115 val64 = 0x0000010000010000ULL;
1116 writeq(val64, &bar0->rx_w_round_robin_3);
1117 val64 = 0x0100000000000000ULL;
1118 writeq(val64, &bar0->rx_w_round_robin_4);
1119
1120 val64 = 0x8080808040404040ULL;
1121 writeq(val64, &bar0->rts_qos_steering);
1122 break;
1123 case 3:
1124 val64 = 0x0001000102000001ULL;
1125 writeq(val64, &bar0->rx_w_round_robin_0);
1126 val64 = 0x0001020000010001ULL;
1127 writeq(val64, &bar0->rx_w_round_robin_1);
1128 val64 = 0x0200000100010200ULL;
1129 writeq(val64, &bar0->rx_w_round_robin_2);
1130 val64 = 0x0001000102000001ULL;
1131 writeq(val64, &bar0->rx_w_round_robin_3);
1132 val64 = 0x0001020000000000ULL;
1133 writeq(val64, &bar0->rx_w_round_robin_4);
1134
1135 val64 = 0x8080804040402020ULL;
1136 writeq(val64, &bar0->rts_qos_steering);
1137 break;
1138 case 4:
1139 val64 = 0x0001020300010200ULL;
1140 writeq(val64, &bar0->rx_w_round_robin_0);
1141 val64 = 0x0100000102030001ULL;
1142 writeq(val64, &bar0->rx_w_round_robin_1);
1143 val64 = 0x0200010000010203ULL;
1144 writeq(val64, &bar0->rx_w_round_robin_2);
1145 val64 = 0x0001020001000001ULL;
1146 writeq(val64, &bar0->rx_w_round_robin_3);
1147 val64 = 0x0203000100000000ULL;
1148 writeq(val64, &bar0->rx_w_round_robin_4);
1149
1150 val64 = 0x8080404020201010ULL;
1151 writeq(val64, &bar0->rts_qos_steering);
1152 break;
1153 case 5:
1154 val64 = 0x0001000203000102ULL;
1155 writeq(val64, &bar0->rx_w_round_robin_0);
1156 val64 = 0x0001020001030004ULL;
1157 writeq(val64, &bar0->rx_w_round_robin_1);
1158 val64 = 0x0001000203000102ULL;
1159 writeq(val64, &bar0->rx_w_round_robin_2);
1160 val64 = 0x0001020001030004ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_3);
1162 val64 = 0x0001000000000000ULL;
1163 writeq(val64, &bar0->rx_w_round_robin_4);
1164
1165 val64 = 0x8080404020201008ULL;
1166 writeq(val64, &bar0->rts_qos_steering);
1167 break;
1168 case 6:
1169 val64 = 0x0001020304000102ULL;
1170 writeq(val64, &bar0->rx_w_round_robin_0);
1171 val64 = 0x0304050001020001ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_1);
1173 val64 = 0x0203000100000102ULL;
1174 writeq(val64, &bar0->rx_w_round_robin_2);
1175 val64 = 0x0304000102030405ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_3);
1177 val64 = 0x0001000200000000ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_4);
1179
1180 val64 = 0x8080404020100804ULL;
1181 writeq(val64, &bar0->rts_qos_steering);
1182 break;
1183 case 7:
1184 val64 = 0x0001020001020300ULL;
1185 writeq(val64, &bar0->rx_w_round_robin_0);
1186 val64 = 0x0102030400010203ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_1);
1188 val64 = 0x0405060001020001ULL;
1189 writeq(val64, &bar0->rx_w_round_robin_2);
1190 val64 = 0x0304050000010200ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_3);
1192 val64 = 0x0102030000000000ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_4);
1194
1195 val64 = 0x8080402010080402ULL;
1196 writeq(val64, &bar0->rts_qos_steering);
1197 break;
1198 case 8:
1199 val64 = 0x0001020300040105ULL;
1200 writeq(val64, &bar0->rx_w_round_robin_0);
1201 val64 = 0x0200030106000204ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_1);
1203 val64 = 0x0103000502010007ULL;
1204 writeq(val64, &bar0->rx_w_round_robin_2);
1205 val64 = 0x0304010002060500ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_3);
1207 val64 = 0x0103020400000000ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_4);
1209
1210 val64 = 0x8040201008040201ULL;
1211 writeq(val64, &bar0->rts_qos_steering);
1212 break;
1213 }
1214
1215 /* UDP Fix */
1216 val64 = 0;
20346722 1217 for (i = 0; i < 8; i++)
1218 writeq(val64, &bar0->rts_frm_len_n[i]);
1219
5e25b9dd 1220 /* Set the default rts frame length for the rings configured */
1221 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1222 for (i = 0 ; i < config->rx_ring_num ; i++)
1223 writeq(val64, &bar0->rts_frm_len_n[i]);
1224
1225 /* Set the frame length for the configured rings
1226 * desired by the user
1227 */
1228 for (i = 0; i < config->rx_ring_num; i++) {
 1229 /* If rts_frm_len[i] == 0 then it is assumed that the user has not
 1230 * specified frame length steering.
1231 * If the user provides the frame length then program
1232 * the rts_frm_len register for those values or else
1233 * leave it as it is.
1234 */
1235 if (rts_frm_len[i] != 0) {
1236 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1237 &bar0->rts_frm_len_n[i]);
1238 }
1239 }
1da177e4 1240
20346722 1241 /* Program statistics memory */
1da177e4 1242 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1243
541ae68f 1244 if (nic->device_type == XFRAME_II_DEVICE) {
1245 val64 = STAT_BC(0x320);
1246 writeq(val64, &bar0->stat_byte_cnt);
1247 }
1248
20346722 1249 /*
1250 * Initializing the sampling rate for the device to calculate the
1251 * bandwidth utilization.
1252 */
1253 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1254 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1255 writeq(val64, &bar0->mac_link_util);
1256
1257
20346722 1258 /*
1259 * Initializing the Transmit and Receive Traffic Interrupt
1260 * Scheme.
1261 */
20346722 1262 /*
1263 * TTI Initialization. Default Tx timer gets us about
1264 * 250 interrupts per sec. Continuous interrupts are enabled
1265 * by default.
1266 */
541ae68f 1267 if (nic->device_type == XFRAME_II_DEVICE) {
1268 int count = (nic->config.bus_speed * 125)/2;
1269 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1270 } else {
1271
1272 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1273 }
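	/*
	 * Arithmetic note (illustrative): for an Xframe II on a 133 MHz bus,
	 * count = (133 * 125) / 2 = 8312, which matches the 0x2078 default
	 * programmed on the Xframe I path, so both device types end up with
	 * roughly the same ~250 interrupts/sec Tx timer value.
	 */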
1274 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1da177e4 1275 TTI_DATA1_MEM_TX_URNG_B(0x10) |
5e25b9dd 1276 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
541ae68f 1277 if (use_continuous_tx_intrs)
1278 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1279 writeq(val64, &bar0->tti_data1_mem);
1280
1281 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1282 TTI_DATA2_MEM_TX_UFC_B(0x20) |
5e25b9dd 1283 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1284 writeq(val64, &bar0->tti_data2_mem);
1285
1286 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1287 writeq(val64, &bar0->tti_command_mem);
1288
20346722 1289 /*
 1290 * Once the operation completes, the Strobe bit of the command
 1291 * register will be reset. We poll for this particular condition.
1292 * We wait for a maximum of 500ms for the operation to complete,
1293 * if it's not complete by then we return error.
1294 */
1295 time = 0;
1296 while (TRUE) {
1297 val64 = readq(&bar0->tti_command_mem);
1298 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1299 break;
1300 }
1301 if (time > 10) {
1302 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1303 dev->name);
1304 return -1;
1305 }
1306 msleep(50);
1307 time++;
1308 }
1309
b6e3f982 1310 if (nic->config.bimodal) {
1311 int k = 0;
1312 for (k = 0; k < config->rx_ring_num; k++) {
1313 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1314 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1315 writeq(val64, &bar0->tti_command_mem);
541ae68f 1316
541ae68f 1317 /*
b6e3f982 1318 * Once the operation completes, the Strobe bit of the command
 1319 * register will be reset. We poll for this particular condition.
1320 * We wait for a maximum of 500ms for the operation to complete,
1321 * if it's not complete by then we return error.
1322 */
1323 time = 0;
1324 while (TRUE) {
1325 val64 = readq(&bar0->tti_command_mem);
1326 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1327 break;
1328 }
1329 if (time > 10) {
1330 DBG_PRINT(ERR_DBG,
1331 "%s: TTI init Failed\n",
1332 dev->name);
1333 return -1;
1334 }
1335 time++;
1336 msleep(50);
1337 }
1338 }
541ae68f 1339 } else {
1da177e4 1340
b6e3f982 1341 /* RTI Initialization */
1342 if (nic->device_type == XFRAME_II_DEVICE) {
1343 /*
 1344 * Programmed to generate approx 500 interrupts per
1345 * second
1346 */
1347 int count = (nic->config.bus_speed * 125)/4;
1348 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1349 } else {
1350 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1351 }
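		/*
		 * Arithmetic note (illustrative): at 133 MHz this gives
		 * count = (133 * 125) / 4 = 4156, close to the 0xFFF (4095)
		 * Xframe I default and half the Tx timer value, i.e. roughly
		 * twice the interrupt rate (~500/sec vs ~250/sec).
		 */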
1352 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1353 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1354 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1da177e4 1355
b6e3f982 1356 writeq(val64, &bar0->rti_data1_mem);
1da177e4 1357
b6e3f982 1358 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1359 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1360 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1361 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1362
b6e3f982 1363 for (i = 0; i < config->rx_ring_num; i++) {
1364 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1365 | RTI_CMD_MEM_OFFSET(i);
1366 writeq(val64, &bar0->rti_command_mem);
1367
1368 /*
1369 * Once the operation completes, the Strobe bit of the
1370 * command register will be reset. We poll for this
1371 * particular condition. We wait for a maximum of 500ms
1372 * for the operation to complete, if it's not complete
1373 * by then we return error.
1374 */
1375 time = 0;
1376 while (TRUE) {
1377 val64 = readq(&bar0->rti_command_mem);
1378 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1379 break;
1380 }
1381 if (time > 10) {
1382 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1383 dev->name);
1384 return -1;
1385 }
1386 time++;
1387 msleep(50);
1388 }
1da177e4 1389 }
1390 }
1391
20346722 1392 /*
1393 * Initializing proper values as Pause threshold into all
1394 * the 8 Queues on Rx side.
1395 */
1396 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1397 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1398
1399 /* Disable RMAC PAD STRIPPING */
20346722 1400 add = (void *) &bar0->mac_cfg;
1401 val64 = readq(&bar0->mac_cfg);
1402 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1403 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1404 writel((u32) (val64), add);
1405 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1406 writel((u32) (val64 >> 32), (add + 4));
1407 val64 = readq(&bar0->mac_cfg);
1408
20346722 1409 /*
1410 * Set the time value to be inserted in the pause frame
1411 * generated by xena.
1412 */
1413 val64 = readq(&bar0->rmac_pause_cfg);
1414 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1415 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1416 writeq(val64, &bar0->rmac_pause_cfg);
1417
20346722 1418 /*
 1419 * Set the Threshold Limit for generating the pause frame.
 1420 * If the amount of data in any Queue exceeds the ratio
 1421 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
 1422 * a pause frame is generated.
1423 */
1424 val64 = 0;
1425 for (i = 0; i < 4; i++) {
1426 val64 |=
1427 (((u64) 0xFF00 | nic->mac_control.
1428 mc_pause_threshold_q0q3)
1429 << (i * 2 * 8));
1430 }
1431 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1432
1433 val64 = 0;
1434 for (i = 0; i < 4; i++) {
1435 val64 |=
1436 (((u64) 0xFF00 | nic->mac_control.
1437 mc_pause_threshold_q4q7)
1438 << (i * 2 * 8));
1439 }
1440 writeq(val64, &bar0->mc_pause_thresh_q4q7);
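	/*
	 * Worked example (illustrative): with the default threshold of 187,
	 * the ratio 187/256 means a pause frame is generated once a queue is
	 * roughly 73% full; each per-queue 16-bit field written above is
	 * (0xFF00 | threshold), replicated across the four queues of each
	 * register.
	 */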
1441
20346722 1442 /*
1443 * TxDMA will stop Read request if the number of read split has
1444 * exceeded the limit pointed by shared_splits
1445 */
1446 val64 = readq(&bar0->pic_control);
1447 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1448 writeq(val64, &bar0->pic_control);
1449
541ae68f 1450 /*
1451 * Programming the Herc to split every write transaction
1452 * that does not start on an ADB to reduce disconnects.
1453 */
1454 if (nic->device_type == XFRAME_II_DEVICE) {
1455 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1456 writeq(val64, &bar0->wreq_split_mask);
1457 }
1458
a371a07d 1459 /* Setting Link stability period to 64 ms */
1460 if (nic->device_type == XFRAME_II_DEVICE) {
1461 val64 = MISC_LINK_STABILITY_PRD(3);
1462 writeq(val64, &bar0->misc_control);
1463 }
1464
1465 return SUCCESS;
1466}
a371a07d 1467#define LINK_UP_DOWN_INTERRUPT 1
1468#define MAC_RMAC_ERR_TIMER 2
1469
1470#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1471#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1472#else
1473int s2io_link_fault_indication(nic_t *nic)
1474{
1475 if (nic->device_type == XFRAME_II_DEVICE)
1476 return LINK_UP_DOWN_INTERRUPT;
1477 else
1478 return MAC_RMAC_ERR_TIMER;
1479}
1480#endif
1da177e4 1481
20346722 1482/**
1483 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1484 * @nic: device private variable,
1485 * @mask: A mask indicating which Intr block must be modified and,
1486 * @flag: A flag indicating whether to enable or disable the Intrs.
1487 * Description: This function will either disable or enable the interrupts
20346722 1488 * depending on the flag argument. The mask argument can be used to
1489 * enable/disable any Intr block.
1490 * Return Value: NONE.
1491 */
1492
1493static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1494{
1495 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1496 register u64 val64 = 0, temp64 = 0;
1497
1498 /* Top level interrupt classification */
1499 /* PIC Interrupts */
1500 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1501 /* Enable PIC Intrs in the general intr mask register */
1502 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1503 if (flag == ENABLE_INTRS) {
1504 temp64 = readq(&bar0->general_int_mask);
1505 temp64 &= ~((u64) val64);
1506 writeq(temp64, &bar0->general_int_mask);
20346722 1507 /*
a371a07d 1508 * If it is a Hercules adapter, enable GPIO interrupts;
 1509 * otherwise disable all PCIX, Flash, MDIO, IIC and GPIO
20346722 1510 * interrupts for now.
1511 * TODO
1da177e4 1512 */
a371a07d 1513 if (s2io_link_fault_indication(nic) ==
1514 LINK_UP_DOWN_INTERRUPT ) {
1515 temp64 = readq(&bar0->pic_int_mask);
1516 temp64 &= ~((u64) PIC_INT_GPIO);
1517 writeq(temp64, &bar0->pic_int_mask);
1518 temp64 = readq(&bar0->gpio_int_mask);
1519 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1520 writeq(temp64, &bar0->gpio_int_mask);
1521 } else {
1522 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1523 }
20346722 1524 /*
1525 * No MSI Support is available presently, so TTI and
1526 * RTI interrupts are also disabled.
1527 */
1528 } else if (flag == DISABLE_INTRS) {
20346722 1529 /*
1530 * Disable PIC Intrs in the general
1531 * intr mask register
1532 */
1533 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1534 temp64 = readq(&bar0->general_int_mask);
1535 val64 |= temp64;
1536 writeq(val64, &bar0->general_int_mask);
1537 }
1538 }
1539
1540 /* DMA Interrupts */
1541 /* Enabling/Disabling Tx DMA interrupts */
1542 if (mask & TX_DMA_INTR) {
1543 /* Enable TxDMA Intrs in the general intr mask register */
1544 val64 = TXDMA_INT_M;
1545 if (flag == ENABLE_INTRS) {
1546 temp64 = readq(&bar0->general_int_mask);
1547 temp64 &= ~((u64) val64);
1548 writeq(temp64, &bar0->general_int_mask);
20346722 1549 /*
1550 * Keep all interrupts other than PFC interrupt
1551 * and PCC interrupt disabled in DMA level.
1552 */
1553 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1554 TXDMA_PCC_INT_M);
1555 writeq(val64, &bar0->txdma_int_mask);
20346722 1556 /*
1557 * Enable only the MISC error 1 interrupt in PFC block
1558 */
1559 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1560 writeq(val64, &bar0->pfc_err_mask);
20346722 1561 /*
1562 * Enable only the FB_ECC error interrupt in PCC block
1563 */
1564 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1565 writeq(val64, &bar0->pcc_err_mask);
1566 } else if (flag == DISABLE_INTRS) {
20346722 1567 /*
1568 * Disable TxDMA Intrs in the general intr mask
1569 * register
1570 */
1571 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1572 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1573 temp64 = readq(&bar0->general_int_mask);
1574 val64 |= temp64;
1575 writeq(val64, &bar0->general_int_mask);
1576 }
1577 }
1578
1579 /* Enabling/Disabling Rx DMA interrupts */
1580 if (mask & RX_DMA_INTR) {
1581 /* Enable RxDMA Intrs in the general intr mask register */
1582 val64 = RXDMA_INT_M;
1583 if (flag == ENABLE_INTRS) {
1584 temp64 = readq(&bar0->general_int_mask);
1585 temp64 &= ~((u64) val64);
1586 writeq(temp64, &bar0->general_int_mask);
20346722 1587 /*
1588 * All RxDMA block interrupts are disabled for now
1589 * TODO
1590 */
1591 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1592 } else if (flag == DISABLE_INTRS) {
20346722 1593 /*
1594 * Disable RxDMA Intrs in the general intr mask
1595 * register
1596 */
1597 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1598 temp64 = readq(&bar0->general_int_mask);
1599 val64 |= temp64;
1600 writeq(val64, &bar0->general_int_mask);
1601 }
1602 }
1603
1604 /* MAC Interrupts */
1605 /* Enabling/Disabling MAC interrupts */
1606 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1607 val64 = TXMAC_INT_M | RXMAC_INT_M;
1608 if (flag == ENABLE_INTRS) {
1609 temp64 = readq(&bar0->general_int_mask);
1610 temp64 &= ~((u64) val64);
1611 writeq(temp64, &bar0->general_int_mask);
20346722 1612 /*
1613 * All MAC block error interrupts are disabled for now
1614 * TODO
1615 */
1da177e4 1616 } else if (flag == DISABLE_INTRS) {
20346722 1617 /*
1618 * Disable MAC Intrs in the general intr mask register
1619 */
1620 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1621 writeq(DISABLE_ALL_INTRS,
1622 &bar0->mac_rmac_err_mask);
1623
1624 temp64 = readq(&bar0->general_int_mask);
1625 val64 |= temp64;
1626 writeq(val64, &bar0->general_int_mask);
1627 }
1628 }
1629
1630 /* XGXS Interrupts */
1631 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1632 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1633 if (flag == ENABLE_INTRS) {
1634 temp64 = readq(&bar0->general_int_mask);
1635 temp64 &= ~((u64) val64);
1636 writeq(temp64, &bar0->general_int_mask);
20346722 1637 /*
1da177e4 1638 * All XGXS block error interrupts are disabled for now
20346722 1639 * TODO
1640 */
1641 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1642 } else if (flag == DISABLE_INTRS) {
20346722 1643 /*
 1644 * Disable XGXS Intrs in the general intr mask register
1645 */
1646 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1647 temp64 = readq(&bar0->general_int_mask);
1648 val64 |= temp64;
1649 writeq(val64, &bar0->general_int_mask);
1650 }
1651 }
1652
1653 /* Memory Controller(MC) interrupts */
1654 if (mask & MC_INTR) {
1655 val64 = MC_INT_M;
1656 if (flag == ENABLE_INTRS) {
1657 temp64 = readq(&bar0->general_int_mask);
1658 temp64 &= ~((u64) val64);
1659 writeq(temp64, &bar0->general_int_mask);
20346722 1660 /*
5e25b9dd 1661 * Enable all MC Intrs.
1da177e4 1662 */
5e25b9dd 1663 writeq(0x0, &bar0->mc_int_mask);
1664 writeq(0x0, &bar0->mc_err_mask);
1665 } else if (flag == DISABLE_INTRS) {
1666 /*
1667 * Disable MC Intrs in the general intr mask register
1668 */
1669 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1670 temp64 = readq(&bar0->general_int_mask);
1671 val64 |= temp64;
1672 writeq(val64, &bar0->general_int_mask);
1673 }
1674 }
1675
1676
1677 /* Tx traffic interrupts */
1678 if (mask & TX_TRAFFIC_INTR) {
1679 val64 = TXTRAFFIC_INT_M;
1680 if (flag == ENABLE_INTRS) {
1681 temp64 = readq(&bar0->general_int_mask);
1682 temp64 &= ~((u64) val64);
1683 writeq(temp64, &bar0->general_int_mask);
20346722 1684 /*
1da177e4 1685 * Enable all the Tx side interrupts;
20346722 1686 * writing 0 enables all 64 TX interrupt levels.
1687 */
1688 writeq(0x0, &bar0->tx_traffic_mask);
1689 } else if (flag == DISABLE_INTRS) {
20346722 1690 /*
1691 * Disable Tx Traffic Intrs in the general intr mask
1692 * register.
1693 */
1694 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1695 temp64 = readq(&bar0->general_int_mask);
1696 val64 |= temp64;
1697 writeq(val64, &bar0->general_int_mask);
1698 }
1699 }
1700
1701 /* Rx traffic interrupts */
1702 if (mask & RX_TRAFFIC_INTR) {
1703 val64 = RXTRAFFIC_INT_M;
1704 if (flag == ENABLE_INTRS) {
1705 temp64 = readq(&bar0->general_int_mask);
1706 temp64 &= ~((u64) val64);
1707 writeq(temp64, &bar0->general_int_mask);
1708 /* writing 0 Enables all 8 RX interrupt levels */
1709 writeq(0x0, &bar0->rx_traffic_mask);
1710 } else if (flag == DISABLE_INTRS) {
20346722 1711 /*
1712 * Disable Rx Traffic Intrs in the general intr mask
1713 * register.
1714 */
1715 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1716 temp64 = readq(&bar0->general_int_mask);
1717 val64 |= temp64;
1718 writeq(val64, &bar0->general_int_mask);
1719 }
1720 }
1721}
1722
541ae68f 1723static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
20346722 1724{
1725 int ret = 0;
1726
1727 if (flag == FALSE) {
541ae68f 1728 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd 1729 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1730 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1731 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1732 ret = 1;
1733 }
541ae68f 1734 } else {
5e25b9dd 1735 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1736 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1737 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1738 ret = 1;
1739 }
20346722 1740 }
1741 } else {
541ae68f 1742 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd 1743 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1744 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1745 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1746 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1747 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1748 ret = 1;
1749 }
1750 } else {
1751 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1752 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1753 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1754 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1755 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1756 ret = 1;
1757 }
20346722 1758 }
1759 }
1760
1761 return ret;
1762}
1763/**
1764 * verify_xena_quiescence - Checks whether the H/W is ready
1765 * @val64 : Value read from adapter status register.
1766 * @flag : indicates if the adapter enable bit was ever written once
1767 * before.
 1768 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 1769 * on whether the adapter enable bit was written or not, the comparison
 1770 * differs, and the calling function passes the input argument flag to
 1771 * indicate this.
20346722 1772 * Return: 1 if Xena is quiescent
 1773 * 0 if Xena is not quiescent
1774 */
1775
20346722 1776static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1da177e4 1777{
541ae68f 1778 int ret = 0, herc;
1da177e4 1779 u64 tmp64 = ~((u64) val64);
5e25b9dd 1780 int rev_id = get_xena_rev_id(sp->pdev);
1da177e4 1781
541ae68f 1782 herc = (sp->device_type == XFRAME_II_DEVICE);
1783 if (!
1784 (tmp64 &
1785 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1786 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1787 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1788 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1789 ADAPTER_STATUS_P_PLL_LOCK))) {
541ae68f 1790 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1791 }
1792
1793 return ret;
1794}
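/*
 * Illustrative sketch of the readiness test above (not driver code).  Each
 * block reports "ready" with a set bit in the adapter status register, so
 * complementing the status word and masking with all the ready bits must
 * give zero before the PRC/PCC state is even considered:
 *
 *	u64 ready = ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
 *		    ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
 *		    ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
 *		    ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
 *		    ADAPTER_STATUS_P_PLL_LOCK;
 *	if (!(~val64 & ready))
 *		ret = check_prc_pcc_state(val64, flag, rev_id, herc);
 */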
1795
1796/**
1797 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 1798 * @sp: Pointer to device specific structure
20346722 1799 * Description :
1800 * New procedure to clear mac address reading problems on Alpha platforms
1801 *
1802 */
1803
20346722 1804void fix_mac_address(nic_t * sp)
1805{
1806 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1807 u64 val64;
1808 int i = 0;
1809
1810 while (fix_mac[i] != END_SIGN) {
1811 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1812 udelay(10);
1813 val64 = readq(&bar0->gpio_control);
1814 }
1815}
1816
1817/**
20346722 1818 * start_nic - Turns the device on
1da177e4 1819 * @nic : device private variable.
20346722 1820 * Description:
1821 * This function actually turns the device on. Before this function is
 1822 * called, all registers are configured from their reset states
1823 * and shared memory is allocated but the NIC is still quiescent. On
1824 * calling this function, the device interrupts are cleared and the NIC is
1825 * literally switched on by writing into the adapter control register.
20346722 1826 * Return Value:
1827 * SUCCESS on success and -1 on failure.
1828 */
1829
1830static int start_nic(struct s2io_nic *nic)
1831{
1832 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1833 struct net_device *dev = nic->dev;
1834 register u64 val64 = 0;
20346722 1835 u16 interruptible;
1836 u16 subid, i;
1837 mac_info_t *mac_control;
1838 struct config_param *config;
1839
1840 mac_control = &nic->mac_control;
1841 config = &nic->config;
1842
1843 /* PRC Initialization and configuration */
1844 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1845 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1846 &bar0->prc_rxd0_n[i]);
1847
1848 val64 = readq(&bar0->prc_ctrl_n[i]);
b6e3f982 1849 if (nic->config.bimodal)
1850 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1851#ifndef CONFIG_2BUFF_MODE
1852 val64 |= PRC_CTRL_RC_ENABLED;
1853#else
1854 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1855#endif
1856 writeq(val64, &bar0->prc_ctrl_n[i]);
1857 }
1858
1859#ifdef CONFIG_2BUFF_MODE
1860 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1861 val64 = readq(&bar0->rx_pa_cfg);
1862 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1863 writeq(val64, &bar0->rx_pa_cfg);
1864#endif
1865
20346722 1866 /*
1867 * Enabling MC-RLDRAM. After enabling the device, we timeout
1868 * for around 100ms, which is approximately the time required
1869 * for the device to be ready for operation.
1870 */
1871 val64 = readq(&bar0->mc_rldram_mrs);
1872 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1873 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1874 val64 = readq(&bar0->mc_rldram_mrs);
1875
20346722 1876 msleep(100); /* Delay by around 100 ms. */
1877
1878 /* Enabling ECC Protection. */
1879 val64 = readq(&bar0->adapter_control);
1880 val64 &= ~ADAPTER_ECC_EN;
1881 writeq(val64, &bar0->adapter_control);
1882
20346722 1883 /*
1884 * Clearing any possible Link state change interrupts that
1885 * could have popped up just before Enabling the card.
1886 */
1887 val64 = readq(&bar0->mac_rmac_err_reg);
1888 if (val64)
1889 writeq(val64, &bar0->mac_rmac_err_reg);
1890
20346722 1891 /*
1892 * Verify if the device is ready to be enabled, if so enable
1893 * it.
1894 */
1895 val64 = readq(&bar0->adapter_status);
20346722 1896 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1897 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1898 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1899 (unsigned long long) val64);
1900 return FAILURE;
1901 }
1902
1903 /* Enable select interrupts */
a371a07d 1904 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
1905 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1906 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1907
1908 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1909
20346722 1910 /*
1da177e4 1911 * With some switches, link might be already up at this point.
20346722 1912 * Because of this weird behavior, when we enable laser,
1913 * we may not get link. We need to handle this. We cannot
1914 * figure out which switch is misbehaving. So we are forced to
1915 * make a global change.
1916 */
1917
1918 /* Enabling Laser. */
1919 val64 = readq(&bar0->adapter_control);
1920 val64 |= ADAPTER_EOI_TX_ON;
1921 writeq(val64, &bar0->adapter_control);
1922
1923 /* SXE-002: Initialize link and activity LED */
1924 subid = nic->pdev->subsystem_device;
541ae68f 1925 if (((subid & 0xFF) >= 0x07) &&
1926 (nic->device_type == XFRAME_I_DEVICE)) {
1927 val64 = readq(&bar0->gpio_control);
1928 val64 |= 0x0000800000000000ULL;
1929 writeq(val64, &bar0->gpio_control);
1930 val64 = 0x0411040400000000ULL;
20346722 1931 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1932 }
1933
20346722 1934 /*
1935 * Don't see link state interrupts on certain switches, so
1936 * directly scheduling a link state task from here.
1937 */
1938 schedule_work(&nic->set_link_task);
1939
1940 return SUCCESS;
1941}
1942
20346722 1943/**
1944 * free_tx_buffers - Free all queued Tx buffers
1da177e4 1945 * @nic : device private variable.
20346722 1946 * Description:
1da177e4 1947 * Free all queued Tx buffers.
20346722 1948 * Return Value: void
1949*/
1950
1951static void free_tx_buffers(struct s2io_nic *nic)
1952{
1953 struct net_device *dev = nic->dev;
1954 struct sk_buff *skb;
1955 TxD_t *txdp;
1956 int i, j;
1957 mac_info_t *mac_control;
1958 struct config_param *config;
1ddc50d4 1959 int cnt = 0, frg_cnt;
1960
1961 mac_control = &nic->mac_control;
1962 config = &nic->config;
1963
1964 for (i = 0; i < config->tx_fifo_num; i++) {
1965 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
20346722 1966 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1967 list_virt_addr;
1968 skb =
1969 (struct sk_buff *) ((unsigned long) txdp->
1970 Host_Control);
1971 if (skb == NULL) {
1ddc50d4 1972 memset(txdp, 0, sizeof(TxD_t) *
1973 config->max_txds);
1974 continue;
1975 }
1ddc50d4 1976 frg_cnt = skb_shinfo(skb)->nr_frags;
1977 pci_unmap_single(nic->pdev, (dma_addr_t)
1978 txdp->Buffer_Pointer,
1979 skb->len - skb->data_len,
1980 PCI_DMA_TODEVICE);
1981 if (frg_cnt) {
1982 TxD_t *temp;
1983 temp = txdp;
1984 txdp++;
1985 for (j = 0; j < frg_cnt; j++, txdp++) {
1986 skb_frag_t *frag =
1987 &skb_shinfo(skb)->frags[j];
1988 pci_unmap_page(nic->pdev,
1989 (dma_addr_t)
1990 txdp->
1991 Buffer_Pointer,
1992 frag->size,
1993 PCI_DMA_TODEVICE);
1994 }
1995 txdp = temp;
1996 }
1da177e4 1997 dev_kfree_skb(skb);
1ddc50d4 1998 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1999 cnt++;
2000 }
2001 DBG_PRINT(INTR_DBG,
2002 "%s:forcibly freeing %d skbs on FIFO%d\n",
2003 dev->name, cnt, i);
20346722 2004 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2005 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2006 }
2007}
2008
20346722 2009/**
2010 * stop_nic - To stop the nic
1da177e4 2011 * @nic : device private variable.
20346722 2012 * Description:
2013 * This function does exactly the opposite of what the start_nic()
2014 * function does. This function is called to stop the device.
2015 * Return Value:
2016 * void.
2017 */
2018
2019static void stop_nic(struct s2io_nic *nic)
2020{
2021 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2022 register u64 val64 = 0;
2023 u16 interruptible, i;
2024 mac_info_t *mac_control;
2025 struct config_param *config;
2026
2027 mac_control = &nic->mac_control;
2028 config = &nic->config;
2029
2030 /* Disable all interrupts */
a371a07d 2031 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
2032 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2033 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2034 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2035
2036 /* Disable PRCs */
2037 for (i = 0; i < config->rx_ring_num; i++) {
2038 val64 = readq(&bar0->prc_ctrl_n[i]);
2039 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2040 writeq(val64, &bar0->prc_ctrl_n[i]);
2041 }
2042}
2043
20346722 2044/**
2045 * fill_rx_buffers - Allocates the Rx side skbs
1da177e4 2046 * @nic: device private variable
20346722 2047 * @ring_no: ring number
2048 * Description:
2049 * The function allocates Rx side skbs and puts the physical
2050 * address of these buffers into the RxD buffer pointers, so that the NIC
2051 * can DMA the received frame into these locations.
2052 * The NIC supports 3 receive modes, viz
2053 * 1. single buffer,
2054 * 2. three buffer and
 2055 * 3. five buffer mode.
20346722 2056 * Each mode defines how many fragments the received frame will be split
 2057 * up into by the NIC. In three buffer mode the frame is split into L3
 2058 * header, L4 header and L4 payload; in five buffer mode the L4 payload
 2059 * itself is further split into 3 fragments. As of now only single buffer
 2060 * mode is supported.
2061 * Return Value:
2062 * SUCCESS on success or an appropriate -ve value on failure.
2063 */
2064
20346722 2065int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2066{
2067 struct net_device *dev = nic->dev;
2068 struct sk_buff *skb;
2069 RxD_t *rxdp;
2070 int off, off1, size, block_no, block_no1;
2071 int offset, offset1;
2072 u32 alloc_tab = 0;
20346722 2073 u32 alloc_cnt;
2074 mac_info_t *mac_control;
2075 struct config_param *config;
2076#ifdef CONFIG_2BUFF_MODE
2077 RxD_t *rxdpnext;
2078 int nextblk;
20346722 2079 u64 tmp;
2080 buffAdd_t *ba;
2081 dma_addr_t rxdpphys;
2082#endif
2083#ifndef CONFIG_S2IO_NAPI
2084 unsigned long flags;
2085#endif
2086
2087 mac_control = &nic->mac_control;
2088 config = &nic->config;
20346722 2089 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2090 atomic_read(&nic->rx_bufs_left[ring_no]);
2091 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2092 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2093
2094 while (alloc_tab < alloc_cnt) {
20346722 2095 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2096 block_index;
20346722 2097 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1da177e4 2098 block_index;
20346722 2099 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2100 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2101#ifndef CONFIG_2BUFF_MODE
2102 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2103 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2104#else
2105 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2106 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2107#endif
2108
20346722 2109 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2110 block_virt_addr + off;
2111 if ((offset == offset1) && (rxdp->Host_Control)) {
2112 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2113 DBG_PRINT(INTR_DBG, " info equated\n");
2114 goto end;
2115 }
2116#ifndef CONFIG_2BUFF_MODE
2117 if (rxdp->Control_1 == END_OF_BLOCK) {
20346722 2118 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2119 block_index++;
20346722 2120 mac_control->rings[ring_no].rx_curr_put_info.
2121 block_index %= mac_control->rings[ring_no].block_count;
2122 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2123 block_index;
2124 off++;
2125 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2126 mac_control->rings[ring_no].rx_curr_put_info.offset =
2127 off;
2128 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2129 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2130 dev->name, rxdp);
2131 }
2132#ifndef CONFIG_S2IO_NAPI
2133 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2134 mac_control->rings[ring_no].put_pos =
2135 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2136 spin_unlock_irqrestore(&nic->put_lock, flags);
2137#endif
2138#else
2139 if (rxdp->Host_Control == END_OF_BLOCK) {
20346722 2140 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2141 block_index++;
20346722 2142 mac_control->rings[ring_no].rx_curr_put_info.block_index
2143 %= mac_control->rings[ring_no].block_count;
2144 block_no = mac_control->rings[ring_no].rx_curr_put_info
2145 .block_index;
2146 off = 0;
2147 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2148 dev->name, block_no,
2149 (unsigned long long) rxdp->Control_1);
20346722 2150 mac_control->rings[ring_no].rx_curr_put_info.offset =
1da177e4 2151 off;
20346722 2152 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2153 block_virt_addr;
2154 }
2155#ifndef CONFIG_S2IO_NAPI
2156 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2157 mac_control->rings[ring_no].put_pos = (block_no *
2158 (MAX_RXDS_PER_BLOCK + 1)) + off;
2159 spin_unlock_irqrestore(&nic->put_lock, flags);
2160#endif
2161#endif
2162
2163#ifndef CONFIG_2BUFF_MODE
2164 if (rxdp->Control_1 & RXD_OWN_XENA)
2165#else
2166 if (rxdp->Control_2 & BIT(0))
2167#endif
2168 {
20346722 2169 mac_control->rings[ring_no].rx_curr_put_info.
2170 offset = off;
2171 goto end;
2172 }
2173#ifdef CONFIG_2BUFF_MODE
20346722 2174 /*
 2175 * RxDs spanning cache lines will be replenished only
 2176 * if the succeeding RxD is also owned by the host. It
 2177 * will always be the ((8*i)+3) and ((8*i)+6)
 2178 * descriptors for the 48 byte descriptor. The offending
 2179 * descriptor is of course the 3rd descriptor.
2180 */
20346722 2181 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2182 block_dma_addr + (off * sizeof(RxD_t));
2183 if (((u64) (rxdpphys)) % 128 > 80) {
20346722 2184 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2185 block_virt_addr + (off + 1);
2186 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2187 nextblk = (block_no + 1) %
20346722 2188 (mac_control->rings[ring_no].block_count);
2189 rxdpnext = mac_control->rings[ring_no].rx_blocks
2190 [nextblk].block_virt_addr;
2191 }
2192 if (rxdpnext->Control_2 & BIT(0))
2193 goto end;
2194 }
2195#endif
2196
2197#ifndef CONFIG_2BUFF_MODE
2198 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2199#else
2200 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2201#endif
2202 if (!skb) {
2203 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2204 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2205 return -ENOMEM;
2206 }
2207#ifndef CONFIG_2BUFF_MODE
2208 skb_reserve(skb, NET_IP_ALIGN);
2209 memset(rxdp, 0, sizeof(RxD_t));
2210 rxdp->Buffer0_ptr = pci_map_single
2211 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2212 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2213 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2214 rxdp->Host_Control = (unsigned long) (skb);
2215 rxdp->Control_1 |= RXD_OWN_XENA;
2216 off++;
2217 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2218 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2219#else
20346722 2220 ba = &mac_control->rings[ring_no].ba[block_no][off];
1da177e4 2221 skb_reserve(skb, BUF0_LEN);
2222 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2223 if (tmp)
2224 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2225
2226 memset(rxdp, 0, sizeof(RxD_t));
2227 rxdp->Buffer2_ptr = pci_map_single
2228 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2229 PCI_DMA_FROMDEVICE);
2230 rxdp->Buffer0_ptr =
2231 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2232 PCI_DMA_FROMDEVICE);
2233 rxdp->Buffer1_ptr =
2234 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2235 PCI_DMA_FROMDEVICE);
2236
2237 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2238 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2239 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2240 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2241 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2242 rxdp->Control_1 |= RXD_OWN_XENA;
2243 off++;
20346722 2244 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2245#endif
5e25b9dd 2246 rxdp->Control_2 |= SET_RXD_MARKER;
20346722 2247
2248 atomic_inc(&nic->rx_bufs_left[ring_no]);
2249 alloc_tab++;
2250 }
2251
2252 end:
2253 return SUCCESS;
2254}
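/*
 * Minimal sketch of the single buffer mode fill done above (illustrative
 * only, error handling omitted).  The skb is sized for the MTU plus the L2
 * headers the NIC may DMA, and one RxD is armed per skb before ownership
 * is handed back to the hardware:
 *
 *	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
 *	       HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
 *	skb = dev_alloc_skb(size + NET_IP_ALIGN);
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	rxdp->Buffer0_ptr = pci_map_single(nic->pdev, skb->data, size,
 *					   PCI_DMA_FROMDEVICE);
 *	rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
 *	rxdp->Host_Control = (unsigned long) skb;
 *	rxdp->Control_1 |= RXD_OWN_XENA;
 */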
2255
2256/**
20346722 2257 * free_rx_buffers - Frees all Rx buffers
1da177e4 2258 * @sp: device private variable.
20346722 2259 * Description:
2260 * This function will free all Rx buffers allocated by host.
2261 * Return Value:
2262 * NONE.
2263 */
2264
2265static void free_rx_buffers(struct s2io_nic *sp)
2266{
2267 struct net_device *dev = sp->dev;
2268 int i, j, blk = 0, off, buf_cnt = 0;
2269 RxD_t *rxdp;
2270 struct sk_buff *skb;
2271 mac_info_t *mac_control;
2272 struct config_param *config;
2273#ifdef CONFIG_2BUFF_MODE
2274 buffAdd_t *ba;
2275#endif
2276
2277 mac_control = &sp->mac_control;
2278 config = &sp->config;
2279
2280 for (i = 0; i < config->rx_ring_num; i++) {
2281 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2282 off = j % (MAX_RXDS_PER_BLOCK + 1);
20346722 2283 rxdp = mac_control->rings[i].rx_blocks[blk].
2284 block_virt_addr + off;
2285
2286#ifndef CONFIG_2BUFF_MODE
2287 if (rxdp->Control_1 == END_OF_BLOCK) {
2288 rxdp =
2289 (RxD_t *) ((unsigned long) rxdp->
2290 Control_2);
2291 j++;
2292 blk++;
2293 }
2294#else
2295 if (rxdp->Host_Control == END_OF_BLOCK) {
2296 blk++;
2297 continue;
2298 }
2299#endif
2300
2301 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2302 memset(rxdp, 0, sizeof(RxD_t));
2303 continue;
2304 }
2305
2306 skb =
2307 (struct sk_buff *) ((unsigned long) rxdp->
2308 Host_Control);
2309 if (skb) {
2310#ifndef CONFIG_2BUFF_MODE
2311 pci_unmap_single(sp->pdev, (dma_addr_t)
2312 rxdp->Buffer0_ptr,
2313 dev->mtu +
2314 HEADER_ETHERNET_II_802_3_SIZE
2315 + HEADER_802_2_SIZE +
2316 HEADER_SNAP_SIZE,
2317 PCI_DMA_FROMDEVICE);
2318#else
20346722 2319 ba = &mac_control->rings[i].ba[blk][off];
2320 pci_unmap_single(sp->pdev, (dma_addr_t)
2321 rxdp->Buffer0_ptr,
2322 BUF0_LEN,
2323 PCI_DMA_FROMDEVICE);
2324 pci_unmap_single(sp->pdev, (dma_addr_t)
2325 rxdp->Buffer1_ptr,
2326 BUF1_LEN,
2327 PCI_DMA_FROMDEVICE);
2328 pci_unmap_single(sp->pdev, (dma_addr_t)
2329 rxdp->Buffer2_ptr,
2330 dev->mtu + BUF0_LEN + 4,
2331 PCI_DMA_FROMDEVICE);
2332#endif
2333 dev_kfree_skb(skb);
2334 atomic_dec(&sp->rx_bufs_left[i]);
2335 buf_cnt++;
2336 }
2337 memset(rxdp, 0, sizeof(RxD_t));
2338 }
20346722 2339 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2340 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2341 mac_control->rings[i].rx_curr_put_info.offset = 0;
2342 mac_control->rings[i].rx_curr_get_info.offset = 0;
2343 atomic_set(&sp->rx_bufs_left[i], 0);
2344 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2345 dev->name, buf_cnt, i);
2346 }
2347}
2348
2349/**
2350 * s2io_poll - Rx interrupt handler for NAPI support
2351 * @dev : pointer to the device structure.
20346722 2352 * @budget : The number of packets that were budgeted to be processed
 2353 * during one pass through the "Poll" function.
 2354 * Description:
 2355 * Comes into the picture only if NAPI support has been incorporated. It does
 2356 * the same thing that rx_intr_handler does, but not in an interrupt context.
 2357 * Also, it will process only a given number of packets.
2358 * Return value:
2359 * 0 on success and 1 if there are No Rx packets to be processed.
2360 */
2361
20346722 2362#if defined(CONFIG_S2IO_NAPI)
2363static int s2io_poll(struct net_device *dev, int *budget)
2364{
2365 nic_t *nic = dev->priv;
20346722 2366 int pkt_cnt = 0, org_pkts_to_process;
2367 mac_info_t *mac_control;
2368 struct config_param *config;
20346722 2369 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2370 u64 val64;
2371 int i;
1da177e4 2372
7ba013ac 2373 atomic_inc(&nic->isr_cnt);
2374 mac_control = &nic->mac_control;
2375 config = &nic->config;
2376
20346722 2377 nic->pkts_to_process = *budget;
2378 if (nic->pkts_to_process > dev->quota)
2379 nic->pkts_to_process = dev->quota;
2380 org_pkts_to_process = nic->pkts_to_process;
2381
2382 val64 = readq(&bar0->rx_traffic_int);
2383 writeq(val64, &bar0->rx_traffic_int);
2384
2385 for (i = 0; i < config->rx_ring_num; i++) {
20346722 2386 rx_intr_handler(&mac_control->rings[i]);
2387 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2388 if (!nic->pkts_to_process) {
2389 /* Quota for the current iteration has been met */
2390 goto no_rx;
1da177e4 2391 }
2392 }
2393 if (!pkt_cnt)
2394 pkt_cnt = 1;
2395
2396 dev->quota -= pkt_cnt;
2397 *budget -= pkt_cnt;
2398 netif_rx_complete(dev);
2399
2400 for (i = 0; i < config->rx_ring_num; i++) {
2401 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2402 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2403 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2404 break;
2405 }
2406 }
2407 /* Re enable the Rx interrupts. */
2408 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
7ba013ac 2409 atomic_dec(&nic->isr_cnt);
2410 return 0;
2411
20346722 2412no_rx:
2413 dev->quota -= pkt_cnt;
2414 *budget -= pkt_cnt;
2415
2416 for (i = 0; i < config->rx_ring_num; i++) {
2417 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2418 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2419 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2420 break;
2421 }
2422 }
7ba013ac 2423 atomic_dec(&nic->isr_cnt);
2424 return 1;
2425}
20346722 2426#endif
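/*
 * Sketch of the NAPI accounting used above (illustrative only).  The number
 * of packets actually processed in this poll is charged against both the
 * device quota and the caller's budget before the device is taken off the
 * poll list and Rx interrupts are re-enabled:
 *
 *	pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
 *	dev->quota -= pkt_cnt;
 *	*budget -= pkt_cnt;
 *	netif_rx_complete(dev);
 *	en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
 */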
2427
2428/**
2429 * rx_intr_handler - Rx interrupt handler
2430 * @nic: device private variable.
20346722 2431 * Description:
 2432 * If the interrupt is because of a received frame, or if the
1da177e4 2433 * receive ring contains fresh, as yet unprocessed frames, this function is
20346722 2434 * called. It picks out the RxD at which the last Rx processing had
 2435 * stopped and sends the skb to the OSM's Rx handler and then increments
2436 * the offset.
2437 * Return Value:
2438 * NONE.
2439 */
20346722 2440static void rx_intr_handler(ring_info_t *ring_data)
1da177e4 2441{
20346722 2442 nic_t *nic = ring_data->nic;
1da177e4 2443 struct net_device *dev = (struct net_device *) nic->dev;
20346722 2444 int get_block, get_offset, put_block, put_offset, ring_bufs;
2445 rx_curr_get_info_t get_info, put_info;
2446 RxD_t *rxdp;
2447 struct sk_buff *skb;
20346722 2448#ifndef CONFIG_S2IO_NAPI
2449 int pkt_cnt = 0;
1da177e4 2450#endif
7ba013ac 2451 spin_lock(&nic->rx_lock);
2452 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2453 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2454 __FUNCTION__, dev->name);
2455 spin_unlock(&nic->rx_lock);
2456 }
2457
20346722 2458 get_info = ring_data->rx_curr_get_info;
2459 get_block = get_info.block_index;
2460 put_info = ring_data->rx_curr_put_info;
2461 put_block = put_info.block_index;
2462 ring_bufs = get_info.ring_len+1;
2463 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
1da177e4 2464 get_info.offset;
20346722 2465 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2466 get_info.offset;
2467#ifndef CONFIG_S2IO_NAPI
2468 spin_lock(&nic->put_lock);
2469 put_offset = ring_data->put_pos;
2470 spin_unlock(&nic->put_lock);
2471#else
2472 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2473 put_info.offset;
2474#endif
5e25b9dd 2475 while (RXD_IS_UP2DT(rxdp) &&
2476 (((get_offset + 1) % ring_bufs) != put_offset)) {
20346722 2477 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2478 if (skb == NULL) {
2479 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2480 dev->name);
2481 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
7ba013ac 2482 spin_unlock(&nic->rx_lock);
20346722 2483 return;
1da177e4 2484 }
20346722 2485#ifndef CONFIG_2BUFF_MODE
2486 pci_unmap_single(nic->pdev, (dma_addr_t)
2487 rxdp->Buffer0_ptr,
2488 dev->mtu +
2489 HEADER_ETHERNET_II_802_3_SIZE +
2490 HEADER_802_2_SIZE +
2491 HEADER_SNAP_SIZE,
2492 PCI_DMA_FROMDEVICE);
1da177e4 2493#else
20346722 2494 pci_unmap_single(nic->pdev, (dma_addr_t)
2495 rxdp->Buffer0_ptr,
2496 BUF0_LEN, PCI_DMA_FROMDEVICE);
2497 pci_unmap_single(nic->pdev, (dma_addr_t)
2498 rxdp->Buffer1_ptr,
2499 BUF1_LEN, PCI_DMA_FROMDEVICE);
2500 pci_unmap_single(nic->pdev, (dma_addr_t)
2501 rxdp->Buffer2_ptr,
2502 dev->mtu + BUF0_LEN + 4,
2503 PCI_DMA_FROMDEVICE);
2504#endif
2505 rx_osm_handler(ring_data, rxdp);
2506 get_info.offset++;
2507 ring_data->rx_curr_get_info.offset =
1da177e4 2508 get_info.offset;
20346722 2509 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2510 get_info.offset;
2511 if (get_info.offset &&
2512 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2513 get_info.offset = 0;
2514 ring_data->rx_curr_get_info.offset
2515 = get_info.offset;
2516 get_block++;
2517 get_block %= ring_data->block_count;
2518 ring_data->rx_curr_get_info.block_index
2519 = get_block;
2520 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2521 }
1da177e4 2522
20346722 2523 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1da177e4 2524 get_info.offset;
20346722 2525#ifdef CONFIG_S2IO_NAPI
2526 nic->pkts_to_process -= 1;
2527 if (!nic->pkts_to_process)
2528 break;
2529#else
2530 pkt_cnt++;
2531 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2532 break;
20346722 2533#endif
1da177e4 2534 }
7ba013ac 2535 spin_unlock(&nic->rx_lock);
1da177e4 2536}
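/*
 * Sketch of the ring arithmetic used above (illustrative only).  The
 * (block, offset) pair is flattened into one descriptor index so the get
 * side can be compared with the put side using a single modulo test;
 * processing stops one slot before the position the fill routine owns:
 *
 *	int get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
 *			 get_info.offset;
 *	int put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
 *			 put_info.offset;
 *	int caught_up = (((get_offset + 1) % ring_bufs) == put_offset);
 */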
20346722 2537
2538/**
2539 * tx_intr_handler - Transmit interrupt handler
2540 * @nic : device private variable
20346722 2541 * Description:
2542 * If an interrupt was raised to indicate DMA complete of the
2543 * Tx packet, this function is called. It identifies the last TxD
 2544 * whose buffer was freed and frees all skbs whose data have already
 2545 * been DMA'ed into the NIC's internal memory.
2546 * Return Value:
2547 * NONE
2548 */
2549
20346722 2550static void tx_intr_handler(fifo_info_t *fifo_data)
1da177e4 2551{
20346722 2552 nic_t *nic = fifo_data->nic;
2553 struct net_device *dev = (struct net_device *) nic->dev;
2554 tx_curr_get_info_t get_info, put_info;
2555 struct sk_buff *skb;
2556 TxD_t *txdlp;
1da177e4 2557 u16 j, frg_cnt;
1da177e4 2558
20346722 2559 get_info = fifo_data->tx_curr_get_info;
2560 put_info = fifo_data->tx_curr_put_info;
2561 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2562 list_virt_addr;
2563 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2564 (get_info.offset != put_info.offset) &&
2565 (txdlp->Host_Control)) {
2566 /* Check for TxD errors */
2567 if (txdlp->Control_1 & TXD_T_CODE) {
2568 unsigned long long err;
2569 err = txdlp->Control_1 & TXD_T_CODE;
2570 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2571 err);
2572 }
1da177e4 2573
20346722 2574 skb = (struct sk_buff *) ((unsigned long)
2575 txdlp->Host_Control);
2576 if (skb == NULL) {
2577 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2578 __FUNCTION__);
2579 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2580 return;
2581 }
2582
2583 frg_cnt = skb_shinfo(skb)->nr_frags;
2584 nic->tx_pkt_count++;
2585
2586 pci_unmap_single(nic->pdev, (dma_addr_t)
2587 txdlp->Buffer_Pointer,
2588 skb->len - skb->data_len,
2589 PCI_DMA_TODEVICE);
2590 if (frg_cnt) {
2591 TxD_t *temp;
2592 temp = txdlp;
2593 txdlp++;
2594 for (j = 0; j < frg_cnt; j++, txdlp++) {
2595 skb_frag_t *frag =
2596 &skb_shinfo(skb)->frags[j];
2597 pci_unmap_page(nic->pdev,
2598 (dma_addr_t)
2599 txdlp->
2600 Buffer_Pointer,
2601 frag->size,
2602 PCI_DMA_TODEVICE);
1da177e4 2603 }
20346722 2604 txdlp = temp;
1da177e4 2605 }
20346722 2606 memset(txdlp, 0,
2607 (sizeof(TxD_t) * fifo_data->max_txds));
2608
2609 /* Updating the statistics block */
20346722 2610 nic->stats.tx_bytes += skb->len;
2611 dev_kfree_skb_irq(skb);
2612
2613 get_info.offset++;
2614 get_info.offset %= get_info.fifo_len + 1;
2615 txdlp = (TxD_t *) fifo_data->list_info
2616 [get_info.offset].list_virt_addr;
2617 fifo_data->tx_curr_get_info.offset =
2618 get_info.offset;
2619 }
2620
2621 spin_lock(&nic->tx_lock);
2622 if (netif_queue_stopped(dev))
2623 netif_wake_queue(dev);
2624 spin_unlock(&nic->tx_lock);
2625}
2626
20346722 2627/**
 2628 * alarm_intr_handler - Alarm Interrupt handler
 2629 * @nic: device private variable
20346722 2630 * Description: If the interrupt was neither because of Rx packet nor Tx
1da177e4 2631 * complete, this function is called. If the interrupt was to indicate
20346722 2632 * a loss of link, the OSM link status handler is invoked. For any other
 2633 * alarm interrupt, the block that raised the interrupt is displayed
 2634 * and a H/W reset is issued.
2635 * Return Value:
2636 * NONE
2637*/
2638
2639static void alarm_intr_handler(struct s2io_nic *nic)
2640{
2641 struct net_device *dev = (struct net_device *) nic->dev;
2642 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2643 register u64 val64 = 0, err_reg = 0;
2644
2645 /* Handling link status change error Intr */
a371a07d 2646 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2647 err_reg = readq(&bar0->mac_rmac_err_reg);
2648 writeq(err_reg, &bar0->mac_rmac_err_reg);
2649 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2650 schedule_work(&nic->set_link_task);
2651 }
2652 }
2653
5e25b9dd 2654 /* Handling Ecc errors */
2655 val64 = readq(&bar0->mc_err_reg);
2656 writeq(val64, &bar0->mc_err_reg);
2657 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2658 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
7ba013ac 2659 nic->mac_control.stats_info->sw_stat.
2660 double_ecc_errs++;
5e25b9dd 2661 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2662 dev->name);
2663 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2664 netif_stop_queue(dev);
2665 schedule_work(&nic->rst_timer_task);
2666 } else {
7ba013ac 2667 nic->mac_control.stats_info->sw_stat.
2668 single_ecc_errs++;
5e25b9dd 2669 }
2670 }
2671
2672 /* In case of a serious error, the device will be Reset. */
2673 val64 = readq(&bar0->serr_source);
2674 if (val64 & SERR_SOURCE_ANY) {
2675 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2676 DBG_PRINT(ERR_DBG, "serious error!!\n");
2677 netif_stop_queue(dev);
2678 schedule_work(&nic->rst_timer_task);
2679 }
2680
2681 /*
2682 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2683 * Error occurs, the adapter will be recycled by disabling the
20346722 2684 * adapter enable bit and enabling it again after the device
2685 * becomes Quiescent.
2686 */
2687 val64 = readq(&bar0->pcc_err_reg);
2688 writeq(val64, &bar0->pcc_err_reg);
2689 if (val64 & PCC_FB_ECC_DB_ERR) {
2690 u64 ac = readq(&bar0->adapter_control);
2691 ac &= ~(ADAPTER_CNTL_EN);
2692 writeq(ac, &bar0->adapter_control);
2693 ac = readq(&bar0->adapter_control);
2694 schedule_work(&nic->set_link_task);
2695 }
2696
2697 /* Other type of interrupts are not being handled now, TODO */
2698}
2699
20346722 2700/**
1da177e4 2701 * wait_for_cmd_complete - waits for a command to complete.
20346722 2702 * @sp : private member of the device structure, which is a pointer to the
1da177e4 2703 * s2io_nic structure.
20346722 2704 * Description: Function that waits for a command written into the RMAC
 2705 * ADDR DATA registers to be completed and returns either success or
20346722 2706 * error depending on whether the command completed or not.
2707 * Return value:
2708 * SUCCESS on success and FAILURE on failure.
2709 */
2710
20346722 2711int wait_for_cmd_complete(nic_t * sp)
2712{
2713 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2714 int ret = FAILURE, cnt = 0;
2715 u64 val64;
2716
2717 while (TRUE) {
2718 val64 = readq(&bar0->rmac_addr_cmd_mem);
2719 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2720 ret = SUCCESS;
2721 break;
2722 }
2723 msleep(50);
2724 if (cnt++ > 10)
2725 break;
2726 }
2727
2728 return ret;
2729}
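/*
 * Typical usage of the helper above (illustrative sketch; the error report
 * is added here for illustration and is not in the driver).  A command is
 * written to rmac_addr_cmd_mem with the strobe bit set and the caller then
 * waits for the strobe to clear before trusting the result, exactly as the
 * multicast path further down does:
 *
 *	val64 = RMAC_ADDR_CMD_MEM_WE |
 *		RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
 *		RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
 *	writeq(val64, &bar0->rmac_addr_cmd_mem);
 *	if (wait_for_cmd_complete(sp) == FAILURE)
 *		DBG_PRINT(ERR_DBG, "RMAC ADDR command timed out\n");
 */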
2730
20346722 2731/**
2732 * s2io_reset - Resets the card.
2733 * @sp : private member of the device structure.
2734 * Description: Function to Reset the card. This function then also
20346722 2735 * restores the previously saved PCI configuration space registers as
2736 * the card reset also resets the configuration space.
2737 * Return value:
2738 * void.
2739 */
2740
20346722 2741void s2io_reset(nic_t * sp)
2742{
2743 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2744 u64 val64;
5e25b9dd 2745 u16 subid, pci_cmd;
2746
2747 val64 = SW_RESET_ALL;
2748 writeq(val64, &bar0->sw_reset);
2749
20346722 2750 /*
2751 * At this stage, if the PCI write is indeed completed, the
2752 * card is reset and so is the PCI Config space of the device.
2753 * So a read cannot be issued at this stage on any of the
2754 * registers to ensure the write into "sw_reset" register
2755 * has gone through.
2756 * Question: Is there any system call that will explicitly force
2757 * all the write commands still pending on the bus to be pushed
2758 * through?
 2759 * As of now I'm just giving a 250ms delay and hoping that the
2760 * PCI write to sw_reset register is done by this time.
2761 */
2762 msleep(250);
2763
541ae68f 2764 if (!(sp->device_type & XFRAME_II_DEVICE)) {
1da177e4 2765 /* Restore the PCI state saved during initialization. */
541ae68f 2766 pci_restore_state(sp->pdev);
2767 } else {
2768 pci_set_master(sp->pdev);
2769 }
2770 s2io_init_pci(sp);
2771
2772 msleep(250);
2773
20346722 2774 /* Set swapper to enable I/O register access */
2775 s2io_set_swapper(sp);
2776
5e25b9dd 2777 /* Clear certain PCI/PCI-X fields after reset */
2778 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2779 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2780 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2781
2782 val64 = readq(&bar0->txpic_int_reg);
2783 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2784 writeq(val64, &bar0->txpic_int_reg);
2785
2786 /* Clearing PCIX Ecc status register */
2787 pci_write_config_dword(sp->pdev, 0x68, 0);
2788
20346722 2789 /* Reset device statistics maintained by OS */
2790 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2791
2792 /* SXE-002: Configure link and activity LED to turn it off */
2793 subid = sp->pdev->subsystem_device;
541ae68f 2794 if (((subid & 0xFF) >= 0x07) &&
2795 (sp->device_type == XFRAME_I_DEVICE)) {
2796 val64 = readq(&bar0->gpio_control);
2797 val64 |= 0x0000800000000000ULL;
2798 writeq(val64, &bar0->gpio_control);
2799 val64 = 0x0411040400000000ULL;
20346722 2800 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2801 }
2802
541ae68f 2803 /*
 2804 * Clear spurious ECC interrupts that would have occurred on
2805 * XFRAME II cards after reset.
2806 */
2807 if (sp->device_type == XFRAME_II_DEVICE) {
2808 val64 = readq(&bar0->pcc_err_reg);
2809 writeq(val64, &bar0->pcc_err_reg);
2810 }
2811
2812 sp->device_enabled_once = FALSE;
2813}
2814
2815/**
20346722 2816 * s2io_set_swapper - to set the swapper control on the card
2817 * @sp : private member of the device structure,
1da177e4 2818 * pointer to the s2io_nic structure.
20346722 2819 * Description: Function to set the swapper control on the card
2820 * correctly depending on the 'endianness' of the system.
2821 * Return value:
2822 * SUCCESS on success and FAILURE on failure.
2823 */
2824
20346722 2825int s2io_set_swapper(nic_t * sp)
2826{
2827 struct net_device *dev = sp->dev;
2828 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2829 u64 val64, valt, valr;
2830
20346722 2831 /*
2832 * Set proper endian settings and verify the same by reading
2833 * the PIF Feed-back register.
2834 */
2835
2836 val64 = readq(&bar0->pif_rd_swapper_fb);
2837 if (val64 != 0x0123456789ABCDEFULL) {
2838 int i = 0;
2839 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2840 0x8100008181000081ULL, /* FE=1, SE=0 */
2841 0x4200004242000042ULL, /* FE=0, SE=1 */
2842 0}; /* FE=0, SE=0 */
2843
2844 while(i<4) {
2845 writeq(value[i], &bar0->swapper_ctrl);
2846 val64 = readq(&bar0->pif_rd_swapper_fb);
2847 if (val64 == 0x0123456789ABCDEFULL)
2848 break;
2849 i++;
2850 }
2851 if (i == 4) {
2852 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2853 dev->name);
2854 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2855 (unsigned long long) val64);
2856 return FAILURE;
2857 }
2858 valr = value[i];
2859 } else {
2860 valr = readq(&bar0->swapper_ctrl);
2861 }
2862
2863 valt = 0x0123456789ABCDEFULL;
2864 writeq(valt, &bar0->xmsi_address);
2865 val64 = readq(&bar0->xmsi_address);
2866
2867 if(val64 != valt) {
2868 int i = 0;
2869 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2870 0x0081810000818100ULL, /* FE=1, SE=0 */
2871 0x0042420000424200ULL, /* FE=0, SE=1 */
2872 0}; /* FE=0, SE=0 */
2873
2874 while(i<4) {
2875 writeq((value[i] | valr), &bar0->swapper_ctrl);
2876 writeq(valt, &bar0->xmsi_address);
2877 val64 = readq(&bar0->xmsi_address);
2878 if(val64 == valt)
2879 break;
2880 i++;
2881 }
2882 if(i == 4) {
20346722 2883 unsigned long long x = val64;
1da177e4 2884 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
20346722 2885 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2886 return FAILURE;
2887 }
2888 }
2889 val64 = readq(&bar0->swapper_ctrl);
2890 val64 &= 0xFFFF000000000000ULL;
2891
2892#ifdef __BIG_ENDIAN
20346722 2893 /*
2894 * The device by default set to a big endian format, so a
2895 * big endian driver need not set anything.
2896 */
2897 val64 |= (SWAPPER_CTRL_TXP_FE |
2898 SWAPPER_CTRL_TXP_SE |
2899 SWAPPER_CTRL_TXD_R_FE |
2900 SWAPPER_CTRL_TXD_W_FE |
2901 SWAPPER_CTRL_TXF_R_FE |
2902 SWAPPER_CTRL_RXD_R_FE |
2903 SWAPPER_CTRL_RXD_W_FE |
2904 SWAPPER_CTRL_RXF_W_FE |
2905 SWAPPER_CTRL_XMSI_FE |
2906 SWAPPER_CTRL_XMSI_SE |
2907 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2908 writeq(val64, &bar0->swapper_ctrl);
2909#else
20346722 2910 /*
1da177e4 2911 * Initially we enable all bits to make it accessible by the
20346722 2912 * driver, then we selectively enable only those bits that
2913 * we want to set.
2914 */
2915 val64 |= (SWAPPER_CTRL_TXP_FE |
2916 SWAPPER_CTRL_TXP_SE |
2917 SWAPPER_CTRL_TXD_R_FE |
2918 SWAPPER_CTRL_TXD_R_SE |
2919 SWAPPER_CTRL_TXD_W_FE |
2920 SWAPPER_CTRL_TXD_W_SE |
2921 SWAPPER_CTRL_TXF_R_FE |
2922 SWAPPER_CTRL_RXD_R_FE |
2923 SWAPPER_CTRL_RXD_R_SE |
2924 SWAPPER_CTRL_RXD_W_FE |
2925 SWAPPER_CTRL_RXD_W_SE |
2926 SWAPPER_CTRL_RXF_W_FE |
2927 SWAPPER_CTRL_XMSI_FE |
2928 SWAPPER_CTRL_XMSI_SE |
2929 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2930 writeq(val64, &bar0->swapper_ctrl);
2931#endif
2932 val64 = readq(&bar0->swapper_ctrl);
2933
20346722 2934 /*
2935 * Verifying if endian settings are accurate by reading a
2936 * feedback register.
2937 */
2938 val64 = readq(&bar0->pif_rd_swapper_fb);
2939 if (val64 != 0x0123456789ABCDEFULL) {
 2940 /* Endian settings are incorrect, calls for another look. */
2941 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2942 dev->name);
2943 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2944 (unsigned long long) val64);
2945 return FAILURE;
2946 }
2947
2948 return SUCCESS;
2949}
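/*
 * Illustrative sketch of the feedback check used above (not driver code).
 * The PIF feed-back register reads back a fixed 64-bit pattern only when
 * the swapper is programmed correctly for this host, so it doubles as a
 * self test after each candidate swapper_ctrl value is tried:
 *
 *	writeq(value[i], &bar0->swapper_ctrl);
 *	if (readq(&bar0->pif_rd_swapper_fb) == 0x0123456789ABCDEFULL)
 *		break;
 */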
2950
2951/* ********************************************************* *
2952 * Functions defined below concern the OS part of the driver *
2953 * ********************************************************* */
2954
20346722 2955/**
2956 * s2io_open - open entry point of the driver
2957 * @dev : pointer to the device structure.
2958 * Description:
2959 * This function is the open entry point of the driver. It mainly calls a
2960 * function to allocate Rx buffers and inserts them into the buffer
20346722 2961 * descriptors and then enables the Rx part of the NIC.
2962 * Return value:
2963 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2964 * file on failure.
2965 */
2966
20346722 2967int s2io_open(struct net_device *dev)
2968{
2969 nic_t *sp = dev->priv;
2970 int err = 0;
2971
20346722 2972 /*
2973 * Make sure you have link off by default every time
2974 * Nic is initialized
2975 */
2976 netif_carrier_off(dev);
a371a07d 2977 sp->last_link_state = LINK_DOWN;
2978
2979 /* Initialize H/W and enable interrupts */
2980 if (s2io_card_up(sp)) {
2981 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2982 dev->name);
20346722 2983 err = -ENODEV;
2984 goto hw_init_failed;
2985 }
2986
2987 /* After proper initialization of H/W, register ISR */
20346722 2988 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2989 sp->name, dev);
2990 if (err) {
2991 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2992 dev->name);
20346722 2993 goto isr_registration_failed;
2994 }
2995
2996 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2997 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
20346722 2998 err = -ENODEV;
2999 goto setting_mac_address_failed;
3000 }
3001
3002 netif_start_queue(dev);
3003 return 0;
20346722 3004
3005setting_mac_address_failed:
3006 free_irq(sp->pdev->irq, dev);
3007isr_registration_failed:
25fff88e 3008 del_timer_sync(&sp->alarm_timer);
20346722 3009 s2io_reset(sp);
3010hw_init_failed:
3011 return err;
3012}
3013
3014/**
3015 * s2io_close -close entry point of the driver
3016 * @dev : device pointer.
3017 * Description:
3018 * This is the stop entry point of the driver. It needs to undo exactly
 3019 * whatever was done by the open entry point, thus it's usually referred to
 3020 * as the close function. Among other things this function mainly stops the
3021 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3022 * Return value:
3023 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3024 * file on failure.
3025 */
3026
20346722 3027int s2io_close(struct net_device *dev)
3028{
3029 nic_t *sp = dev->priv;
3030 flush_scheduled_work();
3031 netif_stop_queue(dev);
3032 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3033 s2io_card_down(sp);
3034
20346722 3035 free_irq(sp->pdev->irq, dev);
3036 sp->device_close_flag = TRUE; /* Device is shut down. */
3037 return 0;
3038}
3039
3040/**
 3041 * s2io_xmit - Tx entry point of the driver
3042 * @skb : the socket buffer containing the Tx data.
3043 * @dev : device pointer.
3044 * Description :
3045 * This function is the Tx entry point of the driver. S2IO NIC supports
3046 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 3047 * NOTE: when the device can't queue the pkt, just the trans_start variable
 3048 * will not be updated.
3049 * Return value:
3050 * 0 on success & 1 on failure.
3051 */
3052
20346722 3053int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3054{
3055 nic_t *sp = dev->priv;
3056 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3057 register u64 val64;
3058 TxD_t *txdp;
3059 TxFIFO_element_t __iomem *tx_fifo;
3060 unsigned long flags;
3061#ifdef NETIF_F_TSO
3062 int mss;
3063#endif
be3a6b02 3064 u16 vlan_tag = 0;
3065 int vlan_priority = 0;
3066 mac_info_t *mac_control;
3067 struct config_param *config;
3068
3069 mac_control = &sp->mac_control;
3070 config = &sp->config;
3071
20346722 3072 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
1da177e4 3073 spin_lock_irqsave(&sp->tx_lock, flags);
1da177e4 3074 if (atomic_read(&sp->card_state) == CARD_DOWN) {
20346722 3075 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3076 dev->name);
3077 spin_unlock_irqrestore(&sp->tx_lock, flags);
20346722 3078 dev_kfree_skb(skb);
3079 return 0;
3080 }
3081
3082 queue = 0;
1da177e4 3083
be3a6b02 3084 /* Get Fifo number to Transmit based on vlan priority */
3085 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3086 vlan_tag = vlan_tx_tag_get(skb);
3087 vlan_priority = vlan_tag >> 13;
3088 queue = config->fifo_mapping[vlan_priority];
3089 }
3090
20346722 3091 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3092 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3093 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3094 list_virt_addr;
3095
3096 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3097 /* Avoid "put" pointer going beyond "get" pointer */
3098 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3099 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
3100 netif_stop_queue(dev);
3101 dev_kfree_skb(skb);
3102 spin_unlock_irqrestore(&sp->tx_lock, flags);
3103 return 0;
3104 }
3105#ifdef NETIF_F_TSO
3106 mss = skb_shinfo(skb)->tso_size;
3107 if (mss) {
3108 txdp->Control_1 |= TXD_TCP_LSO_EN;
3109 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3110 }
3111#endif
3112
3113 frg_cnt = skb_shinfo(skb)->nr_frags;
3114 frg_len = skb->len - skb->data_len;
3115
3116 txdp->Buffer_Pointer = pci_map_single
3117 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
20346722 3118 txdp->Host_Control = (unsigned long) skb;
3119 if (skb->ip_summed == CHECKSUM_HW) {
3120 txdp->Control_2 |=
3121 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3122 TXD_TX_CKO_UDP_EN);
3123 }
3124
3125 txdp->Control_2 |= config->tx_intr_type;
d8892c6e 3126
be3a6b02 3127 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3128 txdp->Control_2 |= TXD_VLAN_ENABLE;
3129 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3130 }
3131
3132 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3133 TXD_GATHER_CODE_FIRST);
3134 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3135
3136 /* For fragmented SKB. */
3137 for (i = 0; i < frg_cnt; i++) {
3138 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3139 txdp++;
3140 txdp->Buffer_Pointer = (u64) pci_map_page
3141 (sp->pdev, frag->page, frag->page_offset,
3142 frag->size, PCI_DMA_TODEVICE);
3143 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3144 }
3145 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3146
3147 tx_fifo = mac_control->tx_FIFO_start[queue];
20346722 3148 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3149 writeq(val64, &tx_fifo->TxDL_Pointer);
3150
fe113638 3151 wmb();
3152
3153 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3154 TX_FIFO_LAST_LIST);
20346722 3155
3156#ifdef NETIF_F_TSO
3157 if (mss)
3158 val64 |= TX_FIFO_SPECIAL_FUNC;
3159#endif
3160 writeq(val64, &tx_fifo->List_Control);
3161
1da177e4 3162 put_off++;
20346722 3163 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3164 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3165
3166 /* Avoid "put" pointer going beyond "get" pointer */
3167 if (((put_off + 1) % queue_len) == get_off) {
3168 DBG_PRINT(TX_DBG,
3169 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3170 put_off, get_off);
3171 netif_stop_queue(dev);
3172 }
3173
3174 dev->trans_start = jiffies;
3175 spin_unlock_irqrestore(&sp->tx_lock, flags);
3176
3177 return 0;
3178}
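/*
 * Sketch of the TxD ring "full" test used above (illustrative only, lock
 * handling omitted).  The FIFO is treated as full one slot early, so the
 * put pointer can never run over the get pointer and the queue is stalled
 * until Tx completions free descriptors again:
 *
 *	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
 *	if (txdp->Host_Control ||
 *	    (((put_off + 1) % queue_len) == get_off)) {
 *		netif_stop_queue(dev);
 *		dev_kfree_skb(skb);
 *		return 0;
 *	}
 */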
3179
25fff88e 3180static void
3181s2io_alarm_handle(unsigned long data)
3182{
3183 nic_t *sp = (nic_t *)data;
3184
3185 alarm_intr_handler(sp);
3186 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3187}
3188
a371a07d 3189static void s2io_txpic_intr_handle(nic_t *sp)
3190{
3191 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
3192 u64 val64;
3193
3194 val64 = readq(&bar0->pic_int_status);
3195 if (val64 & PIC_INT_GPIO) {
3196 val64 = readq(&bar0->gpio_int_reg);
3197 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3198 (val64 & GPIO_INT_REG_LINK_UP)) {
3199 val64 |= GPIO_INT_REG_LINK_DOWN;
3200 val64 |= GPIO_INT_REG_LINK_UP;
3201 writeq(val64, &bar0->gpio_int_reg);
3202 goto masking;
3203 }
3204
3205 if (((sp->last_link_state == LINK_UP) &&
3206 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3207 ((sp->last_link_state == LINK_DOWN) &&
3208 (val64 & GPIO_INT_REG_LINK_UP))) {
3209 val64 = readq(&bar0->gpio_int_mask);
3210 val64 |= GPIO_INT_MASK_LINK_DOWN;
3211 val64 |= GPIO_INT_MASK_LINK_UP;
3212 writeq(val64, &bar0->gpio_int_mask);
3213 s2io_set_link((unsigned long)sp);
3214 }
3215masking:
3216 if (sp->last_link_state == LINK_UP) {
3217 /*enable down interrupt */
3218 val64 = readq(&bar0->gpio_int_mask);
3219 /* unmasks link down intr */
3220 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3221 /* masks link up intr */
3222 val64 |= GPIO_INT_MASK_LINK_UP;
3223 writeq(val64, &bar0->gpio_int_mask);
3224 } else {
3225 /*enable UP Interrupt */
3226 val64 = readq(&bar0->gpio_int_mask);
3227 /* unmasks link up interrupt */
3228 val64 &= ~GPIO_INT_MASK_LINK_UP;
3229 /* masks link down interrupt */
3230 val64 |= GPIO_INT_MASK_LINK_DOWN;
3231 writeq(val64, &bar0->gpio_int_mask);
3232 }
3233 }
3234}
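/*
 * Sketch of the GPIO mask toggling above (illustrative only).  After each
 * link transition only the interrupt for the opposite transition is left
 * unmasked, so LINK_UP and LINK_DOWN events always alternate:
 *
 *	val64 = readq(&bar0->gpio_int_mask);
 *	if (sp->last_link_state == LINK_UP) {
 *		val64 &= ~GPIO_INT_MASK_LINK_DOWN;
 *		val64 |= GPIO_INT_MASK_LINK_UP;
 *	} else {
 *		val64 &= ~GPIO_INT_MASK_LINK_UP;
 *		val64 |= GPIO_INT_MASK_LINK_DOWN;
 *	}
 *	writeq(val64, &bar0->gpio_int_mask);
 */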
3235
3236/**
 3237 * s2io_isr - ISR handler of the device.
3238 * @irq: the irq of the device.
3239 * @dev_id: a void pointer to the dev structure of the NIC.
3240 * @pt_regs: pointer to the registers pushed on the stack.
20346722 3241 * Description: This function is the ISR handler of the device. It
3242 * identifies the reason for the interrupt and calls the relevant
 3243 * service routines. As a contingency measure, this ISR allocates the
3244 * recv buffers, if their numbers are below the panic value which is
3245 * presently set to 25% of the original number of rcv buffers allocated.
3246 * Return value:
20346722 3247 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
3248 * IRQ_NONE: will be returned if interrupt is not from our device
3249 */
3250static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3251{
3252 struct net_device *dev = (struct net_device *) dev_id;
3253 nic_t *sp = dev->priv;
3254 XENA_dev_config_t __iomem *bar0 = sp->bar0;
20346722 3255 int i;
fe113638 3256 u64 reason = 0, val64;
3257 mac_info_t *mac_control;
3258 struct config_param *config;
3259
7ba013ac 3260 atomic_inc(&sp->isr_cnt);
3261 mac_control = &sp->mac_control;
3262 config = &sp->config;
3263
20346722 3264 /*
3265 * Identify the cause for interrupt and call the appropriate
3266 * interrupt handler. Causes for the interrupt could be;
3267 * 1. Rx of packet.
3268 * 2. Tx complete.
3269 * 3. Link down.
20346722 3270 * 4. Error in any functional blocks of the NIC.
3271 */
3272 reason = readq(&bar0->general_int_status);
3273
3274 if (!reason) {
3275 /* The interrupt was not raised by Xena. */
7ba013ac 3276 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3277 return IRQ_NONE;
3278 }
3279
3280#ifdef CONFIG_S2IO_NAPI
3281 if (reason & GEN_INTR_RXTRAFFIC) {
3282 if (netif_rx_schedule_prep(dev)) {
3283 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3284 DISABLE_INTRS);
3285 __netif_rx_schedule(dev);
3286 }
3287 }
3288#else
3289 /* If Intr is because of Rx Traffic */
3290 if (reason & GEN_INTR_RXTRAFFIC) {
fe113638 3291 /*
3292 * rx_traffic_int reg is an R1 register, writing all 1's
 3293 * will ensure that the actual interrupt causing bit gets
3294 * cleared and hence a read can be avoided.
3295 */
3296 val64 = 0xFFFFFFFFFFFFFFFFULL;
3297 writeq(val64, &bar0->rx_traffic_int);
20346722 3298 for (i = 0; i < config->rx_ring_num; i++) {
3299 rx_intr_handler(&mac_control->rings[i]);
3300 }
3301 }
3302#endif
3303
20346722 3304 /* If Intr is because of Tx Traffic */
3305 if (reason & GEN_INTR_TXTRAFFIC) {
fe113638 3306 /*
3307 * tx_traffic_int reg is an R1 register, writing all 1's
 3308 * will ensure that the actual interrupt causing bit gets
3309 * cleared and hence a read can be avoided.
3310 */
3311 val64 = 0xFFFFFFFFFFFFFFFFULL;
3312 writeq(val64, &bar0->tx_traffic_int);
3313
20346722 3314 for (i = 0; i < config->tx_fifo_num; i++)
3315 tx_intr_handler(&mac_control->fifos[i]);
3316 }
3317
a371a07d 3318 if (reason & GEN_INTR_TXPIC)
3319 s2io_txpic_intr_handle(sp);
20346722 3320 /*
3321 * If the Rx buffer count is below the panic threshold then
3322 * reallocate the buffers from the interrupt handler itself,
3323 * else schedule a tasklet to reallocate the buffers.
3324 */
3325#ifndef CONFIG_S2IO_NAPI
3326 for (i = 0; i < config->rx_ring_num; i++) {
20346722 3327 int ret;
3328 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3329 int level = rx_buffer_level(sp, rxb_size, i);
3330
3331 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3332 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3333 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3334 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3335 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3336 dev->name);
3337 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3338 clear_bit(0, (&sp->tasklet_status));
7ba013ac 3339 atomic_dec(&sp->isr_cnt);
3340 return IRQ_HANDLED;
3341 }
3342 clear_bit(0, (&sp->tasklet_status));
3343 } else if (level == LOW) {
3344 tasklet_schedule(&sp->task);
3345 }
3346 }
3347#endif
3348
7ba013ac 3349 atomic_dec(&sp->isr_cnt);
3350 return IRQ_HANDLED;
3351}
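/*
 * Sketch of the refill policy in the non-NAPI build above (illustrative
 * only).  When the count of posted Rx buffers drops to the PANIC level the
 * ISR refills the ring inline; at the LOW level the work is deferred to
 * the tasklet instead:
 *
 *	rxb_size = atomic_read(&sp->rx_bufs_left[i]);
 *	level = rx_buffer_level(sp, rxb_size, i);
 *	if ((level == PANIC) && (!TASKLET_IN_USE))
 *		fill_rx_buffers(sp, i);
 *	else if (level == LOW)
 *		tasklet_schedule(&sp->task);
 */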
3352
7ba013ac 3353/**
 3354 * s2io_updt_stats - Triggers an immediate one-shot update of the NIC's statistics block.
3355 */
3356static void s2io_updt_stats(nic_t *sp)
3357{
3358 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3359 u64 val64;
3360 int cnt = 0;
3361
3362 if (atomic_read(&sp->card_state) == CARD_UP) {
 3363 /* Approx 30us on a 133 MHz bus */
3364 val64 = SET_UPDT_CLICKS(10) |
3365 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3366 writeq(val64, &bar0->stat_cfg);
3367 do {
3368 udelay(100);
3369 val64 = readq(&bar0->stat_cfg);
3370 if (!(val64 & BIT(0)))
3371 break;
3372 cnt++;
3373 if (cnt == 5)
3374 break; /* Updt failed */
3375 } while(1);
3376 }
3377}
3378
1da177e4 3379/**
20346722 3380 * s2io_get_stats - Updates the device statistics structure.
3381 * @dev : pointer to the device structure.
3382 * Description:
20346722 3383 * This function updates the device statistics structure in the s2io_nic
3384 * structure and returns a pointer to the same.
3385 * Return value:
3386 * pointer to the updated net_device_stats structure.
3387 */
3388
20346722 3389struct net_device_stats *s2io_get_stats(struct net_device *dev)
3390{
3391 nic_t *sp = dev->priv;
3392 mac_info_t *mac_control;
3393 struct config_param *config;
3394
20346722 3395
3396 mac_control = &sp->mac_control;
3397 config = &sp->config;
3398
7ba013ac 3399 /* Configure Stats for immediate updt */
3400 s2io_updt_stats(sp);
3401
3402 sp->stats.tx_packets =
3403 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722 3404 sp->stats.tx_errors =
3405 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3406 sp->stats.rx_errors =
3407 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3408 sp->stats.multicast =
3409 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 3410 sp->stats.rx_length_errors =
20346722 3411 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4
LT
3412
3413 return (&sp->stats);
3414}
3415
3416/**
3417 * s2io_set_multicast - entry point for multicast address enable/disable.
3418 * @dev : pointer to the device structure
3419 * Description:
20346722 3420 * This function is a driver entry point which gets called by the kernel
3421 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
 3422 * called to set/reset promiscuous mode. Depending on the device flags, we
 3423 * determine whether multicast addresses must be enabled or promiscuous mode
 3424 * is to be enabled/disabled, etc.
3425 * Return value:
3426 * void.
3427 */
3428
3429static void s2io_set_multicast(struct net_device *dev)
3430{
3431 int i, j, prev_cnt;
3432 struct dev_mc_list *mclist;
3433 nic_t *sp = dev->priv;
3434 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3435 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3436 0xfeffffffffffULL;
3437 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3438 void __iomem *add;
3439
3440 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3441 /* Enable all Multicast addresses */
3442 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3443 &bar0->rmac_addr_data0_mem);
3444 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3445 &bar0->rmac_addr_data1_mem);
3446 val64 = RMAC_ADDR_CMD_MEM_WE |
3447 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3448 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3449 writeq(val64, &bar0->rmac_addr_cmd_mem);
3450 /* Wait till command completes */
3451 wait_for_cmd_complete(sp);
3452
3453 sp->m_cast_flg = 1;
3454 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3455 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3456 /* Disable all Multicast addresses */
3457 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3458 &bar0->rmac_addr_data0_mem);
5e25b9dd 3459 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3460 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3461 val64 = RMAC_ADDR_CMD_MEM_WE |
3462 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3463 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3464 writeq(val64, &bar0->rmac_addr_cmd_mem);
3465 /* Wait till command completes */
3466 wait_for_cmd_complete(sp);
3467
3468 sp->m_cast_flg = 0;
3469 sp->all_multi_pos = 0;
3470 }
3471
3472 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3473 /* Put the NIC into promiscuous mode */
3474 add = &bar0->mac_cfg;
3475 val64 = readq(&bar0->mac_cfg);
3476 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3477
3478 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3479 writel((u32) val64, add);
3480 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3481 writel((u32) (val64 >> 32), (add + 4));
3482
3483 val64 = readq(&bar0->mac_cfg);
3484 sp->promisc_flg = 1;
3485 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3486 dev->name);
3487 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3488 /* Remove the NIC from promiscuous mode */
3489 add = &bar0->mac_cfg;
3490 val64 = readq(&bar0->mac_cfg);
3491 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3492
3493 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3494 writel((u32) val64, add);
3495 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3496 writel((u32) (val64 >> 32), (add + 4));
3497
3498 val64 = readq(&bar0->mac_cfg);
3499 sp->promisc_flg = 0;
3500 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3501 dev->name);
3502 }
3503
3504 /* Update individual M_CAST address list */
3505 if ((!sp->m_cast_flg) && dev->mc_count) {
3506 if (dev->mc_count >
3507 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3508 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3509 dev->name);
3510 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3511 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3512 return;
3513 }
3514
3515 prev_cnt = sp->mc_addr_count;
3516 sp->mc_addr_count = dev->mc_count;
3517
3518 /* Clear out the previous list of Mc in the H/W. */
3519 for (i = 0; i < prev_cnt; i++) {
3520 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3521 &bar0->rmac_addr_data0_mem);
3522 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3523 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3524 val64 = RMAC_ADDR_CMD_MEM_WE |
3525 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3526 RMAC_ADDR_CMD_MEM_OFFSET
3527 (MAC_MC_ADDR_START_OFFSET + i);
3528 writeq(val64, &bar0->rmac_addr_cmd_mem);
3529
 3530 /* Wait for command to complete */
3531 if (wait_for_cmd_complete(sp)) {
3532 DBG_PRINT(ERR_DBG, "%s: Adding ",
3533 dev->name);
3534 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3535 return;
3536 }
3537 }
3538
3539 /* Create the new Rx filter list and update the same in H/W. */
3540 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3541 i++, mclist = mclist->next) {
3542 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3543 ETH_ALEN);
3544 for (j = 0; j < ETH_ALEN; j++) {
3545 mac_addr |= mclist->dmi_addr[j];
3546 mac_addr <<= 8;
3547 }
3548 mac_addr >>= 8;
3549 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3550 &bar0->rmac_addr_data0_mem);
3551 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3552 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3553 val64 = RMAC_ADDR_CMD_MEM_WE |
3554 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3555 RMAC_ADDR_CMD_MEM_OFFSET
3556 (i + MAC_MC_ADDR_START_OFFSET);
3557 writeq(val64, &bar0->rmac_addr_cmd_mem);
3558
 3559 /* Wait for command to complete */
3560 if (wait_for_cmd_complete(sp)) {
3561 DBG_PRINT(ERR_DBG, "%s: Adding ",
3562 dev->name);
3563 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3564 return;
3565 }
3566 }
3567 }
3568}
3569
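/*
 * Note on the promiscuous-mode path above: mac_cfg is a 64-bit register that
 * is written as two keyed 32-bit halves, each write preceded by unlocking the
 * register through rmac_cfg_key.  An illustrative sketch of that idiom, not
 * compiled into the driver (the helper name is hypothetical):
 */
#if 0
static void s2io_write_mac_cfg(XENA_dev_config_t __iomem *bar0, u64 val64)
{
        void __iomem *add = &bar0->mac_cfg;

        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32) val64, add);                 /* lower 32 bits */
        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32) (val64 >> 32), (add + 4));   /* upper 32 bits */
}
#endif
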
3570/**
20346722 3571 * s2io_set_mac_addr - Programs the Xframe mac address
1da177e4
LT
3572 * @dev : pointer to the device structure.
3573 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 3574 * Description : This procedure will program the Xframe to receive
1da177e4 3575 * frames with new Mac Address
20346722 3576 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
3577 * as defined in errno.h file on failure.
3578 */
3579
3580int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3581{
3582 nic_t *sp = dev->priv;
3583 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3584 register u64 val64, mac_addr = 0;
3585 int i;
3586
20346722 3587 /*
1da177e4
LT
3588 * Set the new MAC address as the new unicast filter and reflect this
3589 * change on the device address registered with the OS. It will be
20346722 3590 * at offset 0.
1da177e4
LT
3591 */
3592 for (i = 0; i < ETH_ALEN; i++) {
3593 mac_addr <<= 8;
3594 mac_addr |= addr[i];
3595 }
3596
3597 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3598 &bar0->rmac_addr_data0_mem);
3599
3600 val64 =
3601 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3602 RMAC_ADDR_CMD_MEM_OFFSET(0);
3603 writeq(val64, &bar0->rmac_addr_cmd_mem);
3604 /* Wait till command completes */
3605 if (wait_for_cmd_complete(sp)) {
3606 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3607 return FAILURE;
3608 }
3609
3610 return SUCCESS;
3611}
3612
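/*
 * Worked example for the packing loop in s2io_set_mac_addr() above: the six
 * MAC bytes end up in the low 48 bits of a u64 with addr[0] as the most
 * significant byte.  The address below is chosen purely for illustration and
 * the fragment is not compiled into the driver.
 */
#if 0
        u8 addr[ETH_ALEN] = { 0x00, 0x0c, 0xf1, 0x12, 0x34, 0x56 };
        u64 mac_addr = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                mac_addr <<= 8;
                mac_addr |= addr[i];
        }
        /* mac_addr is now 0x0000000cf1123456ULL */
#endif
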
3613/**
20346722 3614 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
 3615 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3616 * @info: pointer to the structure with parameters given by ethtool to set
3617 * link information.
3618 * Description:
20346722 3619 * The function sets different link parameters provided by the user onto
1da177e4
LT
3620 * the NIC.
3621 * Return value:
3622 * 0 on success.
3623*/
3624
3625static int s2io_ethtool_sset(struct net_device *dev,
3626 struct ethtool_cmd *info)
3627{
3628 nic_t *sp = dev->priv;
3629 if ((info->autoneg == AUTONEG_ENABLE) ||
3630 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3631 return -EINVAL;
3632 else {
3633 s2io_close(sp->dev);
3634 s2io_open(sp->dev);
3635 }
3636
3637 return 0;
3638}
3639
3640/**
20346722 3641 * s2io_ethtool_gset - Returns link specific information.
1da177e4
LT
3642 * @sp : private member of the device structure, pointer to the
3643 * s2io_nic structure.
3644 * @info : pointer to the structure with parameters given by ethtool
3645 * to return link information.
3646 * Description:
3647 * Returns link specific information like speed, duplex etc.. to ethtool.
3648 * Return value :
3649 * return 0 on success.
3650 */
3651
3652static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3653{
3654 nic_t *sp = dev->priv;
3655 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3656 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3657 info->port = PORT_FIBRE;
3658 /* info->transceiver?? TODO */
3659
3660 if (netif_carrier_ok(sp->dev)) {
3661 info->speed = 10000;
3662 info->duplex = DUPLEX_FULL;
3663 } else {
3664 info->speed = -1;
3665 info->duplex = -1;
3666 }
3667
3668 info->autoneg = AUTONEG_DISABLE;
3669 return 0;
3670}
3671
3672/**
20346722 3673 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3674 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3675 * s2io_nic structure.
3676 * @info : pointer to the structure with parameters given by ethtool to
3677 * return driver information.
3678 * Description:
 3679 * Returns driver specific information like name, version, etc. to ethtool.
3680 * Return value:
3681 * void
3682 */
3683
3684static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3685 struct ethtool_drvinfo *info)
3686{
3687 nic_t *sp = dev->priv;
3688
3689 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3690 strncpy(info->version, s2io_driver_version,
3691 sizeof(s2io_driver_version));
3692 strncpy(info->fw_version, "", 32);
3693 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3694 info->regdump_len = XENA_REG_SPACE;
3695 info->eedump_len = XENA_EEPROM_SPACE;
3696 info->testinfo_len = S2IO_TEST_LEN;
3697 info->n_stats = S2IO_STAT_LEN;
3698}
3699
3700/**
 3701 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
20346722 3702 * @sp: private member of the device structure, which is a pointer to the
1da177e4 3703 * s2io_nic structure.
20346722 3704 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
3705 * dumping the registers.
 3706 * @reg_space: The input argument into which all the registers are dumped.
3707 * Description:
3708 * Dumps the entire register space of xFrame NIC into the user given
3709 * buffer area.
3710 * Return value :
3711 * void .
3712*/
3713
3714static void s2io_ethtool_gregs(struct net_device *dev,
3715 struct ethtool_regs *regs, void *space)
3716{
3717 int i;
3718 u64 reg;
3719 u8 *reg_space = (u8 *) space;
3720 nic_t *sp = dev->priv;
3721
3722 regs->len = XENA_REG_SPACE;
3723 regs->version = sp->pdev->subsystem_device;
3724
3725 for (i = 0; i < regs->len; i += 8) {
3726 reg = readq(sp->bar0 + i);
3727 memcpy((reg_space + i), &reg, 8);
3728 }
3729}
3730
3731/**
3732 * s2io_phy_id - timer function that alternates adapter LED.
20346722 3733 * @data : address of the private member of the device structure, which
1da177e4 3734 * is a pointer to the s2io_nic structure, provided as an u32.
20346722 3735 * Description: This is actually the timer function that alternates the
 3736 * adapter LED bit in the adapter control register to set/reset on every
 3737 * invocation. The timer is set for 1/2 a second, hence the NIC LED blinks
1da177e4
LT
3738 * once every second.
3739*/
3740static void s2io_phy_id(unsigned long data)
3741{
3742 nic_t *sp = (nic_t *) data;
3743 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3744 u64 val64 = 0;
3745 u16 subid;
3746
3747 subid = sp->pdev->subsystem_device;
541ae68f 3748 if ((sp->device_type == XFRAME_II_DEVICE) ||
3749 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
3750 val64 = readq(&bar0->gpio_control);
3751 val64 ^= GPIO_CTRL_GPIO_0;
3752 writeq(val64, &bar0->gpio_control);
3753 } else {
3754 val64 = readq(&bar0->adapter_control);
3755 val64 ^= ADAPTER_LED_ON;
3756 writeq(val64, &bar0->adapter_control);
3757 }
3758
3759 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3760}
3761
3762/**
3763 * s2io_ethtool_idnic - To physically identify the nic on the system.
3764 * @sp : private member of the device structure, which is a pointer to the
3765 * s2io_nic structure.
20346722 3766 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
3767 * ethtool.
3768 * Description: Used to physically identify the NIC on the system.
20346722 3769 * The Link LED will blink for a time specified by the user for
1da177e4 3770 * identification.
20346722 3771 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
 3772 * identification is possible only if its link is up.
3773 * Return value:
3774 * int , returns 0 on success
3775 */
3776
3777static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3778{
3779 u64 val64 = 0, last_gpio_ctrl_val;
3780 nic_t *sp = dev->priv;
3781 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3782 u16 subid;
3783
3784 subid = sp->pdev->subsystem_device;
3785 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f 3786 if ((sp->device_type == XFRAME_I_DEVICE) &&
3787 ((subid & 0xFF) < 0x07)) {
1da177e4
LT
3788 val64 = readq(&bar0->adapter_control);
3789 if (!(val64 & ADAPTER_CNTL_EN)) {
3790 printk(KERN_ERR
3791 "Adapter Link down, cannot blink LED\n");
3792 return -EFAULT;
3793 }
3794 }
3795 if (sp->id_timer.function == NULL) {
3796 init_timer(&sp->id_timer);
3797 sp->id_timer.function = s2io_phy_id;
3798 sp->id_timer.data = (unsigned long) sp;
3799 }
3800 mod_timer(&sp->id_timer, jiffies);
3801 if (data)
20346722 3802 msleep_interruptible(data * HZ);
1da177e4 3803 else
20346722 3804 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
3805 del_timer_sync(&sp->id_timer);
3806
541ae68f 3807 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
1da177e4
LT
3808 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3809 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3810 }
3811
3812 return 0;
3813}
3814
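/*
 * Usage note for s2io_ethtool_idnic() above: it is wired up as the ethtool
 * .phys_id handler (see netdev_ethtool_ops later in this file), so the blink
 * is normally requested from user space with something like
 * "ethtool -p ethX 5" (interface name and duration are examples only).
 * The timer lifecycle it relies on boils down to the sketch below, which is
 * not compiled into the driver:
 */
#if 0
        init_timer(&sp->id_timer);
        sp->id_timer.function = s2io_phy_id;
        sp->id_timer.data = (unsigned long) sp;
        mod_timer(&sp->id_timer, jiffies);      /* start blinking now */
        /* ... sleep for the requested duration ... */
        del_timer_sync(&sp->id_timer);          /* stop blinking */
#endif
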
3815/**
 3816 * s2io_ethtool_getpause_data - Pause frame generation and reception.
20346722 3817 * @sp : private member of the device structure, which is a pointer to the
3818 * s2io_nic structure.
1da177e4
LT
3819 * @ep : pointer to the structure with pause parameters given by ethtool.
3820 * Description:
3821 * Returns the Pause frame generation and reception capability of the NIC.
3822 * Return value:
3823 * void
3824 */
3825static void s2io_ethtool_getpause_data(struct net_device *dev,
3826 struct ethtool_pauseparam *ep)
3827{
3828 u64 val64;
3829 nic_t *sp = dev->priv;
3830 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3831
3832 val64 = readq(&bar0->rmac_pause_cfg);
3833 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3834 ep->tx_pause = TRUE;
3835 if (val64 & RMAC_PAUSE_RX_ENABLE)
3836 ep->rx_pause = TRUE;
3837 ep->autoneg = FALSE;
3838}
3839
3840/**
3841 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 3842 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3843 * s2io_nic structure.
3844 * @ep : pointer to the structure with pause parameters given by ethtool.
3845 * Description:
3846 * It can be used to set or reset Pause frame generation or reception
3847 * support of the NIC.
3848 * Return value:
3849 * int, returns 0 on Success
3850 */
3851
3852static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 3853 struct ethtool_pauseparam *ep)
1da177e4
LT
3854{
3855 u64 val64;
3856 nic_t *sp = dev->priv;
3857 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3858
3859 val64 = readq(&bar0->rmac_pause_cfg);
3860 if (ep->tx_pause)
3861 val64 |= RMAC_PAUSE_GEN_ENABLE;
3862 else
3863 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3864 if (ep->rx_pause)
3865 val64 |= RMAC_PAUSE_RX_ENABLE;
3866 else
3867 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3868 writeq(val64, &bar0->rmac_pause_cfg);
3869 return 0;
3870}
3871
3872/**
3873 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 3874 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3875 * s2io_nic structure.
3876 * @off : offset at which the data must be written
 3877 * @data : It is an output parameter where the data read at the given
20346722 3878 * offset is stored.
1da177e4 3879 * Description:
20346722 3880 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
3881 * read data.
 3882 * NOTE: Will allow reading only the part of the EEPROM visible through the
3883 * I2C bus.
3884 * Return value:
3885 * -1 on failure and 0 on success.
3886 */
3887
3888#define S2IO_DEV_ID 5
3889static int read_eeprom(nic_t * sp, int off, u32 * data)
3890{
3891 int ret = -1;
3892 u32 exit_cnt = 0;
3893 u64 val64;
3894 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3895
3896 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3897 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3898 I2C_CONTROL_CNTL_START;
3899 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3900
3901 while (exit_cnt < 5) {
3902 val64 = readq(&bar0->i2c_control);
3903 if (I2C_CONTROL_CNTL_END(val64)) {
3904 *data = I2C_CONTROL_GET_DATA(val64);
3905 ret = 0;
3906 break;
3907 }
3908 msleep(50);
3909 exit_cnt++;
3910 }
3911
3912 return ret;
3913}
3914
3915/**
3916 * write_eeprom - actually writes the relevant part of the data value.
3917 * @sp : private member of the device structure, which is a pointer to the
3918 * s2io_nic structure.
3919 * @off : offset at which the data must be written
3920 * @data : The data that is to be written
20346722 3921 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
3922 * the Eeprom. (max of 3)
3923 * Description:
3924 * Actually writes the relevant part of the data value into the Eeprom
3925 * through the I2C bus.
3926 * Return value:
3927 * 0 on success, -1 on failure.
3928 */
3929
3930static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3931{
3932 int exit_cnt = 0, ret = -1;
3933 u64 val64;
3934 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3935
3936 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3937 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3938 I2C_CONTROL_CNTL_START;
3939 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3940
3941 while (exit_cnt < 5) {
3942 val64 = readq(&bar0->i2c_control);
3943 if (I2C_CONTROL_CNTL_END(val64)) {
3944 if (!(val64 & I2C_CONTROL_NACK))
3945 ret = 0;
3946 break;
3947 }
3948 msleep(50);
3949 exit_cnt++;
3950 }
3951
3952 return ret;
3953}
3954
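/*
 * Illustrative use of the EEPROM helpers above (not compiled into the
 * driver); the offset 0x10 is arbitrary.  read_eeprom() returns 0 on
 * success and -1 on failure, as documented.
 */
#if 0
        u32 word;

        if (!read_eeprom(sp, 0x10, &word))
                DBG_PRINT(INIT_DBG, "EEPROM word at 0x10 = 0x%08x\n", word);
        else
                DBG_PRINT(ERR_DBG, "EEPROM read at offset 0x10 failed\n");
#endif
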
3955/**
3956 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 3957 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
20346722 3958 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
3959 * containing all relevant information.
 3960 * @data_buf : buffer into which the EEPROM data read is returned.
 3961 * Description: Reads the values stored in the Eeprom at the given offset
 3962 * for a given length. Stores these values in the input argument data
 3963 * buffer 'data_buf' and returns them to the caller (ethtool).
3964 * Return value:
3965 * int 0 on success
3966 */
3967
3968static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 3969 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4
LT
3970{
3971 u32 data, i, valid;
3972 nic_t *sp = dev->priv;
3973
3974 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3975
3976 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3977 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3978
3979 for (i = 0; i < eeprom->len; i += 4) {
3980 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3981 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3982 return -EFAULT;
3983 }
3984 valid = INV(data);
3985 memcpy((data_buf + i), &valid, 4);
3986 }
3987 return 0;
3988}
3989
3990/**
3991 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3992 * @sp : private member of the device structure, which is a pointer to the
3993 * s2io_nic structure.
20346722 3994 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
3995 * containing all relevant information.
 3996 * @data_buf : user defined value to be written into the Eeprom.
3997 * Description:
3998 * Tries to write the user provided value in the Eeprom, at the offset
3999 * given by the user.
4000 * Return value:
4001 * 0 on success, -EFAULT on failure.
4002 */
4003
4004static int s2io_ethtool_seeprom(struct net_device *dev,
4005 struct ethtool_eeprom *eeprom,
4006 u8 * data_buf)
4007{
4008 int len = eeprom->len, cnt = 0;
4009 u32 valid = 0, data;
4010 nic_t *sp = dev->priv;
4011
4012 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4013 DBG_PRINT(ERR_DBG,
4014 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4015 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
4016 eeprom->magic);
4017 return -EFAULT;
4018 }
4019
4020 while (len) {
4021 data = (u32) data_buf[cnt] & 0x000000FF;
4022 if (data) {
4023 valid = (u32) (data << 24);
4024 } else
4025 valid = data;
4026
4027 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4028 DBG_PRINT(ERR_DBG,
4029 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4030 DBG_PRINT(ERR_DBG,
4031 "write into the specified offset\n");
4032 return -EFAULT;
4033 }
4034 cnt++;
4035 len--;
4036 }
4037
4038 return 0;
4039}
4040
4041/**
20346722 4042 * s2io_register_test - reads and writes into all clock domains.
4043 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4044 * s2io_nic structure.
 4045 * @data : variable that returns the result of each of the tests conducted
 4046 * by the driver.
4047 * Description:
4048 * Read and write into all clock domains. The NIC has 3 clock domains,
 4049 * and check that registers in all the three regions are accessible.
4050 * Return value:
4051 * 0 on success.
4052 */
4053
4054static int s2io_register_test(nic_t * sp, uint64_t * data)
4055{
4056 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4057 u64 val64 = 0;
4058 int fail = 0;
4059
20346722 4060 val64 = readq(&bar0->pif_rd_swapper_fb);
4061 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
4062 fail = 1;
4063 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4064 }
4065
4066 val64 = readq(&bar0->rmac_pause_cfg);
4067 if (val64 != 0xc000ffff00000000ULL) {
4068 fail = 1;
4069 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4070 }
4071
4072 val64 = readq(&bar0->rx_queue_cfg);
4073 if (val64 != 0x0808080808080808ULL) {
4074 fail = 1;
4075 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4076 }
4077
4078 val64 = readq(&bar0->xgxs_efifo_cfg);
4079 if (val64 != 0x000000001923141EULL) {
4080 fail = 1;
4081 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4082 }
4083
4084 val64 = 0x5A5A5A5A5A5A5A5AULL;
4085 writeq(val64, &bar0->xmsi_data);
4086 val64 = readq(&bar0->xmsi_data);
4087 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4088 fail = 1;
4089 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4090 }
4091
4092 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4093 writeq(val64, &bar0->xmsi_data);
4094 val64 = readq(&bar0->xmsi_data);
4095 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4096 fail = 1;
4097 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4098 }
4099
4100 *data = fail;
4101 return 0;
4102}
4103
4104/**
20346722 4105 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
4106 * @sp : private member of the device structure, which is a pointer to the
4107 * s2io_nic structure.
 4108 * @data: variable that returns the result of each of the tests conducted by
4109 * the driver.
4110 * Description:
20346722 4111 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
4112 * register.
4113 * Return value:
4114 * 0 on success.
4115 */
4116
4117static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4118{
4119 int fail = 0;
4120 u32 ret_data;
4121
4122 /* Test Write Error at offset 0 */
4123 if (!write_eeprom(sp, 0, 0, 3))
4124 fail = 1;
4125
4126 /* Test Write at offset 4f0 */
4127 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4128 fail = 1;
4129 if (read_eeprom(sp, 0x4F0, &ret_data))
4130 fail = 1;
4131
4132 if (ret_data != 0x01234567)
4133 fail = 1;
4134
 4135 /* Reset the EEPROM data back to 0xFFFFFFFF */
4136 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4137
4138 /* Test Write Request Error at offset 0x7c */
4139 if (!write_eeprom(sp, 0x07C, 0, 3))
4140 fail = 1;
4141
4142 /* Test Write Request at offset 0x7fc */
4143 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4144 fail = 1;
4145 if (read_eeprom(sp, 0x7FC, &ret_data))
4146 fail = 1;
4147
4148 if (ret_data != 0x01234567)
4149 fail = 1;
4150
 4151 /* Reset the EEPROM data back to 0xFFFFFFFF */
4152 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4153
4154 /* Test Write Error at offset 0x80 */
4155 if (!write_eeprom(sp, 0x080, 0, 3))
4156 fail = 1;
4157
4158 /* Test Write Error at offset 0xfc */
4159 if (!write_eeprom(sp, 0x0FC, 0, 3))
4160 fail = 1;
4161
4162 /* Test Write Error at offset 0x100 */
4163 if (!write_eeprom(sp, 0x100, 0, 3))
4164 fail = 1;
4165
4166 /* Test Write Error at offset 4ec */
4167 if (!write_eeprom(sp, 0x4EC, 0, 3))
4168 fail = 1;
4169
4170 *data = fail;
4171 return 0;
4172}
4173
4174/**
4175 * s2io_bist_test - invokes the MemBist test of the card .
20346722 4176 * @sp : private member of the device structure, which is a pointer to the
1da177e4 4177 * s2io_nic structure.
20346722 4178 * @data: variable that returns the result of each of the tests conducted by
1da177e4
LT
4179 * the driver.
4180 * Description:
4181 * This invokes the MemBist test of the card. We give around
4182 * 2 secs time for the Test to complete. If it's still not complete
20346722 4183 * within this period, we consider that the test has failed.
1da177e4
LT
4184 * Return value:
4185 * 0 on success and -1 on failure.
4186 */
4187
4188static int s2io_bist_test(nic_t * sp, uint64_t * data)
4189{
4190 u8 bist = 0;
4191 int cnt = 0, ret = -1;
4192
4193 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4194 bist |= PCI_BIST_START;
4195 pci_write_config_word(sp->pdev, PCI_BIST, bist);
4196
4197 while (cnt < 20) {
4198 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4199 if (!(bist & PCI_BIST_START)) {
4200 *data = (bist & PCI_BIST_CODE_MASK);
4201 ret = 0;
4202 break;
4203 }
4204 msleep(100);
4205 cnt++;
4206 }
4207
4208 return ret;
4209}
4210
4211/**
20346722 4212 * s2io_link_test - verifies the link state of the nic
 4213 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4214 * s2io_nic structure.
4215 * @data: variable that returns the result of each of the test conducted by
4216 * the driver.
4217 * Description:
20346722 4218 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
4219 * argument 'data' appropriately.
4220 * Return value:
4221 * 0 on success.
4222 */
4223
4224static int s2io_link_test(nic_t * sp, uint64_t * data)
4225{
4226 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4227 u64 val64;
4228
4229 val64 = readq(&bar0->adapter_status);
4230 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4231 *data = 1;
4232
4233 return 0;
4234}
4235
4236/**
20346722 4237 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4238 * @sp - private member of the device structure, which is a pointer to the
1da177e4 4239 * s2io_nic structure.
20346722 4240 * @data - variable that returns the result of each of the test
1da177e4
LT
4241 * conducted by the driver.
4242 * Description:
20346722 4243 * This is one of the offline tests that verifies the read and write
1da177e4
LT
4244 * access to the RldRam chip on the NIC.
4245 * Return value:
4246 * 0 on success.
4247 */
4248
4249static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4250{
4251 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4252 u64 val64;
4253 int cnt, iteration = 0, test_pass = 0;
4254
4255 val64 = readq(&bar0->adapter_control);
4256 val64 &= ~ADAPTER_ECC_EN;
4257 writeq(val64, &bar0->adapter_control);
4258
4259 val64 = readq(&bar0->mc_rldram_test_ctrl);
4260 val64 |= MC_RLDRAM_TEST_MODE;
4261 writeq(val64, &bar0->mc_rldram_test_ctrl);
4262
4263 val64 = readq(&bar0->mc_rldram_mrs);
4264 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4265 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4266
4267 val64 |= MC_RLDRAM_MRS_ENABLE;
4268 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4269
4270 while (iteration < 2) {
4271 val64 = 0x55555555aaaa0000ULL;
4272 if (iteration == 1) {
4273 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4274 }
4275 writeq(val64, &bar0->mc_rldram_test_d0);
4276
4277 val64 = 0xaaaa5a5555550000ULL;
4278 if (iteration == 1) {
4279 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4280 }
4281 writeq(val64, &bar0->mc_rldram_test_d1);
4282
4283 val64 = 0x55aaaaaaaa5a0000ULL;
4284 if (iteration == 1) {
4285 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4286 }
4287 writeq(val64, &bar0->mc_rldram_test_d2);
4288
4289 val64 = (u64) (0x0000003fffff0000ULL);
4290 writeq(val64, &bar0->mc_rldram_test_add);
4291
4292
4293 val64 = MC_RLDRAM_TEST_MODE;
4294 writeq(val64, &bar0->mc_rldram_test_ctrl);
4295
4296 val64 |=
4297 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4298 MC_RLDRAM_TEST_GO;
4299 writeq(val64, &bar0->mc_rldram_test_ctrl);
4300
4301 for (cnt = 0; cnt < 5; cnt++) {
4302 val64 = readq(&bar0->mc_rldram_test_ctrl);
4303 if (val64 & MC_RLDRAM_TEST_DONE)
4304 break;
4305 msleep(200);
4306 }
4307
4308 if (cnt == 5)
4309 break;
4310
4311 val64 = MC_RLDRAM_TEST_MODE;
4312 writeq(val64, &bar0->mc_rldram_test_ctrl);
4313
4314 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4315 writeq(val64, &bar0->mc_rldram_test_ctrl);
4316
4317 for (cnt = 0; cnt < 5; cnt++) {
4318 val64 = readq(&bar0->mc_rldram_test_ctrl);
4319 if (val64 & MC_RLDRAM_TEST_DONE)
4320 break;
4321 msleep(500);
4322 }
4323
4324 if (cnt == 5)
4325 break;
4326
4327 val64 = readq(&bar0->mc_rldram_test_ctrl);
4328 if (val64 & MC_RLDRAM_TEST_PASS)
4329 test_pass = 1;
4330
4331 iteration++;
4332 }
4333
4334 if (!test_pass)
4335 *data = 1;
4336 else
4337 *data = 0;
4338
4339 return 0;
4340}
4341
4342/**
 4343 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
4344 * @sp : private member of the device structure, which is a pointer to the
4345 * s2io_nic structure.
4346 * @ethtest : pointer to a ethtool command specific structure that will be
4347 * returned to the user.
20346722 4348 * @data : variable that returns the result of each of the test
1da177e4
LT
4349 * conducted by the driver.
4350 * Description:
 4351 * This function conducts 5 tests (4 offline and 1 online) to determine
4352 * the health of the card.
4353 * Return value:
4354 * void
4355 */
4356
4357static void s2io_ethtool_test(struct net_device *dev,
4358 struct ethtool_test *ethtest,
4359 uint64_t * data)
4360{
4361 nic_t *sp = dev->priv;
4362 int orig_state = netif_running(sp->dev);
4363
4364 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4365 /* Offline Tests. */
20346722 4366 if (orig_state)
1da177e4 4367 s2io_close(sp->dev);
1da177e4
LT
4368
4369 if (s2io_register_test(sp, &data[0]))
4370 ethtest->flags |= ETH_TEST_FL_FAILED;
4371
4372 s2io_reset(sp);
1da177e4
LT
4373
4374 if (s2io_rldram_test(sp, &data[3]))
4375 ethtest->flags |= ETH_TEST_FL_FAILED;
4376
4377 s2io_reset(sp);
1da177e4
LT
4378
4379 if (s2io_eeprom_test(sp, &data[1]))
4380 ethtest->flags |= ETH_TEST_FL_FAILED;
4381
4382 if (s2io_bist_test(sp, &data[4]))
4383 ethtest->flags |= ETH_TEST_FL_FAILED;
4384
4385 if (orig_state)
4386 s2io_open(sp->dev);
4387
4388 data[2] = 0;
4389 } else {
4390 /* Online Tests. */
4391 if (!orig_state) {
4392 DBG_PRINT(ERR_DBG,
4393 "%s: is not up, cannot run test\n",
4394 dev->name);
4395 data[0] = -1;
4396 data[1] = -1;
4397 data[2] = -1;
4398 data[3] = -1;
4399 data[4] = -1;
4400 }
4401
4402 if (s2io_link_test(sp, &data[2]))
4403 ethtest->flags |= ETH_TEST_FL_FAILED;
4404
4405 data[0] = 0;
4406 data[1] = 0;
4407 data[3] = 0;
4408 data[4] = 0;
4409 }
4410}
4411
4412static void s2io_get_ethtool_stats(struct net_device *dev,
4413 struct ethtool_stats *estats,
4414 u64 * tmp_stats)
4415{
4416 int i = 0;
4417 nic_t *sp = dev->priv;
4418 StatInfo_t *stat_info = sp->mac_control.stats_info;
4419
7ba013ac 4420 s2io_updt_stats(sp);
541ae68f 4421 tmp_stats[i++] =
4422 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4423 le32_to_cpu(stat_info->tmac_frms);
4424 tmp_stats[i++] =
4425 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4426 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 4427 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f 4428 tmp_stats[i++] =
4429 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4430 le32_to_cpu(stat_info->tmac_mcst_frms);
4431 tmp_stats[i++] =
4432 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4433 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 4434 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
541ae68f 4435 tmp_stats[i++] =
4436 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4437 le32_to_cpu(stat_info->tmac_any_err_frms);
1da177e4 4438 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f 4439 tmp_stats[i++] =
4440 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4441 le32_to_cpu(stat_info->tmac_vld_ip);
4442 tmp_stats[i++] =
4443 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4444 le32_to_cpu(stat_info->tmac_drop_ip);
4445 tmp_stats[i++] =
4446 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4447 le32_to_cpu(stat_info->tmac_icmp);
4448 tmp_stats[i++] =
4449 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4450 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 4451 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f 4452 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4453 le32_to_cpu(stat_info->tmac_udp);
4454 tmp_stats[i++] =
4455 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4456 le32_to_cpu(stat_info->rmac_vld_frms);
4457 tmp_stats[i++] =
4458 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4459 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
4460 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4461 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f 4462 tmp_stats[i++] =
4463 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4464 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4465 tmp_stats[i++] =
4466 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4467 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4
LT
4468 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4469 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4470 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
541ae68f 4471 tmp_stats[i++] =
4472 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4473 le32_to_cpu(stat_info->rmac_discarded_frms);
4474 tmp_stats[i++] =
4475 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4476 le32_to_cpu(stat_info->rmac_usized_frms);
4477 tmp_stats[i++] =
4478 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4479 le32_to_cpu(stat_info->rmac_osized_frms);
4480 tmp_stats[i++] =
4481 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4482 le32_to_cpu(stat_info->rmac_frag_frms);
4483 tmp_stats[i++] =
4484 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4485 le32_to_cpu(stat_info->rmac_jabber_frms);
4486 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4487 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
4488 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4489 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
541ae68f 4490 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4491 le32_to_cpu(stat_info->rmac_drop_ip);
4492 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4493 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 4494 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
541ae68f 4495 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4496 le32_to_cpu(stat_info->rmac_udp);
4497 tmp_stats[i++] =
4498 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4499 le32_to_cpu(stat_info->rmac_err_drp_udp);
4500 tmp_stats[i++] =
4501 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4502 le32_to_cpu(stat_info->rmac_pause_cnt);
4503 tmp_stats[i++] =
4504 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4505 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 4506 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
7ba013ac 4507 tmp_stats[i++] = 0;
4508 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4509 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
1da177e4
LT
4510}
4511
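/*
 * Note on the statistics above: many MAC counters are kept by the hardware
 * as a 32-bit value plus a separate 32-bit overflow word, and
 * s2io_get_ethtool_stats() rebuilds the full 64-bit count as
 * ((u64)oflow << 32) | low.  A worked example with made-up numbers, not
 * compiled into the driver:
 */
#if 0
        u32 frms = 0x00001000;          /* low 32 bits read from hardware */
        u32 frms_oflow = 0x00000002;    /* times the low word has wrapped */
        u64 total = (u64) frms_oflow << 32 | frms;
        /* total == 0x0000000200001000ULL, i.e. 2 * 2^32 + 0x1000 frames */
#endif
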
20346722 4512int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
4513{
4514 return (XENA_REG_SPACE);
4515}
4516
4517
20346722 4518u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4
LT
4519{
4520 nic_t *sp = dev->priv;
4521
4522 return (sp->rx_csum);
4523}
20346722 4524int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4525{
4526 nic_t *sp = dev->priv;
4527
4528 if (data)
4529 sp->rx_csum = 1;
4530 else
4531 sp->rx_csum = 0;
4532
4533 return 0;
4534}
20346722 4535int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
4536{
4537 return (XENA_EEPROM_SPACE);
4538}
4539
20346722 4540int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
4541{
4542 return (S2IO_TEST_LEN);
4543}
20346722 4544void s2io_ethtool_get_strings(struct net_device *dev,
4545 u32 stringset, u8 * data)
1da177e4
LT
4546{
4547 switch (stringset) {
4548 case ETH_SS_TEST:
4549 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4550 break;
4551 case ETH_SS_STATS:
4552 memcpy(data, &ethtool_stats_keys,
4553 sizeof(ethtool_stats_keys));
4554 }
4555}
1da177e4
LT
4556static int s2io_ethtool_get_stats_count(struct net_device *dev)
4557{
4558 return (S2IO_STAT_LEN);
4559}
4560
20346722 4561int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4562{
4563 if (data)
4564 dev->features |= NETIF_F_IP_CSUM;
4565 else
4566 dev->features &= ~NETIF_F_IP_CSUM;
4567
4568 return 0;
4569}
4570
4571
4572static struct ethtool_ops netdev_ethtool_ops = {
4573 .get_settings = s2io_ethtool_gset,
4574 .set_settings = s2io_ethtool_sset,
4575 .get_drvinfo = s2io_ethtool_gdrvinfo,
4576 .get_regs_len = s2io_ethtool_get_regs_len,
4577 .get_regs = s2io_ethtool_gregs,
4578 .get_link = ethtool_op_get_link,
4579 .get_eeprom_len = s2io_get_eeprom_len,
4580 .get_eeprom = s2io_ethtool_geeprom,
4581 .set_eeprom = s2io_ethtool_seeprom,
4582 .get_pauseparam = s2io_ethtool_getpause_data,
4583 .set_pauseparam = s2io_ethtool_setpause_data,
4584 .get_rx_csum = s2io_ethtool_get_rx_csum,
4585 .set_rx_csum = s2io_ethtool_set_rx_csum,
4586 .get_tx_csum = ethtool_op_get_tx_csum,
4587 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4588 .get_sg = ethtool_op_get_sg,
4589 .set_sg = ethtool_op_set_sg,
4590#ifdef NETIF_F_TSO
4591 .get_tso = ethtool_op_get_tso,
4592 .set_tso = ethtool_op_set_tso,
4593#endif
4594 .self_test_count = s2io_ethtool_self_test_count,
4595 .self_test = s2io_ethtool_test,
4596 .get_strings = s2io_ethtool_get_strings,
4597 .phys_id = s2io_ethtool_idnic,
4598 .get_stats_count = s2io_ethtool_get_stats_count,
4599 .get_ethtool_stats = s2io_get_ethtool_stats
4600};
4601
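/*
 * The table above is what makes the ethtool entry points in this file
 * reachable.  It is expected to be attached to the net_device during probe
 * (outside this excerpt), conventionally along the lines of the fragment
 * below, which is not compiled here:
 */
#if 0
        SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
#endif
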
4602/**
20346722 4603 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
4604 * @dev : Device pointer.
 4605 * @ifr : An IOCTL specific structure, that can contain a pointer to
4606 * a proprietary structure used to pass information to the driver.
4607 * @cmd : This is used to distinguish between the different commands that
4608 * can be passed to the IOCTL functions.
4609 * Description:
20346722 4610 * Currently no special functionality is supported in IOCTL, hence the
 4611 * function always returns -EOPNOTSUPP.
1da177e4
LT
4612 */
4613
20346722 4614int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
4615{
4616 return -EOPNOTSUPP;
4617}
4618
4619/**
4620 * s2io_change_mtu - entry point to change MTU size for the device.
4621 * @dev : device pointer.
4622 * @new_mtu : the new MTU size for the device.
4623 * Description: A driver entry point to change MTU size for the device.
4624 * Before changing the MTU the device must be stopped.
4625 * Return value:
4626 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4627 * file on failure.
4628 */
4629
20346722 4630int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4
LT
4631{
4632 nic_t *sp = dev->priv;
1da177e4
LT
4633
4634 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4635 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4636 dev->name);
4637 return -EPERM;
4638 }
4639
1da177e4 4640 dev->mtu = new_mtu;
d8892c6e 4641 if (netif_running(dev)) {
4642 s2io_card_down(sp);
4643 netif_stop_queue(dev);
4644 if (s2io_card_up(sp)) {
4645 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4646 __FUNCTION__);
4647 }
4648 if (netif_queue_stopped(dev))
4649 netif_wake_queue(dev);
4650 } else { /* Device is down */
4651 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4652 u64 val64 = new_mtu;
4653
4654 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4655 }
1da177e4
LT
4656
4657 return 0;
4658}
4659
4660/**
4661 * s2io_tasklet - Bottom half of the ISR.
 4662 * @dev_addr : address of the device structure in dma_addr_t format.
4663 * Description:
4664 * This is the tasklet or the bottom half of the ISR. This is
20346722 4665 * an extension of the ISR which is scheduled by the scheduler to be run
1da177e4 4666 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 4667 * be pushed into the tasklet. For now the tasklet is used only to
1da177e4
LT
4668 * replenish the Rx buffers in the Rx buffer descriptors.
4669 * Return value:
4670 * void.
4671 */
4672
4673static void s2io_tasklet(unsigned long dev_addr)
4674{
4675 struct net_device *dev = (struct net_device *) dev_addr;
4676 nic_t *sp = dev->priv;
4677 int i, ret;
4678 mac_info_t *mac_control;
4679 struct config_param *config;
4680
4681 mac_control = &sp->mac_control;
4682 config = &sp->config;
4683
4684 if (!TASKLET_IN_USE) {
4685 for (i = 0; i < config->rx_ring_num; i++) {
4686 ret = fill_rx_buffers(sp, i);
4687 if (ret == -ENOMEM) {
4688 DBG_PRINT(ERR_DBG, "%s: Out of ",
4689 dev->name);
4690 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4691 break;
4692 } else if (ret == -EFILL) {
4693 DBG_PRINT(ERR_DBG,
4694 "%s: Rx Ring %d is full\n",
4695 dev->name, i);
4696 break;
4697 }
4698 }
4699 clear_bit(0, (&sp->tasklet_status));
4700 }
4701}
4702
4703/**
 4704 * s2io_set_link - Set the link status
 4705 * @data: long pointer to device private structure
4706 * Description: Sets the link status for the adapter
4707 */
4708
4709static void s2io_set_link(unsigned long data)
4710{
4711 nic_t *nic = (nic_t *) data;
4712 struct net_device *dev = nic->dev;
4713 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4714 register u64 val64;
4715 u16 subid;
4716
4717 if (test_and_set_bit(0, &(nic->link_state))) {
4718 /* The card is being reset, no point doing anything */
4719 return;
4720 }
4721
4722 subid = nic->pdev->subsystem_device;
a371a07d 4723 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4724 /*
4725 * Allow a small delay for the NICs self initiated
4726 * cleanup to complete.
4727 */
4728 msleep(100);
4729 }
1da177e4
LT
4730
4731 val64 = readq(&bar0->adapter_status);
20346722 4732 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
4733 if (LINK_IS_UP(val64)) {
4734 val64 = readq(&bar0->adapter_control);
4735 val64 |= ADAPTER_CNTL_EN;
4736 writeq(val64, &bar0->adapter_control);
541ae68f 4737 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4738 subid)) {
1da177e4
LT
4739 val64 = readq(&bar0->gpio_control);
4740 val64 |= GPIO_CTRL_GPIO_0;
4741 writeq(val64, &bar0->gpio_control);
4742 val64 = readq(&bar0->gpio_control);
4743 } else {
4744 val64 |= ADAPTER_LED_ON;
4745 writeq(val64, &bar0->adapter_control);
4746 }
a371a07d 4747 if (s2io_link_fault_indication(nic) ==
4748 MAC_RMAC_ERR_TIMER) {
4749 val64 = readq(&bar0->adapter_status);
4750 if (!LINK_IS_UP(val64)) {
4751 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4752 DBG_PRINT(ERR_DBG, " Link down");
4753 DBG_PRINT(ERR_DBG, "after ");
4754 DBG_PRINT(ERR_DBG, "enabling ");
4755 DBG_PRINT(ERR_DBG, "device \n");
4756 }
1da177e4
LT
4757 }
4758 if (nic->device_enabled_once == FALSE) {
4759 nic->device_enabled_once = TRUE;
4760 }
4761 s2io_link(nic, LINK_UP);
4762 } else {
541ae68f 4763 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4764 subid)) {
1da177e4
LT
4765 val64 = readq(&bar0->gpio_control);
4766 val64 &= ~GPIO_CTRL_GPIO_0;
4767 writeq(val64, &bar0->gpio_control);
4768 val64 = readq(&bar0->gpio_control);
4769 }
4770 s2io_link(nic, LINK_DOWN);
4771 }
4772 } else { /* NIC is not Quiescent. */
4773 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4774 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4775 netif_stop_queue(dev);
4776 }
4777 clear_bit(0, &(nic->link_state));
4778}
4779
4780static void s2io_card_down(nic_t * sp)
4781{
4782 int cnt = 0;
4783 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4784 unsigned long flags;
4785 register u64 val64 = 0;
4786
25fff88e 4787 del_timer_sync(&sp->alarm_timer);
1da177e4 4788 /* If s2io_set_link task is executing, wait till it completes. */
20346722 4789 while (test_and_set_bit(0, &(sp->link_state))) {
1da177e4 4790 msleep(50);
20346722 4791 }
1da177e4
LT
4792 atomic_set(&sp->card_state, CARD_DOWN);
4793
4794 /* disable Tx and Rx traffic on the NIC */
4795 stop_nic(sp);
4796
4797 /* Kill tasklet. */
4798 tasklet_kill(&sp->task);
4799
4800 /* Check if the device is Quiescent and then Reset the NIC */
4801 do {
4802 val64 = readq(&bar0->adapter_status);
20346722 4803 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
1da177e4
LT
4804 break;
4805 }
4806
4807 msleep(50);
4808 cnt++;
4809 if (cnt == 10) {
4810 DBG_PRINT(ERR_DBG,
4811 "s2io_close:Device not Quiescent ");
 4812 DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4813 (unsigned long long) val64);
4814 break;
4815 }
4816 } while (1);
1da177e4
LT
4817 s2io_reset(sp);
4818
7ba013ac 4819 /* Waiting till all Interrupt handlers are complete */
4820 cnt = 0;
4821 do {
4822 msleep(10);
4823 if (!atomic_read(&sp->isr_cnt))
4824 break;
4825 cnt++;
4826 } while(cnt < 5);
4827
4828 spin_lock_irqsave(&sp->tx_lock, flags);
4829 /* Free all Tx buffers */
1da177e4 4830 free_tx_buffers(sp);
7ba013ac 4831 spin_unlock_irqrestore(&sp->tx_lock, flags);
4832
4833 /* Free all Rx buffers */
4834 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 4835 free_rx_buffers(sp);
7ba013ac 4836 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 4837
1da177e4
LT
4838 clear_bit(0, &(sp->link_state));
4839}
4840
4841static int s2io_card_up(nic_t * sp)
4842{
4843 int i, ret;
4844 mac_info_t *mac_control;
4845 struct config_param *config;
4846 struct net_device *dev = (struct net_device *) sp->dev;
4847
4848 /* Initialize the H/W I/O registers */
4849 if (init_nic(sp) != 0) {
4850 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4851 dev->name);
4852 return -ENODEV;
4853 }
4854
20346722 4855 /*
4856 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
4857 * Rx ring and initializing buffers into 30 Rx blocks
4858 */
4859 mac_control = &sp->mac_control;
4860 config = &sp->config;
4861
4862 for (i = 0; i < config->rx_ring_num; i++) {
4863 if ((ret = fill_rx_buffers(sp, i))) {
4864 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4865 dev->name);
4866 s2io_reset(sp);
4867 free_rx_buffers(sp);
4868 return -ENOMEM;
4869 }
4870 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4871 atomic_read(&sp->rx_bufs_left[i]));
4872 }
4873
4874 /* Setting its receive mode */
4875 s2io_set_multicast(dev);
4876
4877 /* Enable tasklet for the device */
4878 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4879
4880 /* Enable Rx Traffic and interrupts on the NIC */
4881 if (start_nic(sp)) {
4882 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4883 tasklet_kill(&sp->task);
4884 s2io_reset(sp);
4885 free_irq(dev->irq, dev);
4886 free_rx_buffers(sp);
4887 return -ENODEV;
4888 }
4889
25fff88e 4890 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4891
1da177e4
LT
4892 atomic_set(&sp->card_state, CARD_UP);
4893 return 0;
4894}
4895
20346722 4896/**
1da177e4
LT
4897 * s2io_restart_nic - Resets the NIC.
4898 * @data : long pointer to the device private structure
4899 * Description:
4900 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 4901 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
4902 * the run time of the watch dog routine which is run holding a
4903 * spin lock.
4904 */
4905
4906static void s2io_restart_nic(unsigned long data)
4907{
4908 struct net_device *dev = (struct net_device *) data;
4909 nic_t *sp = dev->priv;
4910
4911 s2io_card_down(sp);
4912 if (s2io_card_up(sp)) {
4913 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4914 dev->name);
4915 }
4916 netif_wake_queue(dev);
4917 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4918 dev->name);
20346722 4919
1da177e4
LT
4920}
4921
20346722 4922/**
4923 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
4924 * @dev : Pointer to net device structure
4925 * Description:
4926 * This function is triggered if the Tx Queue is stopped
4927 * for a pre-defined amount of time when the Interface is still up.
4928 * If the Interface is jammed in such a situation, the hardware is
4929 * reset (by s2io_close) and restarted again (by s2io_open) to
4930 * overcome any problem that might have been caused in the hardware.
4931 * Return value:
4932 * void
4933 */
4934
4935static void s2io_tx_watchdog(struct net_device *dev)
4936{
4937 nic_t *sp = dev->priv;
4938
4939 if (netif_carrier_ok(dev)) {
4940 schedule_work(&sp->rst_timer_task);
4941 }
4942}
4943
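/*
 * For s2io_tx_watchdog() above to ever run, the net_device timeout hooks
 * must be set during initialization (outside this excerpt).  A sketch of
 * the conventional hookup, not compiled here; the timeout value is only an
 * example:
 */
#if 0
        dev->tx_timeout = s2io_tx_watchdog;
        dev->watchdog_timeo = HZ;
#endif
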
4944/**
4945 * rx_osm_handler - To perform some OS related operations on SKB.
4946 * @sp: private member of the device structure,pointer to s2io_nic structure.
4947 * @skb : the socket buffer pointer.
4948 * @len : length of the packet
4949 * @cksum : FCS checksum of the frame.
4950 * @ring_no : the ring from which this RxD was extracted.
20346722 4951 * Description:
1da177e4
LT
 4952 * This function is called by the Rx interrupt service routine to perform
4953 * some OS related operations on the SKB before passing it to the upper
4954 * layers. It mainly checks if the checksum is OK, if so adds it to the
4955 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4956 * to the upper layer. If the checksum is wrong, it increments the Rx
4957 * packet error count, frees the SKB and returns error.
4958 * Return value:
4959 * SUCCESS on success and -1 on failure.
4960 */
20346722 4961static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
1da177e4 4962{
20346722 4963 nic_t *sp = ring_data->nic;
1da177e4 4964 struct net_device *dev = (struct net_device *) sp->dev;
20346722 4965 struct sk_buff *skb = (struct sk_buff *)
4966 ((unsigned long) rxdp->Host_Control);
4967 int ring_no = ring_data->ring_no;
1da177e4
LT
4968 u16 l3_csum, l4_csum;
4969#ifdef CONFIG_2BUFF_MODE
20346722 4970 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4971 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4972 int get_block = ring_data->rx_curr_get_info.block_index;
4973 int get_off = ring_data->rx_curr_get_info.offset;
4974 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
1da177e4 4975 unsigned char *buff;
20346722 4976#else
4977 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
1da177e4 4978#endif
20346722 4979 skb->dev = dev;
4980 if (rxdp->Control_1 & RXD_T_CODE) {
4981 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4982 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4983 dev->name, err);
1ddc50d4 4984 dev_kfree_skb(skb);
4985 sp->stats.rx_crc_errors++;
4986 atomic_dec(&sp->rx_bufs_left[ring_no]);
4987 rxdp->Host_Control = 0;
4988 return 0;
20346722 4989 }
1da177e4 4990
20346722 4991 /* Updating statistics */
4992 rxdp->Host_Control = 0;
4993 sp->rx_pkt_count++;
4994 sp->stats.rx_packets++;
4995#ifndef CONFIG_2BUFF_MODE
4996 sp->stats.rx_bytes += len;
4997#else
4998 sp->stats.rx_bytes += buf0_len + buf2_len;
4999#endif
5000
5001#ifndef CONFIG_2BUFF_MODE
5002 skb_put(skb, len);
5003#else
5004 buff = skb_push(skb, buf0_len);
5005 memcpy(buff, ba->ba_0, buf0_len);
5006 skb_put(skb, buf2_len);
5007#endif
5008
5009 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5010 (sp->rx_csum)) {
5011 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
5012 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5013 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 5014 /*
1da177e4
LT
5015 * NIC verifies if the Checksum of the received
5016 * frame is Ok or not and accordingly returns
5017 * a flag in the RxD.
5018 */
5019 skb->ip_summed = CHECKSUM_UNNECESSARY;
5020 } else {
20346722 5021 /*
5022 * Packet with erroneous checksum, let the
1da177e4
LT
5023 * upper layers deal with it.
5024 */
5025 skb->ip_summed = CHECKSUM_NONE;
5026 }
5027 } else {
5028 skb->ip_summed = CHECKSUM_NONE;
5029 }
5030
1da177e4 5031 skb->protocol = eth_type_trans(skb, dev);
1da177e4 5032#ifdef CONFIG_S2IO_NAPI
be3a6b02 5033 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5034 /* Queueing the vlan frame to the upper layer */
5035 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5036 RXD_GET_VLAN_TAG(rxdp->Control_2));
5037 } else {
5038 netif_receive_skb(skb);
5039 }
1da177e4 5040#else
be3a6b02 5041 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5042 /* Queueing the vlan frame to the upper layer */
5043 vlan_hwaccel_rx(skb, sp->vlgrp,
5044 RXD_GET_VLAN_TAG(rxdp->Control_2));
5045 } else {
5046 netif_rx(skb);
5047 }
1da177e4 5048#endif
1da177e4 5049 dev->last_rx = jiffies;
1da177e4 5050 atomic_dec(&sp->rx_bufs_left[ring_no]);
1da177e4
LT
5051 return SUCCESS;
5052}
5053
5054/**
5055 * s2io_link - stops/starts the Tx queue.
5056 * @sp : private member of the device structure, which is a pointer to the
5057 * s2io_nic structure.
 5058 * @link : indicates whether link is UP/DOWN.
 5059 * Description:
 5060 * This function stops/starts the Tx queue depending on whether the link
20346722 5061 * status of the NIC is down or up. This is called by the Alarm
5062 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
5063 * Return value:
5064 * void.
5065 */
5066
20346722 5067void s2io_link(nic_t * sp, int link)
1da177e4
LT
5068{
5069 struct net_device *dev = (struct net_device *) sp->dev;
5070
5071 if (link != sp->last_link_state) {
5072 if (link == LINK_DOWN) {
5073 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5074 netif_carrier_off(dev);
5075 } else {
5076 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5077 netif_carrier_on(dev);
5078 }
5079 }
5080 sp->last_link_state = link;
5081}
5082
5083/**
20346722 5084 * get_xena_rev_id - to identify revision ID of xena.
5085 * @pdev : PCI Dev structure
5086 * Description:
5087 * Function to identify the Revision ID of xena.
5088 * Return value:
5089 * returns the revision ID of the device.
5090 */
5091
5092int get_xena_rev_id(struct pci_dev *pdev)
5093{
5094 u8 id = 0;
5095 int ret;
5096 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5097 return id;
5098}
5099
5100/**
5101 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5102 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5103 * s2io_nic structure.
5104 * Description:
5105 * This function initializes a few of the PCI and PCI-X configuration registers
5106 * with recommended values.
5107 * Return value:
5108 * void
5109 */
5110
5111static void s2io_init_pci(nic_t * sp)
5112{
20346722 5113 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
5114
5115 /* Enable Data Parity Error Recovery in PCI-X command register. */
5116 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5117 &(pcix_cmd));
1da177e4 5118 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5119 (pcix_cmd | 1));
1da177e4 5120 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5121 &(pcix_cmd));
1da177e4
LT
5122
5123 /* Set the PErr Response bit in PCI command register. */
5124 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5125 pci_write_config_word(sp->pdev, PCI_COMMAND,
5126 (pci_cmd | PCI_COMMAND_PARITY));
5127 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5128
1da177e4 5129 /* Forcibly disabling relaxed ordering capability of the card. */
20346722 5130 pcix_cmd &= 0xfffd;
1da177e4 5131 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5132 pcix_cmd);
1da177e4 5133 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5134 &(pcix_cmd));
1da177e4
LT
5135}
5136
5137MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5138MODULE_LICENSE("GPL");
5139module_param(tx_fifo_num, int, 0);
1da177e4 5140module_param(rx_ring_num, int, 0);
20346722 5141module_param_array(tx_fifo_len, uint, NULL, 0);
5142module_param_array(rx_ring_sz, uint, NULL, 0);
20346722 5143module_param_array(rts_frm_len, uint, NULL, 0);
5e25b9dd 5144module_param(use_continuous_tx_intrs, int, 1);
1da177e4 5145module_param(rmac_pause_time, int, 0);
5146module_param(mc_pause_threshold_q0q3, int, 0);
5147module_param(mc_pause_threshold_q4q7, int, 0);
5148module_param(shared_splits, int, 0);
5149module_param(tmac_util_period, int, 0);
5150module_param(rmac_util_period, int, 0);
b6e3f982 5151module_param(bimodal, bool, 0);
1da177e4 5152#ifndef CONFIG_S2IO_NAPI
5153module_param(indicate_max_pkts, int, 0);
5154#endif
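/*
 * Example (hypothetical values) of overriding the parameters above at module
 * load time; anything left out keeps its built-in default:
 *
 *	insmod s2io.ko tx_fifo_num=2 tx_fifo_len=1024,1024 \
 *		rx_ring_num=3 rx_ring_sz=30,30,30
 */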
20346722 5155
1da177e4 5156/**
20346722 5157 * s2io_init_nic - Initialization of the adapter.
1da177e4 5158 * @pdev : structure containing the PCI related information of the device.
5159 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5160 * Description:
5161 * The function initializes an adapter identified by the pci_dev structure.
20346722 5162 * All OS related initialization, including memory and device structure and
5163 * initialization of the device private variables, is done. Also the swapper
5164 * control register is initialized to enable read and write into the I/O
1da177e4 5165 * registers of the device.
5166 * Return value:
5167 * returns 0 on success and negative on failure.
5168 */
5169
5170static int __devinit
5171s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5172{
5173 nic_t *sp;
5174 struct net_device *dev;
1da177e4 5175	int i, j, ret;
5176 int dma_flag = FALSE;
5177 u32 mac_up, mac_down;
5178 u64 val64 = 0, tmp64 = 0;
5179 XENA_dev_config_t __iomem *bar0 = NULL;
5180 u16 subid;
5181 mac_info_t *mac_control;
5182 struct config_param *config;
541ae68f 5183 int mode;
1da177e4 5184
20346722 5185#ifdef CONFIG_S2IO_NAPI
5186 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5187#endif
1da177e4 5188
5189 if ((ret = pci_enable_device(pdev))) {
5190 DBG_PRINT(ERR_DBG,
5191 "s2io_init_nic: pci_enable_device failed\n");
5192 return ret;
5193 }
5194
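	/*
	 * Prefer 64-bit DMA addressing; if the platform cannot provide it,
	 * fall back to a 32-bit mask, and give up if even that fails.
	 */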
1e7f0bd8 5195 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1da177e4 5196		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5197 dma_flag = TRUE;
1da177e4 5198 if (pci_set_consistent_dma_mask
1e7f0bd8 5199 (pdev, DMA_64BIT_MASK)) {
1da177e4 5200			DBG_PRINT(ERR_DBG,
5201				  "Unable to obtain 64bit DMA for "
5202				  "consistent allocations\n");
5203 pci_disable_device(pdev);
5204 return -ENOMEM;
5205 }
1e7f0bd8 5206 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
1da177e4 5207		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5208 } else {
5209 pci_disable_device(pdev);
5210 return -ENOMEM;
5211 }
5212
5213 if (pci_request_regions(pdev, s2io_driver_name)) {
5214		DBG_PRINT(ERR_DBG, "Request Regions failed\n");
5215 pci_disable_device(pdev);
5216 return -ENODEV;
5217 }
5218
5219 dev = alloc_etherdev(sizeof(nic_t));
5220 if (dev == NULL) {
5221 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5222 pci_disable_device(pdev);
5223 pci_release_regions(pdev);
5224 return -ENODEV;
5225 }
5226
5227 pci_set_master(pdev);
5228 pci_set_drvdata(pdev, dev);
5229 SET_MODULE_OWNER(dev);
5230 SET_NETDEV_DEV(dev, &pdev->dev);
5231
5232 /* Private member variable initialized to s2io NIC structure */
5233 sp = dev->priv;
5234 memset(sp, 0, sizeof(nic_t));
5235 sp->dev = dev;
5236 sp->pdev = pdev;
1da177e4 5237 sp->high_dma_flag = dma_flag;
1da177e4 5238 sp->device_enabled_once = FALSE;
1da177e4 5239
541ae68f 5240 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5241 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5242 sp->device_type = XFRAME_II_DEVICE;
5243 else
5244 sp->device_type = XFRAME_I_DEVICE;
5245
1da177e4 5246	/* Initialize some PCI/PCI-X fields of the NIC. */
5247 s2io_init_pci(sp);
5248
20346722 5249 /*
1da177e4 5250 * Setting the device configuration parameters.
20346722 5251 * Most of these parameters can be specified by the user during
5252 * module insertion as they are module loadable parameters. If
5253	 * these parameters are not specified during load time, they
1da177e4 5254	 * are initialized with default values.
5255 */
5256 mac_control = &sp->mac_control;
5257 config = &sp->config;
5258
5259 /* Tx side parameters. */
5260 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5261 config->tx_fifo_num = tx_fifo_num;
5262 for (i = 0; i < MAX_TX_FIFOS; i++) {
5263 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5264 config->tx_cfg[i].fifo_priority = i;
5265 }
5266
20346722 5267 /* mapping the QoS priority to the configured fifos */
5268 for (i = 0; i < MAX_TX_FIFOS; i++)
5269 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5270
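	/*
	 * Utilization-based Tx interrupts are the default; the loop below
	 * falls back to per-list interrupts for the whole device if any FIFO
	 * is configured with fewer than 65 descriptors.
	 */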
1da177e4 5271	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5272 for (i = 0; i < config->tx_fifo_num; i++) {
5273 config->tx_cfg[i].f_no_snoop =
5274 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5275 if (config->tx_cfg[i].fifo_len < 65) {
5276 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5277 break;
5278 }
5279 }
5280 config->max_txds = MAX_SKB_FRAGS;
5281
5282 /* Rx side parameters. */
5283 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5284 config->rx_ring_num = rx_ring_num;
5285 for (i = 0; i < MAX_RX_RINGS; i++) {
5286 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5287 (MAX_RXDS_PER_BLOCK + 1);
5288 config->rx_cfg[i].ring_priority = i;
5289 }
5290
5291 for (i = 0; i < rx_ring_num; i++) {
5292 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5293 config->rx_cfg[i].f_no_snoop =
5294 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5295 }
5296
5297 /* Setting Mac Control parameters */
5298 mac_control->rmac_pause_time = rmac_pause_time;
5299 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5300 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5301
5302
5303 /* Initialize Ring buffer parameters. */
5304 for (i = 0; i < config->rx_ring_num; i++)
5305 atomic_set(&sp->rx_bufs_left[i], 0);
5306
7ba013ac 5307 /* Initialize the number of ISRs currently running */
5308 atomic_set(&sp->isr_cnt, 0);
5309
1da177e4 5310	/* initialize the shared memory used by the NIC and the host */
5311 if (init_shared_mem(sp)) {
5312 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5313 dev->name);
5314 ret = -ENOMEM;
5315 goto mem_alloc_failed;
5316 }
5317
5318 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5319 pci_resource_len(pdev, 0));
5320 if (!sp->bar0) {
5321 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5322 dev->name);
5323 ret = -ENOMEM;
5324 goto bar0_remap_failed;
5325 }
5326
5327 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5328 pci_resource_len(pdev, 2));
5329 if (!sp->bar1) {
5330 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5331 dev->name);
5332 ret = -ENOMEM;
5333 goto bar1_remap_failed;
5334 }
5335
5336 dev->irq = pdev->irq;
5337 dev->base_addr = (unsigned long) sp->bar0;
5338
5339 /* Initializing the BAR1 address as the start of the FIFO pointer. */
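	/*
	 * Each Tx FIFO is given a 128 KB (0x20000 byte) window in BAR1, so
	 * FIFO j starts at bar1 + (j * 0x20000).
	 */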
5340 for (j = 0; j < MAX_TX_FIFOS; j++) {
5341 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5342 (sp->bar1 + (j * 0x00020000));
5343 }
5344
5345 /* Driver entry points */
5346 dev->open = &s2io_open;
5347 dev->stop = &s2io_close;
5348 dev->hard_start_xmit = &s2io_xmit;
5349 dev->get_stats = &s2io_get_stats;
5350 dev->set_multicast_list = &s2io_set_multicast;
5351 dev->do_ioctl = &s2io_ioctl;
5352 dev->change_mtu = &s2io_change_mtu;
5353 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
be3a6b02 5354 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5355 dev->vlan_rx_register = s2io_vlan_rx_register;
5356 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
20346722 5357
1da177e4 5358	/*
5359 * will use eth_mac_addr() for dev->set_mac_address
5360 * mac address will be set every time dev->open() is called
5361 */
20346722 5362#if defined(CONFIG_S2IO_NAPI)
1da177e4 5363 dev->poll = s2io_poll;
20346722 5364 dev->weight = 32;
1da177e4 5365#endif
5366
5367 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5368 if (sp->high_dma_flag == TRUE)
5369 dev->features |= NETIF_F_HIGHDMA;
5370#ifdef NETIF_F_TSO
5371 dev->features |= NETIF_F_TSO;
5372#endif
5373
5374 dev->tx_timeout = &s2io_tx_watchdog;
5375 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5376 INIT_WORK(&sp->rst_timer_task,
5377 (void (*)(void *)) s2io_restart_nic, dev);
5378 INIT_WORK(&sp->set_link_task,
5379 (void (*)(void *)) s2io_set_link, sp);
5380
541ae68f 5381 if (!(sp->device_type & XFRAME_II_DEVICE)) {
5382 pci_save_state(sp->pdev);
5383 }
1da177e4 5384
5385 /* Setting swapper control on the NIC, for proper reset operation */
5386 if (s2io_set_swapper(sp)) {
5387 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5388 dev->name);
5389 ret = -EAGAIN;
5390 goto set_swap_failed;
5391 }
5392
541ae68f 5393	/* Verify whether the Herc works in the slot it's placed into */
5394 if (sp->device_type & XFRAME_II_DEVICE) {
5395 mode = s2io_verify_pci_mode(sp);
5396 if (mode < 0) {
5397 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5398 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5399 ret = -EBADSLT;
5400 goto set_swap_failed;
5401 }
5402 }
5403
5404 /* Not needed for Herc */
5405 if (sp->device_type & XFRAME_I_DEVICE) {
5406 /*
5407 * Fix for all "FFs" MAC address problems observed on
5408 * Alpha platforms
5409 */
5410 fix_mac_address(sp);
5411 s2io_reset(sp);
5412 }
1da177e4 5413
5414 /*
1da177e4 5415	 * MAC address initialization.
5416 * For now only one mac address will be read and used.
5417 */
5418 bar0 = sp->bar0;
5419 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5420 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5421 writeq(val64, &bar0->rmac_addr_cmd_mem);
5422 wait_for_cmd_complete(sp);
5423
5424 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5425 mac_down = (u32) tmp64;
5426 mac_up = (u32) (tmp64 >> 32);
5427
5428	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
5429
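	/*
	 * The station address is left-justified in the 64-bit data register:
	 * MAC byte 0 sits in bits 63:56 and byte 5 in bits 23:16, hence the
	 * descending shifts below.
	 */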
5430 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5431 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5432 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5433 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5434 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5435 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5436
1da177e4 5437	/* Set the factory defined MAC address initially */
5438 dev->addr_len = ETH_ALEN;
5439 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5440
5441 /*
20346722 5442 * Initialize the tasklet status and link state flags
541ae68f 5443 * and the card state parameter
1da177e4 5444	 */
5445 atomic_set(&(sp->card_state), 0);
5446 sp->tasklet_status = 0;
5447 sp->link_state = 0;
5448
1da177e4 5449	/* Initialize spinlocks */
5450 spin_lock_init(&sp->tx_lock);
5451#ifndef CONFIG_S2IO_NAPI
5452 spin_lock_init(&sp->put_lock);
5453#endif
7ba013ac 5454 spin_lock_init(&sp->rx_lock);
1da177e4 5455
20346722 5456 /*
5457 * SXE-002: Configure link and activity LED to init state
5458 * on driver load.
1da177e4 5459	 */
5460 subid = sp->pdev->subsystem_device;
5461 if ((subid & 0xFF) >= 0x07) {
5462 val64 = readq(&bar0->gpio_control);
5463 val64 |= 0x0000800000000000ULL;
5464 writeq(val64, &bar0->gpio_control);
5465 val64 = 0x0411040400000000ULL;
5466 writeq(val64, (void __iomem *) bar0 + 0x2700);
5467 val64 = readq(&bar0->gpio_control);
5468 }
5469
5470 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5471
5472 if (register_netdev(dev)) {
5473 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5474 ret = -ENODEV;
5475 goto register_failed;
5476 }
5477
541ae68f 5478 if (sp->device_type & XFRAME_II_DEVICE) {
5479 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5480 dev->name);
5481 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5482 get_xena_rev_id(sp->pdev),
5483 s2io_driver_version);
5484 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5485 sp->def_mac_addr[0].mac_addr[0],
5486 sp->def_mac_addr[0].mac_addr[1],
5487 sp->def_mac_addr[0].mac_addr[2],
5488 sp->def_mac_addr[0].mac_addr[3],
5489 sp->def_mac_addr[0].mac_addr[4],
5490 sp->def_mac_addr[0].mac_addr[5]);
5491		mode = s2io_print_pci_mode(sp);
5492 if (mode < 0) {
5493 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5494 ret = -EBADSLT;
5495 goto set_swap_failed;
5496 }
5497 } else {
5498 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5499 dev->name);
5500 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5501 get_xena_rev_id(sp->pdev),
5502 s2io_driver_version);
5503 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5504 sp->def_mac_addr[0].mac_addr[0],
5505 sp->def_mac_addr[0].mac_addr[1],
5506 sp->def_mac_addr[0].mac_addr[2],
5507 sp->def_mac_addr[0].mac_addr[3],
5508 sp->def_mac_addr[0].mac_addr[4],
5509 sp->def_mac_addr[0].mac_addr[5]);
5510 }
5511
7ba013ac 5512 /* Initialize device name */
5513 strcpy(sp->name, dev->name);
541ae68f 5514 if (sp->device_type & XFRAME_II_DEVICE)
5515 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5516 else
5517 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
7ba013ac 5518
b6e3f982 5519 /* Initialize bimodal Interrupts */
5520 sp->config.bimodal = bimodal;
5521 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5522 sp->config.bimodal = 0;
5523		DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
5524 dev->name);
5525 }
5526
20346722 5527 /*
5528 * Make Link state as off at this point, when the Link change
5529 * interrupt comes the state will be automatically changed to
1da177e4 5530	 * the right state.
5531 */
5532 netif_carrier_off(dev);
1da177e4 5533
5534 return 0;
5535
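	/*
	 * Error unwind: each label below releases resources in the reverse
	 * order of their acquisition, so jumping to a given label skips the
	 * teardown of anything that was never set up.
	 */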
5536 register_failed:
5537 set_swap_failed:
5538 iounmap(sp->bar1);
5539 bar1_remap_failed:
5540 iounmap(sp->bar0);
5541 bar0_remap_failed:
5542 mem_alloc_failed:
5543 free_shared_mem(sp);
5544 pci_disable_device(pdev);
5545 pci_release_regions(pdev);
5546 pci_set_drvdata(pdev, NULL);
5547 free_netdev(dev);
5548
5549 return ret;
5550}
5551
5552/**
20346722 5553 * s2io_rem_nic - Free the PCI device
1da177e4 5554 * @pdev: structure containing the PCI related information of the device.
20346722 5555 * Description: This function is called by the PCI subsystem to release a
1da177e4 5556 * PCI device and free up all resources held by the device. This could
20346722 5557 * be in response to a Hot plug event or when the driver is to be removed
1da177e4 5558 * from memory.
5559 */
5560
5561static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5562{
5563 struct net_device *dev =
5564 (struct net_device *) pci_get_drvdata(pdev);
5565 nic_t *sp;
5566
5567 if (dev == NULL) {
5568 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5569 return;
5570 }
5571
5572 sp = dev->priv;
5573 unregister_netdev(dev);
5574
5575 free_shared_mem(sp);
5576 iounmap(sp->bar0);
5577 iounmap(sp->bar1);
5578 pci_disable_device(pdev);
5579 pci_release_regions(pdev);
5580 pci_set_drvdata(pdev, NULL);
1da177e4 5581	free_netdev(dev);
5582}
5583
5584/**
5585 * s2io_starter - Entry point for the driver
5586 * Description: This function is the entry point for the driver. It registers
5587 * the driver with the PCI subsystem.
5588 */
5589
5590int __init s2io_starter(void)
5591{
5592 return pci_module_init(&s2io_driver);
5593}
5594
5595/**
20346722 5596 * s2io_closer - Cleanup routine for the driver
1da177e4 5597 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
5598 */
5599
20346722 5600void s2io_closer(void)
1da177e4 5601{
5602 pci_unregister_driver(&s2io_driver);
5603 DBG_PRINT(INIT_DBG, "cleanup done\n");
5604}
5605
5606module_init(s2io_starter);
5607module_exit(s2io_closer);