[PATCH] S2io: Miscellaneous fixes
drivers/net/s2io.c
1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722 14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
 16 * issues in the Tx watchdog function. Also for
17 * patiently answering all those innumerable
 18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code parts that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
27 * The module loadable parameters that are supported by the driver and a brief
 28 * explanation of all the variables.
20346722 29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
 33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
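/*
 * Illustrative load-time usage of the parameters documented above (a
 * sketch only -- the values and the comma-separated array syntax are
 * examples, not recommendations):
 *
 *   modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 \
 *                 rx_ring_num=2 rx_ring_sz=8,8
 */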
37
38#include <linux/config.h>
39#include <linux/module.h>
40#include <linux/types.h>
41#include <linux/errno.h>
42#include <linux/ioport.h>
43#include <linux/pci.h>
1e7f0bd8 44#include <linux/dma-mapping.h>
45#include <linux/kernel.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/skbuff.h>
49#include <linux/init.h>
50#include <linux/delay.h>
51#include <linux/stddef.h>
52#include <linux/ioctl.h>
53#include <linux/timex.h>
54#include <linux/sched.h>
55#include <linux/ethtool.h>
56#include <linux/version.h>
57#include <linux/workqueue.h>
be3a6b02 58#include <linux/if_vlan.h>
1da177e4 59
60#include <asm/system.h>
61#include <asm/uaccess.h>
20346722 62#include <asm/io.h>
63
64/* local include */
65#include "s2io.h"
66#include "s2io-regs.h"
67
68/* S2io Driver name & version. */
20346722 69static char s2io_driver_name[] = "Neterion";
a371a07d 70static char s2io_driver_version[] = "Version 2.0.2.0";
1da177e4 71
5e25b9dd 72static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{
74 int ret;
75
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79 return ret;
80}
81
20346722 82/*
 83 * Cards with the following subsystem_ids have a link state indication
 84 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 85 * The macro below identifies these cards, given the subsystem_id.
86 */
541ae68f 87#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88 (dev_type == XFRAME_I_DEVICE) ? \
89 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
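/*
 * Worked example (hypothetical subsystem id): for an Xframe-I card with
 * subid 0x640C the macro above evaluates to 1, since 0x640C falls in the
 * 0x640B-0x640D range; for any Xframe-II card it evaluates to 0.
 */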
91
92#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
94#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
95#define PANIC 1
96#define LOW 2
97static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98{
99 int level = 0;
20346722 100 mac_info_t *mac_control;
101
102 mac_control = &sp->mac_control;
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
1da177e4 104 level = LOW;
fe113638 105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
106 level = PANIC;
107 }
108 }
109
110 return level;
111}
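/*
 * Reading of the helper above with illustrative numbers: for a ring with
 * pkt_cnt == 1000, an rxb_size of 500 returns LOW (more than 16 buffers
 * consumed), and any rxb_size at or below MAX_RXDS_PER_BLOCK escalates
 * the return value to PANIC.
 */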
112
113/* Ethtool related variables and Macros. */
114static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
120};
121
122static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123 {"tmac_frms"},
124 {"tmac_data_octets"},
125 {"tmac_drop_frms"},
126 {"tmac_mcst_frms"},
127 {"tmac_bcst_frms"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
131 {"tmac_vld_ip"},
132 {"tmac_drop_ip"},
133 {"tmac_icmp"},
134 {"tmac_rst_tcp"},
135 {"tmac_tcp"},
136 {"tmac_udp"},
137 {"rmac_vld_frms"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
140 {"rmac_drop_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
144 {"rmac_long_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
149 {"rmac_frag_frms"},
150 {"rmac_jabber_frms"},
151 {"rmac_ip"},
152 {"rmac_ip_octets"},
153 {"rmac_hdr_err_ip"},
154 {"rmac_drop_ip"},
155 {"rmac_icmp"},
156 {"rmac_tcp"},
157 {"rmac_udp"},
158 {"rmac_err_drp_udp"},
159 {"rmac_pause_cnt"},
160 {"rmac_accepted_ip"},
161 {"rmac_err_tcp"},
7ba013ac 162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
165};
166
167#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
168#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
169
170#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
171#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
172
25fff88e 173#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
174 init_timer(&timer); \
175 timer.function = handle; \
176 timer.data = (unsigned long) arg; \
177 mod_timer(&timer, (jiffies + exp)) \
178
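/*
 * Minimal usage sketch for S2IO_TIMER_CONF above (handler, argument and
 * expiry are illustrative): it initializes the timer, attaches the handler
 * and its argument, and arms the timer in one step:
 *
 *   S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle,
 *                   (unsigned long) dev, (HZ / 2));
 */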
be3a6b02 179/* Add the vlan */
180static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
182{
183 nic_t *nic = dev->priv;
184 unsigned long flags;
185
186 spin_lock_irqsave(&nic->tx_lock, flags);
187 nic->vlgrp = grp;
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
189}
190
191/* Unregister the vlan */
192static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193{
194 nic_t *nic = dev->priv;
195 unsigned long flags;
196
197 spin_lock_irqsave(&nic->tx_lock, flags);
198 if (nic->vlgrp)
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
201}
202
20346722 203/*
204 * Constants to be programmed into the Xena's registers, to configure
205 * the XAUI.
206 */
207
208#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
209#define END_SIGN 0x0
210
541ae68f 211static u64 herc_act_dtx_cfg[] = {
212 /* Set address */
213 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
214 /* Write data */
215 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
216 /* Set address */
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218 /* Write data */
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220 /* Set address */
221 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
222 /* Write data */
223 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
224 /* Done */
225 END_SIGN
226};
227
228static u64 xena_mdio_cfg[] = {
229 /* Reset PMA PLL */
230 0xC001010000000000ULL, 0xC0010100000000E0ULL,
231 0xC0010100008000E4ULL,
232 /* Remove Reset from PMA PLL */
233 0xC001010000000000ULL, 0xC0010100000000E0ULL,
234 0xC0010100000000E4ULL,
235 END_SIGN
236};
237
541ae68f 238static u64 xena_dtx_cfg[] = {
239 0x8000051500000000ULL, 0x80000515000000E0ULL,
240 0x80000515D93500E4ULL, 0x8001051500000000ULL,
241 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
242 0x8002051500000000ULL, 0x80020515000000E0ULL,
243 0x80020515F21000E4ULL,
244 /* Set PADLOOPBACKN */
245 0x8002051500000000ULL, 0x80020515000000E0ULL,
246 0x80020515B20000E4ULL, 0x8003051500000000ULL,
247 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
248 0x8004051500000000ULL, 0x80040515000000E0ULL,
249 0x80040515B20000E4ULL, 0x8005051500000000ULL,
250 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
251 SWITCH_SIGN,
252 /* Remove PADLOOPBACKN */
253 0x8002051500000000ULL, 0x80020515000000E0ULL,
254 0x80020515F20000E4ULL, 0x8003051500000000ULL,
255 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
256 0x8004051500000000ULL, 0x80040515000000E0ULL,
257 0x80040515F20000E4ULL, 0x8005051500000000ULL,
258 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
259 END_SIGN
260};
261
20346722 262/*
263 * Constants for Fixing the MacAddress problem seen mostly on
264 * Alpha machines.
265 */
266static u64 fix_mac[] = {
267 0x0060000000000000ULL, 0x0060600000000000ULL,
268 0x0040600000000000ULL, 0x0000600000000000ULL,
269 0x0020600000000000ULL, 0x0060600000000000ULL,
270 0x0020600000000000ULL, 0x0060600000000000ULL,
271 0x0020600000000000ULL, 0x0060600000000000ULL,
272 0x0020600000000000ULL, 0x0060600000000000ULL,
273 0x0020600000000000ULL, 0x0060600000000000ULL,
274 0x0020600000000000ULL, 0x0060600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0000600000000000ULL,
280 0x0040600000000000ULL, 0x0060600000000000ULL,
281 END_SIGN
282};
283
284/* Module Loadable parameters. */
285static unsigned int tx_fifo_num = 1;
286static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
287 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
288static unsigned int rx_ring_num = 1;
289static unsigned int rx_ring_sz[MAX_RX_RINGS] =
290 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
20346722 291static unsigned int rts_frm_len[MAX_RX_RINGS] =
292 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
5e25b9dd 293static unsigned int use_continuous_tx_intrs = 1;
294static unsigned int rmac_pause_time = 65535;
295static unsigned int mc_pause_threshold_q0q3 = 187;
296static unsigned int mc_pause_threshold_q4q7 = 187;
297static unsigned int shared_splits;
298static unsigned int tmac_util_period = 5;
299static unsigned int rmac_util_period = 5;
b6e3f982 300static unsigned int bimodal = 0;
301#ifndef CONFIG_S2IO_NAPI
302static unsigned int indicate_max_pkts;
303#endif
304
20346722 305/*
1da177e4 306 * S2IO device table.
20346722 307 * This table lists all the devices that this driver supports.
308 */
309static struct pci_device_id s2io_tbl[] __devinitdata = {
310 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
311 PCI_ANY_ID, PCI_ANY_ID},
312 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
313 PCI_ANY_ID, PCI_ANY_ID},
314 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
20346722 315 PCI_ANY_ID, PCI_ANY_ID},
316 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
317 PCI_ANY_ID, PCI_ANY_ID},
318 {0,}
319};
320
321MODULE_DEVICE_TABLE(pci, s2io_tbl);
322
323static struct pci_driver s2io_driver = {
324 .name = "S2IO",
325 .id_table = s2io_tbl,
326 .probe = s2io_init_nic,
327 .remove = __devexit_p(s2io_rem_nic),
328};
329
330/* A simplifier macro used both by init and free shared_mem Fns(). */
331#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
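/*
 * TXD_MEM_PAGE_CNT() is plain ceiling division, e.g. a FIFO of 100
 * descriptor lists at 32 lists per page needs (100 + 32 - 1) / 32 = 4
 * pages (figures are illustrative only).
 */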
332
333/**
334 * init_shared_mem - Allocation and Initialization of Memory
335 * @nic: Device private variable.
20346722 336 * Description: The function allocates all the memory areas shared
337 * between the NIC and the driver. This includes Tx descriptors,
338 * Rx descriptors and the statistics block.
339 */
340
341static int init_shared_mem(struct s2io_nic *nic)
342{
343 u32 size;
344 void *tmp_v_addr, *tmp_v_addr_next;
345 dma_addr_t tmp_p_addr, tmp_p_addr_next;
346 RxD_block_t *pre_rxd_blk = NULL;
20346722 347 int i, j, blk_cnt, rx_sz, tx_sz;
348 int lst_size, lst_per_page;
349 struct net_device *dev = nic->dev;
350#ifdef CONFIG_2BUFF_MODE
20346722 351 u64 tmp;
352 buffAdd_t *ba;
353#endif
354
355 mac_info_t *mac_control;
356 struct config_param *config;
357
358 mac_control = &nic->mac_control;
359 config = &nic->config;
360
361
 362 /* Allocation and initialization of TXDLs in FIFOs */
363 size = 0;
364 for (i = 0; i < config->tx_fifo_num; i++) {
365 size += config->tx_cfg[i].fifo_len;
366 }
367 if (size > MAX_AVAILABLE_TXDS) {
0b1f7ebe 368 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
369 __FUNCTION__);
370 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
371 return FAILURE;
372 }
373
374 lst_size = (sizeof(TxD_t) * config->max_txds);
20346722 375 tx_sz = lst_size * size;
376 lst_per_page = PAGE_SIZE / lst_size;
377
378 for (i = 0; i < config->tx_fifo_num; i++) {
379 int fifo_len = config->tx_cfg[i].fifo_len;
380 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
20346722 381 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
382 GFP_KERNEL);
383 if (!mac_control->fifos[i].list_info) {
384 DBG_PRINT(ERR_DBG,
385 "Malloc failed for list_info\n");
386 return -ENOMEM;
387 }
20346722 388 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
389 }
390 for (i = 0; i < config->tx_fifo_num; i++) {
391 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
392 lst_per_page);
20346722 393 mac_control->fifos[i].tx_curr_put_info.offset = 0;
394 mac_control->fifos[i].tx_curr_put_info.fifo_len =
1da177e4 395 config->tx_cfg[i].fifo_len - 1;
20346722 396 mac_control->fifos[i].tx_curr_get_info.offset = 0;
397 mac_control->fifos[i].tx_curr_get_info.fifo_len =
1da177e4 398 config->tx_cfg[i].fifo_len - 1;
20346722 399 mac_control->fifos[i].fifo_no = i;
400 mac_control->fifos[i].nic = nic;
401 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
402
403 for (j = 0; j < page_num; j++) {
404 int k = 0;
405 dma_addr_t tmp_p;
406 void *tmp_v;
407 tmp_v = pci_alloc_consistent(nic->pdev,
408 PAGE_SIZE, &tmp_p);
409 if (!tmp_v) {
410 DBG_PRINT(ERR_DBG,
411 "pci_alloc_consistent ");
412 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
413 return -ENOMEM;
414 }
415 while (k < lst_per_page) {
416 int l = (j * lst_per_page) + k;
417 if (l == config->tx_cfg[i].fifo_len)
20346722 418 break;
419 mac_control->fifos[i].list_info[l].list_virt_addr =
1da177e4 420 tmp_v + (k * lst_size);
20346722 421 mac_control->fifos[i].list_info[l].list_phy_addr =
422 tmp_p + (k * lst_size);
423 k++;
424 }
425 }
426 }
427
428 /* Allocation and initialization of RXDs in Rings */
429 size = 0;
430 for (i = 0; i < config->rx_ring_num; i++) {
431 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
432 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
433 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
434 i);
435 DBG_PRINT(ERR_DBG, "RxDs per Block");
436 return FAILURE;
437 }
438 size += config->rx_cfg[i].num_rxd;
20346722 439 mac_control->rings[i].block_count =
1da177e4 440 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
20346722 441 mac_control->rings[i].pkt_cnt =
442 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
1da177e4 443 }
20346722 444 size = (size * (sizeof(RxD_t)));
445 rx_sz = size;
446
447 for (i = 0; i < config->rx_ring_num; i++) {
20346722 448 mac_control->rings[i].rx_curr_get_info.block_index = 0;
449 mac_control->rings[i].rx_curr_get_info.offset = 0;
450 mac_control->rings[i].rx_curr_get_info.ring_len =
1da177e4 451 config->rx_cfg[i].num_rxd - 1;
20346722 452 mac_control->rings[i].rx_curr_put_info.block_index = 0;
453 mac_control->rings[i].rx_curr_put_info.offset = 0;
454 mac_control->rings[i].rx_curr_put_info.ring_len =
1da177e4 455 config->rx_cfg[i].num_rxd - 1;
20346722 456 mac_control->rings[i].nic = nic;
457 mac_control->rings[i].ring_no = i;
458
459 blk_cnt =
460 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
461 /* Allocating all the Rx blocks */
462 for (j = 0; j < blk_cnt; j++) {
463#ifndef CONFIG_2BUFF_MODE
464 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
465#else
466 size = SIZE_OF_BLOCK;
467#endif
468 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
469 &tmp_p_addr);
470 if (tmp_v_addr == NULL) {
471 /*
20346722 472 * In case of failure, free_shared_mem()
473 * is called, which should free any
 474 * memory that was allocated before the
475 * failure happened.
476 */
20346722 477 mac_control->rings[i].rx_blocks[j].block_virt_addr =
478 tmp_v_addr;
479 return -ENOMEM;
480 }
481 memset(tmp_v_addr, 0, size);
20346722 482 mac_control->rings[i].rx_blocks[j].block_virt_addr =
483 tmp_v_addr;
484 mac_control->rings[i].rx_blocks[j].block_dma_addr =
485 tmp_p_addr;
486 }
487 /* Interlinking all Rx Blocks */
488 for (j = 0; j < blk_cnt; j++) {
20346722 489 tmp_v_addr =
490 mac_control->rings[i].rx_blocks[j].block_virt_addr;
1da177e4 491 tmp_v_addr_next =
20346722 492 mac_control->rings[i].rx_blocks[(j + 1) %
1da177e4 493 blk_cnt].block_virt_addr;
20346722 494 tmp_p_addr =
495 mac_control->rings[i].rx_blocks[j].block_dma_addr;
1da177e4 496 tmp_p_addr_next =
20346722 497 mac_control->rings[i].rx_blocks[(j + 1) %
498 blk_cnt].block_dma_addr;
499
500 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
20346722 501 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
502 * marker.
503 */
504#ifndef CONFIG_2BUFF_MODE
505 pre_rxd_blk->reserved_2_pNext_RxD_block =
506 (unsigned long) tmp_v_addr_next;
507#endif
508 pre_rxd_blk->pNext_RxD_Blk_physical =
509 (u64) tmp_p_addr_next;
510 }
511 }
512
513#ifdef CONFIG_2BUFF_MODE
20346722 514 /*
515 * Allocation of Storages for buffer addresses in 2BUFF mode
516 * and the buffers as well.
517 */
518 for (i = 0; i < config->rx_ring_num; i++) {
519 blk_cnt =
520 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
20346722 521 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
1da177e4 522 GFP_KERNEL);
20346722 523 if (!mac_control->rings[i].ba)
524 return -ENOMEM;
525 for (j = 0; j < blk_cnt; j++) {
526 int k = 0;
20346722 527 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
528 (MAX_RXDS_PER_BLOCK + 1)),
529 GFP_KERNEL);
20346722 530 if (!mac_control->rings[i].ba[j])
531 return -ENOMEM;
532 while (k != MAX_RXDS_PER_BLOCK) {
20346722 533 ba = &mac_control->rings[i].ba[j][k];
1da177e4 534
20346722 535 ba->ba_0_org = (void *) kmalloc
536 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
537 if (!ba->ba_0_org)
538 return -ENOMEM;
20346722 539 tmp = (u64) ba->ba_0_org;
1da177e4 540 tmp += ALIGN_SIZE;
20346722 541 tmp &= ~((u64) ALIGN_SIZE);
542 ba->ba_0 = (void *) tmp;
543
20346722 544 ba->ba_1_org = (void *) kmalloc
545 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
546 if (!ba->ba_1_org)
547 return -ENOMEM;
20346722 548 tmp = (u64) ba->ba_1_org;
1da177e4 549 tmp += ALIGN_SIZE;
20346722 550 tmp &= ~((u64) ALIGN_SIZE);
551 ba->ba_1 = (void *) tmp;
552 k++;
553 }
554 }
555 }
556#endif
557
558 /* Allocation and initialization of Statistics block */
559 size = sizeof(StatInfo_t);
560 mac_control->stats_mem = pci_alloc_consistent
561 (nic->pdev, size, &mac_control->stats_mem_phy);
562
563 if (!mac_control->stats_mem) {
20346722 564 /*
565 * In case of failure, free_shared_mem() is called, which
 566 * should free any memory that was allocated before the
567 * failure happened.
568 */
569 return -ENOMEM;
570 }
571 mac_control->stats_mem_sz = size;
572
573 tmp_v_addr = mac_control->stats_mem;
574 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
575 memset(tmp_v_addr, 0, size);
576 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
577 (unsigned long long) tmp_p_addr);
578
579 return SUCCESS;
580}
581
20346722 582/**
583 * free_shared_mem - Free the allocated Memory
584 * @nic: Device private variable.
585 * Description: This function is to free all memory locations allocated by
586 * the init_shared_mem() function and return it to the kernel.
587 */
588
589static void free_shared_mem(struct s2io_nic *nic)
590{
591 int i, j, blk_cnt, size;
592 void *tmp_v_addr;
593 dma_addr_t tmp_p_addr;
594 mac_info_t *mac_control;
595 struct config_param *config;
596 int lst_size, lst_per_page;
597
598
599 if (!nic)
600 return;
601
602 mac_control = &nic->mac_control;
603 config = &nic->config;
604
605 lst_size = (sizeof(TxD_t) * config->max_txds);
606 lst_per_page = PAGE_SIZE / lst_size;
607
608 for (i = 0; i < config->tx_fifo_num; i++) {
609 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
610 lst_per_page);
611 for (j = 0; j < page_num; j++) {
612 int mem_blks = (j * lst_per_page);
0b1f7ebe 613 if ((!mac_control->fifos[i].list_info) ||
614 (!mac_control->fifos[i].list_info[mem_blks].
615 list_virt_addr))
616 break;
617 pci_free_consistent(nic->pdev, PAGE_SIZE,
20346722 618 mac_control->fifos[i].
619 list_info[mem_blks].
1da177e4 620 list_virt_addr,
20346722 621 mac_control->fifos[i].
622 list_info[mem_blks].
623 list_phy_addr);
624 }
20346722 625 kfree(mac_control->fifos[i].list_info);
626 }
627
628#ifndef CONFIG_2BUFF_MODE
629 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
630#else
631 size = SIZE_OF_BLOCK;
632#endif
633 for (i = 0; i < config->rx_ring_num; i++) {
20346722 634 blk_cnt = mac_control->rings[i].block_count;
1da177e4 635 for (j = 0; j < blk_cnt; j++) {
20346722 636 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
637 block_virt_addr;
638 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
639 block_dma_addr;
640 if (tmp_v_addr == NULL)
641 break;
642 pci_free_consistent(nic->pdev, size,
643 tmp_v_addr, tmp_p_addr);
644 }
645 }
646
647#ifdef CONFIG_2BUFF_MODE
648 /* Freeing buffer storage addresses in 2BUFF mode. */
649 for (i = 0; i < config->rx_ring_num; i++) {
650 blk_cnt =
651 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
652 for (j = 0; j < blk_cnt; j++) {
653 int k = 0;
20346722 654 if (!mac_control->rings[i].ba[j])
655 continue;
1da177e4 656 while (k != MAX_RXDS_PER_BLOCK) {
20346722 657 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
658 kfree(ba->ba_0_org);
659 kfree(ba->ba_1_org);
660 k++;
661 }
20346722 662 kfree(mac_control->rings[i].ba[j]);
1da177e4 663 }
20346722 664 if (mac_control->rings[i].ba)
665 kfree(mac_control->rings[i].ba);
1da177e4 666 }
667#endif
668
669 if (mac_control->stats_mem) {
670 pci_free_consistent(nic->pdev,
671 mac_control->stats_mem_sz,
672 mac_control->stats_mem,
673 mac_control->stats_mem_phy);
674 }
675}
676
541ae68f 677/**
678 * s2io_verify_pci_mode -
679 */
680
681static int s2io_verify_pci_mode(nic_t *nic)
682{
683 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
684 register u64 val64 = 0;
685 int mode;
686
687 val64 = readq(&bar0->pci_mode);
688 mode = (u8)GET_PCI_MODE(val64);
689
690 if ( val64 & PCI_MODE_UNKNOWN_MODE)
691 return -1; /* Unknown PCI mode */
692 return mode;
693}
694
695
696/**
 697 * s2io_print_pci_mode - Print the detected PCI/PCI-X bus mode and record the bus speed.
698 */
699static int s2io_print_pci_mode(nic_t *nic)
700{
701 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
702 register u64 val64 = 0;
703 int mode;
704 struct config_param *config = &nic->config;
705
706 val64 = readq(&bar0->pci_mode);
707 mode = (u8)GET_PCI_MODE(val64);
708
709 if ( val64 & PCI_MODE_UNKNOWN_MODE)
710 return -1; /* Unknown PCI mode */
711
712 if (val64 & PCI_MODE_32_BITS) {
713 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
714 } else {
715 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
716 }
717
718 switch(mode) {
719 case PCI_MODE_PCI_33:
720 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
721 config->bus_speed = 33;
722 break;
723 case PCI_MODE_PCI_66:
724 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
725 config->bus_speed = 133;
726 break;
727 case PCI_MODE_PCIX_M1_66:
728 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
729 config->bus_speed = 133; /* Herc doubles the clock rate */
730 break;
731 case PCI_MODE_PCIX_M1_100:
732 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
733 config->bus_speed = 200;
734 break;
735 case PCI_MODE_PCIX_M1_133:
736 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
737 config->bus_speed = 266;
738 break;
739 case PCI_MODE_PCIX_M2_66:
740 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
741 config->bus_speed = 133;
742 break;
743 case PCI_MODE_PCIX_M2_100:
744 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
745 config->bus_speed = 200;
746 break;
747 case PCI_MODE_PCIX_M2_133:
748 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
749 config->bus_speed = 266;
750 break;
751 default:
752 return -1; /* Unsupported bus speed */
753 }
754
755 return mode;
756}
757
20346722 758/**
759 * init_nic - Initialization of hardware
1da177e4 760 * @nic: device private variable
20346722 761 * Description: The function sequentially configures every block
762 * of the H/W from their reset values.
763 * Return Value: SUCCESS on success and
764 * '-1' on failure (endian settings incorrect).
765 */
766
767static int init_nic(struct s2io_nic *nic)
768{
769 XENA_dev_config_t __iomem *bar0 = nic->bar0;
770 struct net_device *dev = nic->dev;
771 register u64 val64 = 0;
772 void __iomem *add;
773 u32 time;
774 int i, j;
775 mac_info_t *mac_control;
776 struct config_param *config;
777 int mdio_cnt = 0, dtx_cnt = 0;
778 unsigned long long mem_share;
20346722 779 int mem_size;
780
781 mac_control = &nic->mac_control;
782 config = &nic->config;
783
5e25b9dd 784 /* Set the swapper control on the card */
20346722 785 if(s2io_set_swapper(nic)) {
786 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
787 return -1;
788 }
789
541ae68f 790 /*
791 * Herc requires EOI to be removed from reset before XGXS, so..
792 */
793 if (nic->device_type & XFRAME_II_DEVICE) {
794 val64 = 0xA500000000ULL;
795 writeq(val64, &bar0->sw_reset);
796 msleep(500);
797 val64 = readq(&bar0->sw_reset);
798 }
799
800 /* Remove XGXS from reset state */
801 val64 = 0;
802 writeq(val64, &bar0->sw_reset);
1da177e4 803 msleep(500);
20346722 804 val64 = readq(&bar0->sw_reset);
805
806 /* Enable Receiving broadcasts */
807 add = &bar0->mac_cfg;
808 val64 = readq(&bar0->mac_cfg);
809 val64 |= MAC_RMAC_BCAST_ENABLE;
810 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
811 writel((u32) val64, add);
812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
813 writel((u32) (val64 >> 32), (add + 4));
814
815 /* Read registers in all blocks */
816 val64 = readq(&bar0->mac_int_mask);
817 val64 = readq(&bar0->mc_int_mask);
818 val64 = readq(&bar0->xgxs_int_mask);
819
820 /* Set MTU */
821 val64 = dev->mtu;
822 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
823
20346722 824 /*
825 * Configuring the XAUI Interface of Xena.
1da177e4 826 * ***************************************
20346722 827 * To Configure the Xena's XAUI, one has to write a series
828 * of 64 bit values into two registers in a particular
829 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
830 * which will be defined in the array of configuration values
541ae68f 831 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
20346722 832 * to switch writing from one register to another. We continue
1da177e4 833 * writing these values until we encounter the 'END_SIGN' macro.
20346722 834 * For example, After making a series of 21 writes into
835 * dtx_control register the 'SWITCH_SIGN' appears and hence we
836 * start writing into mdio_control until we encounter END_SIGN.
837 */
541ae68f 838 if (nic->device_type & XFRAME_II_DEVICE) {
839 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
 840 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 841 &bar0->dtx_control, UF);
541ae68f 842 if (dtx_cnt & 0x1)
843 msleep(1); /* Necessary!! */
844 dtx_cnt++;
845 }
541ae68f 846 } else {
847 while (1) {
848 dtx_cfg:
849 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
850 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
851 dtx_cnt++;
852 goto mdio_cfg;
853 }
854 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
855 &bar0->dtx_control, UF);
856 val64 = readq(&bar0->dtx_control);
857 dtx_cnt++;
858 }
859 mdio_cfg:
860 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
861 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
862 mdio_cnt++;
863 goto dtx_cfg;
864 }
865 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
866 &bar0->mdio_control, UF);
867 val64 = readq(&bar0->mdio_control);
1da177e4 868 mdio_cnt++;
541ae68f 869 }
870 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
871 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
872 break;
873 } else {
874 goto dtx_cfg;
875 }
876 }
877 }
878
879 /* Tx DMA Initialization */
880 val64 = 0;
881 writeq(val64, &bar0->tx_fifo_partition_0);
882 writeq(val64, &bar0->tx_fifo_partition_1);
883 writeq(val64, &bar0->tx_fifo_partition_2);
884 writeq(val64, &bar0->tx_fifo_partition_3);
885
886
887 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
888 val64 |=
889 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
890 13) | vBIT(config->tx_cfg[i].fifo_priority,
891 ((i * 32) + 5), 3);
892
893 if (i == (config->tx_fifo_num - 1)) {
894 if (i % 2 == 0)
895 i++;
896 }
897
898 switch (i) {
899 case 1:
900 writeq(val64, &bar0->tx_fifo_partition_0);
901 val64 = 0;
902 break;
903 case 3:
904 writeq(val64, &bar0->tx_fifo_partition_1);
905 val64 = 0;
906 break;
907 case 5:
908 writeq(val64, &bar0->tx_fifo_partition_2);
909 val64 = 0;
910 break;
911 case 7:
912 writeq(val64, &bar0->tx_fifo_partition_3);
913 break;
914 }
915 }
916
917 /* Enable Tx FIFO partition 0. */
918 val64 = readq(&bar0->tx_fifo_partition_0);
919 val64 |= BIT(0); /* To enable the FIFO partition. */
920 writeq(val64, &bar0->tx_fifo_partition_0);
921
5e25b9dd 922 /*
923 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
924 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
925 */
541ae68f 926 if ((nic->device_type == XFRAME_I_DEVICE) &&
927 (get_xena_rev_id(nic->pdev) < 4))
5e25b9dd 928 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
929
930 val64 = readq(&bar0->tx_fifo_partition_0);
931 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
932 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
933
20346722 934 /*
935 * Initialization of Tx_PA_CONFIG register to ignore packet
936 * integrity checking.
937 */
938 val64 = readq(&bar0->tx_pa_cfg);
939 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
940 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
941 writeq(val64, &bar0->tx_pa_cfg);
942
 943 /* Rx DMA initialization. */
944 val64 = 0;
945 for (i = 0; i < config->rx_ring_num; i++) {
946 val64 |=
947 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
948 3);
949 }
950 writeq(val64, &bar0->rx_queue_priority);
951
20346722 952 /*
953 * Allocating equal share of memory to all the
954 * configured Rings.
955 */
956 val64 = 0;
541ae68f 957 if (nic->device_type & XFRAME_II_DEVICE)
958 mem_size = 32;
959 else
960 mem_size = 64;
961
962 for (i = 0; i < config->rx_ring_num; i++) {
963 switch (i) {
964 case 0:
20346722 965 mem_share = (mem_size / config->rx_ring_num +
966 mem_size % config->rx_ring_num);
967 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
968 continue;
969 case 1:
20346722 970 mem_share = (mem_size / config->rx_ring_num);
971 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
972 continue;
973 case 2:
20346722 974 mem_share = (mem_size / config->rx_ring_num);
975 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
976 continue;
977 case 3:
20346722 978 mem_share = (mem_size / config->rx_ring_num);
979 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
980 continue;
981 case 4:
20346722 982 mem_share = (mem_size / config->rx_ring_num);
983 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
984 continue;
985 case 5:
20346722 986 mem_share = (mem_size / config->rx_ring_num);
987 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
988 continue;
989 case 6:
20346722 990 mem_share = (mem_size / config->rx_ring_num);
991 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
992 continue;
993 case 7:
20346722 994 mem_share = (mem_size / config->rx_ring_num);
995 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
996 continue;
997 }
998 }
999 writeq(val64, &bar0->rx_queue_cfg);
1000
20346722 1001 /*
5e25b9dd 1002 * Filling Tx round robin registers
1003 * as per the number of FIFOs
1da177e4 1004 */
5e25b9dd 1005 switch (config->tx_fifo_num) {
1006 case 1:
1007 val64 = 0x0000000000000000ULL;
1008 writeq(val64, &bar0->tx_w_round_robin_0);
1009 writeq(val64, &bar0->tx_w_round_robin_1);
1010 writeq(val64, &bar0->tx_w_round_robin_2);
1011 writeq(val64, &bar0->tx_w_round_robin_3);
1012 writeq(val64, &bar0->tx_w_round_robin_4);
1013 break;
1014 case 2:
1015 val64 = 0x0000010000010000ULL;
1016 writeq(val64, &bar0->tx_w_round_robin_0);
1017 val64 = 0x0100000100000100ULL;
1018 writeq(val64, &bar0->tx_w_round_robin_1);
1019 val64 = 0x0001000001000001ULL;
1020 writeq(val64, &bar0->tx_w_round_robin_2);
1021 val64 = 0x0000010000010000ULL;
1022 writeq(val64, &bar0->tx_w_round_robin_3);
1023 val64 = 0x0100000000000000ULL;
1024 writeq(val64, &bar0->tx_w_round_robin_4);
1025 break;
1026 case 3:
1027 val64 = 0x0001000102000001ULL;
1028 writeq(val64, &bar0->tx_w_round_robin_0);
1029 val64 = 0x0001020000010001ULL;
1030 writeq(val64, &bar0->tx_w_round_robin_1);
1031 val64 = 0x0200000100010200ULL;
1032 writeq(val64, &bar0->tx_w_round_robin_2);
1033 val64 = 0x0001000102000001ULL;
1034 writeq(val64, &bar0->tx_w_round_robin_3);
1035 val64 = 0x0001020000000000ULL;
1036 writeq(val64, &bar0->tx_w_round_robin_4);
1037 break;
1038 case 4:
1039 val64 = 0x0001020300010200ULL;
1040 writeq(val64, &bar0->tx_w_round_robin_0);
1041 val64 = 0x0100000102030001ULL;
1042 writeq(val64, &bar0->tx_w_round_robin_1);
1043 val64 = 0x0200010000010203ULL;
1044 writeq(val64, &bar0->tx_w_round_robin_2);
1045 val64 = 0x0001020001000001ULL;
1046 writeq(val64, &bar0->tx_w_round_robin_3);
1047 val64 = 0x0203000100000000ULL;
1048 writeq(val64, &bar0->tx_w_round_robin_4);
1049 break;
1050 case 5:
1051 val64 = 0x0001000203000102ULL;
1052 writeq(val64, &bar0->tx_w_round_robin_0);
1053 val64 = 0x0001020001030004ULL;
1054 writeq(val64, &bar0->tx_w_round_robin_1);
1055 val64 = 0x0001000203000102ULL;
1056 writeq(val64, &bar0->tx_w_round_robin_2);
1057 val64 = 0x0001020001030004ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_3);
1059 val64 = 0x0001000000000000ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_4);
1061 break;
1062 case 6:
1063 val64 = 0x0001020304000102ULL;
1064 writeq(val64, &bar0->tx_w_round_robin_0);
1065 val64 = 0x0304050001020001ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_1);
1067 val64 = 0x0203000100000102ULL;
1068 writeq(val64, &bar0->tx_w_round_robin_2);
1069 val64 = 0x0304000102030405ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_3);
1071 val64 = 0x0001000200000000ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_4);
1073 break;
1074 case 7:
1075 val64 = 0x0001020001020300ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_0);
1077 val64 = 0x0102030400010203ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_1);
1079 val64 = 0x0405060001020001ULL;
1080 writeq(val64, &bar0->tx_w_round_robin_2);
1081 val64 = 0x0304050000010200ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_3);
1083 val64 = 0x0102030000000000ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_4);
1085 break;
1086 case 8:
1087 val64 = 0x0001020300040105ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_0);
1089 val64 = 0x0200030106000204ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_1);
1091 val64 = 0x0103000502010007ULL;
1092 writeq(val64, &bar0->tx_w_round_robin_2);
1093 val64 = 0x0304010002060500ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_3);
1095 val64 = 0x0103020400000000ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_4);
1097 break;
1098 }
1099
1100 /* Filling the Rx round robin registers as per the
1101 * number of Rings and steering based on QoS.
1102 */
1103 switch (config->rx_ring_num) {
1104 case 1:
1105 val64 = 0x8080808080808080ULL;
1106 writeq(val64, &bar0->rts_qos_steering);
1107 break;
1108 case 2:
1109 val64 = 0x0000010000010000ULL;
1110 writeq(val64, &bar0->rx_w_round_robin_0);
1111 val64 = 0x0100000100000100ULL;
1112 writeq(val64, &bar0->rx_w_round_robin_1);
1113 val64 = 0x0001000001000001ULL;
1114 writeq(val64, &bar0->rx_w_round_robin_2);
1115 val64 = 0x0000010000010000ULL;
1116 writeq(val64, &bar0->rx_w_round_robin_3);
1117 val64 = 0x0100000000000000ULL;
1118 writeq(val64, &bar0->rx_w_round_robin_4);
1119
1120 val64 = 0x8080808040404040ULL;
1121 writeq(val64, &bar0->rts_qos_steering);
1122 break;
1123 case 3:
1124 val64 = 0x0001000102000001ULL;
1125 writeq(val64, &bar0->rx_w_round_robin_0);
1126 val64 = 0x0001020000010001ULL;
1127 writeq(val64, &bar0->rx_w_round_robin_1);
1128 val64 = 0x0200000100010200ULL;
1129 writeq(val64, &bar0->rx_w_round_robin_2);
1130 val64 = 0x0001000102000001ULL;
1131 writeq(val64, &bar0->rx_w_round_robin_3);
1132 val64 = 0x0001020000000000ULL;
1133 writeq(val64, &bar0->rx_w_round_robin_4);
1134
1135 val64 = 0x8080804040402020ULL;
1136 writeq(val64, &bar0->rts_qos_steering);
1137 break;
1138 case 4:
1139 val64 = 0x0001020300010200ULL;
1140 writeq(val64, &bar0->rx_w_round_robin_0);
1141 val64 = 0x0100000102030001ULL;
1142 writeq(val64, &bar0->rx_w_round_robin_1);
1143 val64 = 0x0200010000010203ULL;
1144 writeq(val64, &bar0->rx_w_round_robin_2);
1145 val64 = 0x0001020001000001ULL;
1146 writeq(val64, &bar0->rx_w_round_robin_3);
1147 val64 = 0x0203000100000000ULL;
1148 writeq(val64, &bar0->rx_w_round_robin_4);
1149
1150 val64 = 0x8080404020201010ULL;
1151 writeq(val64, &bar0->rts_qos_steering);
1152 break;
1153 case 5:
1154 val64 = 0x0001000203000102ULL;
1155 writeq(val64, &bar0->rx_w_round_robin_0);
1156 val64 = 0x0001020001030004ULL;
1157 writeq(val64, &bar0->rx_w_round_robin_1);
1158 val64 = 0x0001000203000102ULL;
1159 writeq(val64, &bar0->rx_w_round_robin_2);
1160 val64 = 0x0001020001030004ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_3);
1162 val64 = 0x0001000000000000ULL;
1163 writeq(val64, &bar0->rx_w_round_robin_4);
1164
1165 val64 = 0x8080404020201008ULL;
1166 writeq(val64, &bar0->rts_qos_steering);
1167 break;
1168 case 6:
1169 val64 = 0x0001020304000102ULL;
1170 writeq(val64, &bar0->rx_w_round_robin_0);
1171 val64 = 0x0304050001020001ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_1);
1173 val64 = 0x0203000100000102ULL;
1174 writeq(val64, &bar0->rx_w_round_robin_2);
1175 val64 = 0x0304000102030405ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_3);
1177 val64 = 0x0001000200000000ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_4);
1179
1180 val64 = 0x8080404020100804ULL;
1181 writeq(val64, &bar0->rts_qos_steering);
1182 break;
1183 case 7:
1184 val64 = 0x0001020001020300ULL;
1185 writeq(val64, &bar0->rx_w_round_robin_0);
1186 val64 = 0x0102030400010203ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_1);
1188 val64 = 0x0405060001020001ULL;
1189 writeq(val64, &bar0->rx_w_round_robin_2);
1190 val64 = 0x0304050000010200ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_3);
1192 val64 = 0x0102030000000000ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_4);
1194
1195 val64 = 0x8080402010080402ULL;
1196 writeq(val64, &bar0->rts_qos_steering);
1197 break;
1198 case 8:
1199 val64 = 0x0001020300040105ULL;
1200 writeq(val64, &bar0->rx_w_round_robin_0);
1201 val64 = 0x0200030106000204ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_1);
1203 val64 = 0x0103000502010007ULL;
1204 writeq(val64, &bar0->rx_w_round_robin_2);
1205 val64 = 0x0304010002060500ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_3);
1207 val64 = 0x0103020400000000ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_4);
1209
1210 val64 = 0x8040201008040201ULL;
1211 writeq(val64, &bar0->rts_qos_steering);
1212 break;
1213 }
1214
1215 /* UDP Fix */
1216 val64 = 0;
20346722 1217 for (i = 0; i < 8; i++)
1218 writeq(val64, &bar0->rts_frm_len_n[i]);
1219
5e25b9dd 1220 /* Set the default rts frame length for the rings configured */
1221 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1222 for (i = 0 ; i < config->rx_ring_num ; i++)
1223 writeq(val64, &bar0->rts_frm_len_n[i]);
1224
1225 /* Set the frame length for the configured rings
1226 * desired by the user
1227 */
1228 for (i = 0; i < config->rx_ring_num; i++) {
 1229 /* If rts_frm_len[i] == 0 then it is assumed that the user has not
1230 * specified frame length steering.
1231 * If the user provides the frame length then program
1232 * the rts_frm_len register for those values or else
1233 * leave it as it is.
1234 */
1235 if (rts_frm_len[i] != 0) {
1236 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1237 &bar0->rts_frm_len_n[i]);
1238 }
1239 }
1da177e4 1240
20346722 1241 /* Program statistics memory */
1da177e4 1242 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1243
541ae68f 1244 if (nic->device_type == XFRAME_II_DEVICE) {
1245 val64 = STAT_BC(0x320);
1246 writeq(val64, &bar0->stat_byte_cnt);
1247 }
1248
20346722 1249 /*
1250 * Initializing the sampling rate for the device to calculate the
1251 * bandwidth utilization.
1252 */
1253 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1254 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1255 writeq(val64, &bar0->mac_link_util);
1256
1257
20346722 1258 /*
1259 * Initializing the Transmit and Receive Traffic Interrupt
1260 * Scheme.
1261 */
20346722 1262 /*
1263 * TTI Initialization. Default Tx timer gets us about
1264 * 250 interrupts per sec. Continuous interrupts are enabled
1265 * by default.
1266 */
541ae68f 1267 if (nic->device_type == XFRAME_II_DEVICE) {
1268 int count = (nic->config.bus_speed * 125)/2;
1269 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1270 } else {
1271
1272 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1273 }
1274 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1da177e4 1275 TTI_DATA1_MEM_TX_URNG_B(0x10) |
5e25b9dd 1276 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
541ae68f 1277 if (use_continuous_tx_intrs)
1278 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1279 writeq(val64, &bar0->tti_data1_mem);
1280
1281 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1282 TTI_DATA2_MEM_TX_UFC_B(0x20) |
5e25b9dd 1283 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1284 writeq(val64, &bar0->tti_data2_mem);
1285
1286 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1287 writeq(val64, &bar0->tti_command_mem);
1288
20346722 1289 /*
1290 * Once the operation completes, the Strobe bit of the command
1291 * register will be reset. We poll for this particular condition
1292 * We wait for a maximum of 500ms for the operation to complete,
1293 * if it's not complete by then we return error.
1294 */
1295 time = 0;
1296 while (TRUE) {
1297 val64 = readq(&bar0->tti_command_mem);
1298 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1299 break;
1300 }
1301 if (time > 10) {
1302 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1303 dev->name);
1304 return -1;
1305 }
1306 msleep(50);
1307 time++;
1308 }
1309
b6e3f982 1310 if (nic->config.bimodal) {
1311 int k = 0;
1312 for (k = 0; k < config->rx_ring_num; k++) {
1313 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1314 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1315 writeq(val64, &bar0->tti_command_mem);
541ae68f 1316
541ae68f 1317 /*
b6e3f982 1318 * Once the operation completes, the Strobe bit of the command
1319 * register will be reset. We poll for this particular condition
1320 * We wait for a maximum of 500ms for the operation to complete,
1321 * if it's not complete by then we return error.
1322 */
1323 time = 0;
1324 while (TRUE) {
1325 val64 = readq(&bar0->tti_command_mem);
1326 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1327 break;
1328 }
1329 if (time > 10) {
1330 DBG_PRINT(ERR_DBG,
1331 "%s: TTI init Failed\n",
1332 dev->name);
1333 return -1;
1334 }
1335 time++;
1336 msleep(50);
1337 }
1338 }
541ae68f 1339 } else {
1da177e4 1340
b6e3f982 1341 /* RTI Initialization */
1342 if (nic->device_type == XFRAME_II_DEVICE) {
1343 /*
 1344 * Programmed to generate approx. 500 interrupts per
1345 * second
1346 */
1347 int count = (nic->config.bus_speed * 125)/4;
1348 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1349 } else {
1350 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1351 }
1352 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1353 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1354 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1da177e4 1355
b6e3f982 1356 writeq(val64, &bar0->rti_data1_mem);
1da177e4 1357
b6e3f982 1358 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1359 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1360 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1361 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1362
b6e3f982 1363 for (i = 0; i < config->rx_ring_num; i++) {
1364 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1365 | RTI_CMD_MEM_OFFSET(i);
1366 writeq(val64, &bar0->rti_command_mem);
1367
1368 /*
1369 * Once the operation completes, the Strobe bit of the
1370 * command register will be reset. We poll for this
1371 * particular condition. We wait for a maximum of 500ms
1372 * for the operation to complete, if it's not complete
1373 * by then we return error.
1374 */
1375 time = 0;
1376 while (TRUE) {
1377 val64 = readq(&bar0->rti_command_mem);
1378 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1379 break;
1380 }
1381 if (time > 10) {
1382 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1383 dev->name);
1384 return -1;
1385 }
1386 time++;
1387 msleep(50);
1388 }
1da177e4 1389 }
1390 }
1391
20346722 1392 /*
1393 * Initializing proper values as Pause threshold into all
1394 * the 8 Queues on Rx side.
1395 */
1396 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1397 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1398
1399 /* Disable RMAC PAD STRIPPING */
20346722 1400 add = (void *) &bar0->mac_cfg;
1401 val64 = readq(&bar0->mac_cfg);
1402 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1403 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1404 writel((u32) (val64), add);
1405 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1406 writel((u32) (val64 >> 32), (add + 4));
1407 val64 = readq(&bar0->mac_cfg);
1408
20346722 1409 /*
1410 * Set the time value to be inserted in the pause frame
1411 * generated by xena.
1412 */
1413 val64 = readq(&bar0->rmac_pause_cfg);
1414 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1415 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1416 writeq(val64, &bar0->rmac_pause_cfg);
1417
20346722 1418 /*
1419 * Set the Threshold Limit for Generating the pause frame
 1420 * If the amount of data in any Queue exceeds the ratio of
 1421 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
 1422 * pause frame is generated
1423 */
1424 val64 = 0;
1425 for (i = 0; i < 4; i++) {
1426 val64 |=
1427 (((u64) 0xFF00 | nic->mac_control.
1428 mc_pause_threshold_q0q3)
1429 << (i * 2 * 8));
1430 }
1431 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1432
1433 val64 = 0;
1434 for (i = 0; i < 4; i++) {
1435 val64 |=
1436 (((u64) 0xFF00 | nic->mac_control.
1437 mc_pause_threshold_q4q7)
1438 << (i * 2 * 8));
1439 }
1440 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1441
20346722 1442 /*
1443 * TxDMA will stop Read request if the number of read split has
1444 * exceeded the limit pointed by shared_splits
1445 */
1446 val64 = readq(&bar0->pic_control);
1447 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1448 writeq(val64, &bar0->pic_control);
1449
541ae68f 1450 /*
1451 * Programming the Herc to split every write transaction
1452 * that does not start on an ADB to reduce disconnects.
1453 */
1454 if (nic->device_type == XFRAME_II_DEVICE) {
1455 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1456 writeq(val64, &bar0->wreq_split_mask);
1457 }
1458
a371a07d 1459 /* Setting Link stability period to 64 ms */
1460 if (nic->device_type == XFRAME_II_DEVICE) {
1461 val64 = MISC_LINK_STABILITY_PRD(3);
1462 writeq(val64, &bar0->misc_control);
1463 }
1464
1465 return SUCCESS;
1466}
a371a07d 1467#define LINK_UP_DOWN_INTERRUPT 1
1468#define MAC_RMAC_ERR_TIMER 2
1469
1470#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1471#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1472#else
1473int s2io_link_fault_indication(nic_t *nic)
1474{
1475 if (nic->device_type == XFRAME_II_DEVICE)
1476 return LINK_UP_DOWN_INTERRUPT;
1477 else
1478 return MAC_RMAC_ERR_TIMER;
1479}
1480#endif
1da177e4 1481
20346722 1482/**
1483 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1484 * @nic: device private variable,
1485 * @mask: A mask indicating which Intr block must be modified and,
1486 * @flag: A flag indicating whether to enable or disable the Intrs.
1487 * Description: This function will either disable or enable the interrupts
20346722 1488 * depending on the flag argument. The mask argument can be used to
1489 * enable/disable any Intr block.
1490 * Return Value: NONE.
1491 */
1492
1493static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1494{
1495 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1496 register u64 val64 = 0, temp64 = 0;
1497
1498 /* Top level interrupt classification */
1499 /* PIC Interrupts */
1500 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1501 /* Enable PIC Intrs in the general intr mask register */
1502 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1503 if (flag == ENABLE_INTRS) {
1504 temp64 = readq(&bar0->general_int_mask);
1505 temp64 &= ~((u64) val64);
1506 writeq(temp64, &bar0->general_int_mask);
20346722 1507 /*
a371a07d 1508 * If Hercules adapter enable GPIO otherwise
1509 * disabled all PCIX, Flash, MDIO, IIC and GPIO
20346722 1510 * interrupts for now.
1511 * TODO
1da177e4 1512 */
a371a07d 1513 if (s2io_link_fault_indication(nic) ==
1514 LINK_UP_DOWN_INTERRUPT ) {
1515 temp64 = readq(&bar0->pic_int_mask);
1516 temp64 &= ~((u64) PIC_INT_GPIO);
1517 writeq(temp64, &bar0->pic_int_mask);
1518 temp64 = readq(&bar0->gpio_int_mask);
1519 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1520 writeq(temp64, &bar0->gpio_int_mask);
1521 } else {
1522 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1523 }
20346722 1524 /*
1525 * No MSI Support is available presently, so TTI and
1526 * RTI interrupts are also disabled.
1527 */
1528 } else if (flag == DISABLE_INTRS) {
20346722 1529 /*
1530 * Disable PIC Intrs in the general
1531 * intr mask register
1532 */
1533 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1534 temp64 = readq(&bar0->general_int_mask);
1535 val64 |= temp64;
1536 writeq(val64, &bar0->general_int_mask);
1537 }
1538 }
1539
1540 /* DMA Interrupts */
1541 /* Enabling/Disabling Tx DMA interrupts */
1542 if (mask & TX_DMA_INTR) {
1543 /* Enable TxDMA Intrs in the general intr mask register */
1544 val64 = TXDMA_INT_M;
1545 if (flag == ENABLE_INTRS) {
1546 temp64 = readq(&bar0->general_int_mask);
1547 temp64 &= ~((u64) val64);
1548 writeq(temp64, &bar0->general_int_mask);
20346722 1549 /*
1550 * Keep all interrupts other than PFC interrupt
1551 * and PCC interrupt disabled in DMA level.
1552 */
1553 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1554 TXDMA_PCC_INT_M);
1555 writeq(val64, &bar0->txdma_int_mask);
20346722 1556 /*
1557 * Enable only the MISC error 1 interrupt in PFC block
1558 */
1559 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1560 writeq(val64, &bar0->pfc_err_mask);
20346722 1561 /*
1562 * Enable only the FB_ECC error interrupt in PCC block
1563 */
1564 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1565 writeq(val64, &bar0->pcc_err_mask);
1566 } else if (flag == DISABLE_INTRS) {
20346722 1567 /*
1568 * Disable TxDMA Intrs in the general intr mask
1569 * register
1570 */
1571 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1572 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1573 temp64 = readq(&bar0->general_int_mask);
1574 val64 |= temp64;
1575 writeq(val64, &bar0->general_int_mask);
1576 }
1577 }
1578
1579 /* Enabling/Disabling Rx DMA interrupts */
1580 if (mask & RX_DMA_INTR) {
1581 /* Enable RxDMA Intrs in the general intr mask register */
1582 val64 = RXDMA_INT_M;
1583 if (flag == ENABLE_INTRS) {
1584 temp64 = readq(&bar0->general_int_mask);
1585 temp64 &= ~((u64) val64);
1586 writeq(temp64, &bar0->general_int_mask);
20346722 1587 /*
1588 * All RxDMA block interrupts are disabled for now
1589 * TODO
1590 */
1591 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1592 } else if (flag == DISABLE_INTRS) {
20346722 1593 /*
1594 * Disable RxDMA Intrs in the general intr mask
1595 * register
1596 */
1597 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1598 temp64 = readq(&bar0->general_int_mask);
1599 val64 |= temp64;
1600 writeq(val64, &bar0->general_int_mask);
1601 }
1602 }
1603
1604 /* MAC Interrupts */
1605 /* Enabling/Disabling MAC interrupts */
1606 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1607 val64 = TXMAC_INT_M | RXMAC_INT_M;
1608 if (flag == ENABLE_INTRS) {
1609 temp64 = readq(&bar0->general_int_mask);
1610 temp64 &= ~((u64) val64);
1611 writeq(temp64, &bar0->general_int_mask);
20346722 1612 /*
1613 * All MAC block error interrupts are disabled for now
1614 * TODO
1615 */
1da177e4 1616 } else if (flag == DISABLE_INTRS) {
20346722 1617 /*
1618 * Disable MAC Intrs in the general intr mask register
1619 */
1620 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1621 writeq(DISABLE_ALL_INTRS,
1622 &bar0->mac_rmac_err_mask);
1623
1624 temp64 = readq(&bar0->general_int_mask);
1625 val64 |= temp64;
1626 writeq(val64, &bar0->general_int_mask);
1627 }
1628 }
1629
1630 /* XGXS Interrupts */
1631 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1632 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1633 if (flag == ENABLE_INTRS) {
1634 temp64 = readq(&bar0->general_int_mask);
1635 temp64 &= ~((u64) val64);
1636 writeq(temp64, &bar0->general_int_mask);
20346722 1637 /*
1da177e4 1638 * All XGXS block error interrupts are disabled for now
20346722 1639 * TODO
1640 */
1641 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1642 } else if (flag == DISABLE_INTRS) {
20346722 1643 /*
 1644 * Disable XGXS Intrs in the general intr mask register
1645 */
1646 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1647 temp64 = readq(&bar0->general_int_mask);
1648 val64 |= temp64;
1649 writeq(val64, &bar0->general_int_mask);
1650 }
1651 }
1652
1653 /* Memory Controller(MC) interrupts */
1654 if (mask & MC_INTR) {
1655 val64 = MC_INT_M;
1656 if (flag == ENABLE_INTRS) {
1657 temp64 = readq(&bar0->general_int_mask);
1658 temp64 &= ~((u64) val64);
1659 writeq(temp64, &bar0->general_int_mask);
20346722 1660 /*
5e25b9dd 1661 * Enable all MC Intrs.
1da177e4 1662 */
5e25b9dd 1663 writeq(0x0, &bar0->mc_int_mask);
1664 writeq(0x0, &bar0->mc_err_mask);
1665 } else if (flag == DISABLE_INTRS) {
1666 /*
1667 * Disable MC Intrs in the general intr mask register
1668 */
1669 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1670 temp64 = readq(&bar0->general_int_mask);
1671 val64 |= temp64;
1672 writeq(val64, &bar0->general_int_mask);
1673 }
1674 }
1675
1676
1677 /* Tx traffic interrupts */
1678 if (mask & TX_TRAFFIC_INTR) {
1679 val64 = TXTRAFFIC_INT_M;
1680 if (flag == ENABLE_INTRS) {
1681 temp64 = readq(&bar0->general_int_mask);
1682 temp64 &= ~((u64) val64);
1683 writeq(temp64, &bar0->general_int_mask);
20346722 1684 /*
1da177e4 1685 * Enable all the Tx side interrupts
20346722 1686 * writing 0 Enables all 64 TX interrupt levels
1687 */
1688 writeq(0x0, &bar0->tx_traffic_mask);
1689 } else if (flag == DISABLE_INTRS) {
20346722 1690 /*
1691 * Disable Tx Traffic Intrs in the general intr mask
1692 * register.
1693 */
1694 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1695 temp64 = readq(&bar0->general_int_mask);
1696 val64 |= temp64;
1697 writeq(val64, &bar0->general_int_mask);
1698 }
1699 }
1700
1701 /* Rx traffic interrupts */
1702 if (mask & RX_TRAFFIC_INTR) {
1703 val64 = RXTRAFFIC_INT_M;
1704 if (flag == ENABLE_INTRS) {
1705 temp64 = readq(&bar0->general_int_mask);
1706 temp64 &= ~((u64) val64);
1707 writeq(temp64, &bar0->general_int_mask);
1708 /* writing 0 Enables all 8 RX interrupt levels */
1709 writeq(0x0, &bar0->rx_traffic_mask);
1710 } else if (flag == DISABLE_INTRS) {
20346722 1711 /*
1712 * Disable Rx Traffic Intrs in the general intr mask
1713 * register.
1714 */
1715 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1716 temp64 = readq(&bar0->general_int_mask);
1717 val64 |= temp64;
1718 writeq(val64, &bar0->general_int_mask);
1719 }
1720 }
1721}
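/*
 * Typical invocation sketch for en_dis_able_nic_intrs() above: enable only
 * the Tx and Rx traffic interrupt blocks and leave the error blocks masked
 * (the mask combination shown is illustrative):
 *
 *   en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *                         ENABLE_INTRS);
 */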
1722
541ae68f 1723static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
20346722 1724{
1725 int ret = 0;
1726
1727 if (flag == FALSE) {
541ae68f 1728 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd 1729 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1730 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1731 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1732 ret = 1;
1733 }
541ae68f 1734 } else {
5e25b9dd 1735 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1736 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1737 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1738 ret = 1;
1739 }
20346722 1740 }
1741 } else {
541ae68f 1742 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd 1743 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1744 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1745 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1746 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1747 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1748 ret = 1;
1749 }
1750 } else {
1751 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1752 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1753 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1754 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1755 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1756 ret = 1;
1757 }
20346722 1758 }
1759 }
1760
1761 return ret;
1762}
1763/**
1764 * verify_xena_quiescence - Checks whether the H/W is ready
1da177e4
LT
1765 * @val64 : Value read from adapter status register.
1766 * @flag : indicates if the adapter enable bit was ever written once
1767 * before.
1768 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 1769 * on whether the adapter enable bit was written or not, the comparison
1da177e4
LT
1770 * differs and the calling function passes the input argument flag to
1771 * indicate this.
20346722 1772 * Return: 1 if Xena is quiescent
1da177e4
LT
 1773 * 0 if Xena is not quiescent
1774 */
1775
20346722 1776static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1da177e4 1777{
541ae68f 1778 int ret = 0, herc;
1da177e4 1779 u64 tmp64 = ~((u64) val64);
5e25b9dd 1780 int rev_id = get_xena_rev_id(sp->pdev);
1da177e4 1781
541ae68f 1782 herc = (sp->device_type == XFRAME_II_DEVICE);
1da177e4
LT
1783 if (!
1784 (tmp64 &
1785 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1786 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1787 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1788 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1789 ADAPTER_STATUS_P_PLL_LOCK))) {
541ae68f 1790 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1da177e4
LT
1791 }
1792
1793 return ret;
1794}
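
/*
 * A minimal usage sketch, mirroring what start_nic() below does with its
 * own locals: read the adapter status and refuse to set ADAPTER_CNTL_EN
 * until the quiescence check passes.
 */
#if 0	/* illustrative only */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		DBG_PRINT(ERR_DBG, "Adapter not quiescent: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}
#endif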
1795
1796/**
1797 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 1798 * @sp: Pointer to device specific structure
20346722 1799 * Description :
1da177e4
LT
1800 * New procedure to clear mac address reading problems on Alpha platforms
1801 *
1802 */
1803
20346722 1804void fix_mac_address(nic_t * sp)
1da177e4
LT
1805{
1806 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1807 u64 val64;
1808 int i = 0;
1809
1810 while (fix_mac[i] != END_SIGN) {
1811 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1812 udelay(10);
1da177e4
LT
1813 val64 = readq(&bar0->gpio_control);
1814 }
1815}
1816
1817/**
20346722 1818 * start_nic - Turns the device on
1da177e4 1819 * @nic : device private variable.
20346722 1820 * Description:
1821 * This function actually turns the device on. Before this function is
 1822 * called, all registers are configured from their reset states
1823 * and shared memory is allocated but the NIC is still quiescent. On
1da177e4
LT
1824 * calling this function, the device interrupts are cleared and the NIC is
1825 * literally switched on by writing into the adapter control register.
20346722 1826 * Return Value:
1da177e4
LT
1827 * SUCCESS on success and -1 on failure.
1828 */
1829
1830static int start_nic(struct s2io_nic *nic)
1831{
1832 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1833 struct net_device *dev = nic->dev;
1834 register u64 val64 = 0;
20346722 1835 u16 interruptible;
1836 u16 subid, i;
1da177e4
LT
1837 mac_info_t *mac_control;
1838 struct config_param *config;
1839
1840 mac_control = &nic->mac_control;
1841 config = &nic->config;
1842
1843 /* PRC Initialization and configuration */
1844 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1845 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1da177e4
LT
1846 &bar0->prc_rxd0_n[i]);
1847
1848 val64 = readq(&bar0->prc_ctrl_n[i]);
b6e3f982 1849 if (nic->config.bimodal)
1850 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1da177e4
LT
1851#ifndef CONFIG_2BUFF_MODE
1852 val64 |= PRC_CTRL_RC_ENABLED;
1853#else
1854 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1855#endif
1856 writeq(val64, &bar0->prc_ctrl_n[i]);
1857 }
1858
1859#ifdef CONFIG_2BUFF_MODE
1860 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1861 val64 = readq(&bar0->rx_pa_cfg);
1862 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1863 writeq(val64, &bar0->rx_pa_cfg);
1864#endif
1865
20346722 1866 /*
1da177e4
LT
 1867 * Enabling MC-RLDRAM. After enabling the device, we wait
1868 * for around 100ms, which is approximately the time required
1869 * for the device to be ready for operation.
1870 */
1871 val64 = readq(&bar0->mc_rldram_mrs);
1872 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1873 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1874 val64 = readq(&bar0->mc_rldram_mrs);
1875
20346722 1876 msleep(100); /* Delay by around 100 ms. */
1da177e4
LT
1877
1878 /* Enabling ECC Protection. */
1879 val64 = readq(&bar0->adapter_control);
1880 val64 &= ~ADAPTER_ECC_EN;
1881 writeq(val64, &bar0->adapter_control);
1882
20346722 1883 /*
1884 * Clearing any possible Link state change interrupts that
1da177e4
LT
1885 * could have popped up just before Enabling the card.
1886 */
1887 val64 = readq(&bar0->mac_rmac_err_reg);
1888 if (val64)
1889 writeq(val64, &bar0->mac_rmac_err_reg);
1890
20346722 1891 /*
1892 * Verify if the device is ready to be enabled, if so enable
1da177e4
LT
1893 * it.
1894 */
1895 val64 = readq(&bar0->adapter_status);
20346722 1896 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
1897 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1898 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1899 (unsigned long long) val64);
1900 return FAILURE;
1901 }
1902
1903 /* Enable select interrupts */
a371a07d 1904 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
1905 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1906 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1907
1da177e4
LT
1908 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1909
20346722 1910 /*
1da177e4 1911 * With some switches, the link might already be up at this point.
20346722 1912 * Because of this weird behavior, when we enable laser,
1913 * we may not get link. We need to handle this. We cannot
1914 * figure out which switch is misbehaving. So we are forced to
1915 * make a global change.
1da177e4
LT
1916 */
1917
1918 /* Enabling Laser. */
1919 val64 = readq(&bar0->adapter_control);
1920 val64 |= ADAPTER_EOI_TX_ON;
1921 writeq(val64, &bar0->adapter_control);
1922
1923 /* SXE-002: Initialize link and activity LED */
1924 subid = nic->pdev->subsystem_device;
541ae68f 1925 if (((subid & 0xFF) >= 0x07) &&
1926 (nic->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
1927 val64 = readq(&bar0->gpio_control);
1928 val64 |= 0x0000800000000000ULL;
1929 writeq(val64, &bar0->gpio_control);
1930 val64 = 0x0411040400000000ULL;
20346722 1931 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1da177e4
LT
1932 }
1933
20346722 1934 /*
1935 * Don't see link state interrupts on certain switches, so
1da177e4
LT
1936 * directly scheduling a link state task from here.
1937 */
1938 schedule_work(&nic->set_link_task);
1939
1da177e4
LT
1940 return SUCCESS;
1941}
1942
20346722 1943/**
1944 * free_tx_buffers - Free all queued Tx buffers
1da177e4 1945 * @nic : device private variable.
20346722 1946 * Description:
1da177e4 1947 * Free all queued Tx buffers.
20346722 1948 * Return Value: void
1da177e4
LT
1949*/
1950
1951static void free_tx_buffers(struct s2io_nic *nic)
1952{
1953 struct net_device *dev = nic->dev;
1954 struct sk_buff *skb;
1955 TxD_t *txdp;
1956 int i, j;
1957 mac_info_t *mac_control;
1958 struct config_param *config;
1ddc50d4 1959 int cnt = 0, frg_cnt;
1da177e4
LT
1960
1961 mac_control = &nic->mac_control;
1962 config = &nic->config;
1963
1964 for (i = 0; i < config->tx_fifo_num; i++) {
1965 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
20346722 1966 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1da177e4
LT
1967 list_virt_addr;
1968 skb =
1969 (struct sk_buff *) ((unsigned long) txdp->
1970 Host_Control);
1971 if (skb == NULL) {
1ddc50d4 1972 memset(txdp, 0, sizeof(TxD_t) *
1973 config->max_txds);
1da177e4
LT
1974 continue;
1975 }
1ddc50d4 1976 frg_cnt = skb_shinfo(skb)->nr_frags;
1977 pci_unmap_single(nic->pdev, (dma_addr_t)
1978 txdp->Buffer_Pointer,
1979 skb->len - skb->data_len,
1980 PCI_DMA_TODEVICE);
1981 if (frg_cnt) {
1982 TxD_t *temp;
1983 temp = txdp;
1984 txdp++;
1985 for (j = 0; j < frg_cnt; j++, txdp++) {
1986 skb_frag_t *frag =
1987 &skb_shinfo(skb)->frags[j];
1988 pci_unmap_page(nic->pdev,
1989 (dma_addr_t)
1990 txdp->
1991 Buffer_Pointer,
1992 frag->size,
1993 PCI_DMA_TODEVICE);
1994 }
1995 txdp = temp;
1996 }
1da177e4 1997 dev_kfree_skb(skb);
1ddc50d4 1998 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1da177e4
LT
1999 cnt++;
2000 }
2001 DBG_PRINT(INTR_DBG,
2002 "%s:forcibly freeing %d skbs on FIFO%d\n",
2003 dev->name, cnt, i);
20346722 2004 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2005 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1da177e4
LT
2006 }
2007}
2008
20346722 2009/**
2010 * stop_nic - To stop the nic
1da177e4 2011 * @nic : device private variable.
20346722 2012 * Description:
2013 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2014 * function does. This function is called to stop the device.
2015 * Return Value:
2016 * void.
2017 */
2018
2019static void stop_nic(struct s2io_nic *nic)
2020{
2021 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2022 register u64 val64 = 0;
2023 u16 interruptible, i;
2024 mac_info_t *mac_control;
2025 struct config_param *config;
2026
2027 mac_control = &nic->mac_control;
2028 config = &nic->config;
2029
2030 /* Disable all interrupts */
a371a07d 2031 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
2032 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2033 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1da177e4
LT
2034 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2035
2036 /* Disable PRCs */
2037 for (i = 0; i < config->rx_ring_num; i++) {
2038 val64 = readq(&bar0->prc_ctrl_n[i]);
2039 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2040 writeq(val64, &bar0->prc_ctrl_n[i]);
2041 }
2042}
2043
20346722 2044/**
2045 * fill_rx_buffers - Allocates the Rx side skbs
1da177e4 2046 * @nic: device private variable
20346722 2047 * @ring_no: ring number
2048 * Description:
1da177e4
LT
2049 * The function allocates Rx side skbs and puts the physical
2050 * address of these buffers into the RxD buffer pointers, so that the NIC
2051 * can DMA the received frame into these locations.
2052 * The NIC supports 3 receive modes, viz
2053 * 1. single buffer,
2054 * 2. three buffer and
2055 * 3. Five buffer modes.
20346722 2056 * Each mode defines how many fragments the received frame will be split
2057 * up into by the NIC. The frame is split into L3 header, L4 Header,
1da177e4
LT
 2058 * L4 payload in three buffer mode; in five buffer mode the L4 payload
 2059 * itself is further split into 3 fragments. As of now only single buffer
 2060 * mode is supported.
2061 * Return Value:
2062 * SUCCESS on success or an appropriate -ve value on failure.
2063 */
2064
20346722 2065int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1da177e4
LT
2066{
2067 struct net_device *dev = nic->dev;
2068 struct sk_buff *skb;
2069 RxD_t *rxdp;
2070 int off, off1, size, block_no, block_no1;
2071 int offset, offset1;
2072 u32 alloc_tab = 0;
20346722 2073 u32 alloc_cnt;
1da177e4
LT
2074 mac_info_t *mac_control;
2075 struct config_param *config;
2076#ifdef CONFIG_2BUFF_MODE
2077 RxD_t *rxdpnext;
2078 int nextblk;
20346722 2079 u64 tmp;
1da177e4
LT
2080 buffAdd_t *ba;
2081 dma_addr_t rxdpphys;
2082#endif
2083#ifndef CONFIG_S2IO_NAPI
2084 unsigned long flags;
2085#endif
2086
2087 mac_control = &nic->mac_control;
2088 config = &nic->config;
20346722 2089 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2090 atomic_read(&nic->rx_bufs_left[ring_no]);
1da177e4
LT
2091 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2092 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2093
2094 while (alloc_tab < alloc_cnt) {
20346722 2095 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2096 block_index;
20346722 2097 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1da177e4 2098 block_index;
20346722 2099 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2100 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1da177e4
LT
2101#ifndef CONFIG_2BUFF_MODE
2102 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2103 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2104#else
2105 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2106 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2107#endif
2108
20346722 2109 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2110 block_virt_addr + off;
2111 if ((offset == offset1) && (rxdp->Host_Control)) {
2112 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2113 DBG_PRINT(INTR_DBG, " info equated\n");
2114 goto end;
2115 }
2116#ifndef CONFIG_2BUFF_MODE
2117 if (rxdp->Control_1 == END_OF_BLOCK) {
20346722 2118 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2119 block_index++;
20346722 2120 mac_control->rings[ring_no].rx_curr_put_info.
2121 block_index %= mac_control->rings[ring_no].block_count;
2122 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2123 block_index;
1da177e4
LT
2124 off++;
2125 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2126 mac_control->rings[ring_no].rx_curr_put_info.offset =
1da177e4
LT
2127 off;
2128 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2129 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2130 dev->name, rxdp);
2131 }
2132#ifndef CONFIG_S2IO_NAPI
2133 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2134 mac_control->rings[ring_no].put_pos =
1da177e4
LT
2135 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2136 spin_unlock_irqrestore(&nic->put_lock, flags);
2137#endif
2138#else
2139 if (rxdp->Host_Control == END_OF_BLOCK) {
20346722 2140 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2141 block_index++;
20346722 2142 mac_control->rings[ring_no].rx_curr_put_info.block_index
2143 %= mac_control->rings[ring_no].block_count;
2144 block_no = mac_control->rings[ring_no].rx_curr_put_info
2145 .block_index;
1da177e4
LT
2146 off = 0;
2147 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2148 dev->name, block_no,
2149 (unsigned long long) rxdp->Control_1);
20346722 2150 mac_control->rings[ring_no].rx_curr_put_info.offset =
1da177e4 2151 off;
20346722 2152 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2153 block_virt_addr;
2154 }
2155#ifndef CONFIG_S2IO_NAPI
2156 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2157 mac_control->rings[ring_no].put_pos = (block_no *
1da177e4
LT
2158 (MAX_RXDS_PER_BLOCK + 1)) + off;
2159 spin_unlock_irqrestore(&nic->put_lock, flags);
2160#endif
2161#endif
2162
2163#ifndef CONFIG_2BUFF_MODE
2164 if (rxdp->Control_1 & RXD_OWN_XENA)
2165#else
2166 if (rxdp->Control_2 & BIT(0))
2167#endif
2168 {
20346722 2169 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4
LT
2170 offset = off;
2171 goto end;
2172 }
2173#ifdef CONFIG_2BUFF_MODE
20346722 2174 /*
2175 * RxDs Spanning cache lines will be replenished only
2176 * if the succeeding RxD is also owned by Host. It
2177 * will always be the ((8*i)+3) and ((8*i)+6)
2178 * descriptors for the 48 byte descriptor. The offending
1da177e4
LT
 2179 * descriptor is of course the 3rd descriptor.
2180 */
20346722 2181 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2182 block_dma_addr + (off * sizeof(RxD_t));
2183 if (((u64) (rxdpphys)) % 128 > 80) {
20346722 2184 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2185 block_virt_addr + (off + 1);
2186 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2187 nextblk = (block_no + 1) %
20346722 2188 (mac_control->rings[ring_no].block_count);
2189 rxdpnext = mac_control->rings[ring_no].rx_blocks
1da177e4
LT
2190 [nextblk].block_virt_addr;
2191 }
2192 if (rxdpnext->Control_2 & BIT(0))
2193 goto end;
2194 }
2195#endif
2196
2197#ifndef CONFIG_2BUFF_MODE
2198 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2199#else
2200 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2201#endif
2202 if (!skb) {
2203 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2204 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2205 return -ENOMEM;
2206 }
2207#ifndef CONFIG_2BUFF_MODE
2208 skb_reserve(skb, NET_IP_ALIGN);
2209 memset(rxdp, 0, sizeof(RxD_t));
2210 rxdp->Buffer0_ptr = pci_map_single
2211 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2212 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2213 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2214 rxdp->Host_Control = (unsigned long) (skb);
2215 rxdp->Control_1 |= RXD_OWN_XENA;
2216 off++;
2217 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2218 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2219#else
20346722 2220 ba = &mac_control->rings[ring_no].ba[block_no][off];
1da177e4 2221 skb_reserve(skb, BUF0_LEN);
689be439
DM
2222 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2223 if (tmp)
2224 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1da177e4
LT
2225
2226 memset(rxdp, 0, sizeof(RxD_t));
2227 rxdp->Buffer2_ptr = pci_map_single
2228 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2229 PCI_DMA_FROMDEVICE);
2230 rxdp->Buffer0_ptr =
2231 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2232 PCI_DMA_FROMDEVICE);
2233 rxdp->Buffer1_ptr =
2234 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2235 PCI_DMA_FROMDEVICE);
2236
2237 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2238 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2239 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2240 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2241 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2242 rxdp->Control_1 |= RXD_OWN_XENA;
2243 off++;
20346722 2244 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2245#endif
5e25b9dd 2246 rxdp->Control_2 |= SET_RXD_MARKER;
20346722 2247
1da177e4
LT
2248 atomic_inc(&nic->rx_bufs_left[ring_no]);
2249 alloc_tab++;
2250 }
2251
2252 end:
2253 return SUCCESS;
2254}
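
/*
 * The heart of the single buffer mode path above, distilled and reusing
 * fill_rx_buffers()'s locals: allocate an skb, DMA-map its data area,
 * describe it in the RxD and hand the RxD to the NIC by setting the
 * ownership bit.  Sketch only; error handling and ring book-keeping are
 * omitted.
 */
#if 0	/* illustrative only */
	skb = dev_alloc_skb(size + NET_IP_ALIGN);
	skb_reserve(skb, NET_IP_ALIGN);
	memset(rxdp, 0, sizeof(RxD_t));
	rxdp->Buffer0_ptr = pci_map_single(nic->pdev, skb->data, size,
					   PCI_DMA_FROMDEVICE);
	rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
	rxdp->Host_Control = (unsigned long) skb;
	rxdp->Control_1 |= RXD_OWN_XENA;	/* NIC now owns this RxD */
#endif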
2255
2256/**
20346722 2257 * free_rx_buffers - Frees all Rx buffers
1da177e4 2258 * @sp: device private variable.
20346722 2259 * Description:
1da177e4
LT
2260 * This function will free all Rx buffers allocated by host.
2261 * Return Value:
2262 * NONE.
2263 */
2264
2265static void free_rx_buffers(struct s2io_nic *sp)
2266{
2267 struct net_device *dev = sp->dev;
2268 int i, j, blk = 0, off, buf_cnt = 0;
2269 RxD_t *rxdp;
2270 struct sk_buff *skb;
2271 mac_info_t *mac_control;
2272 struct config_param *config;
2273#ifdef CONFIG_2BUFF_MODE
2274 buffAdd_t *ba;
2275#endif
2276
2277 mac_control = &sp->mac_control;
2278 config = &sp->config;
2279
2280 for (i = 0; i < config->rx_ring_num; i++) {
2281 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2282 off = j % (MAX_RXDS_PER_BLOCK + 1);
20346722 2283 rxdp = mac_control->rings[i].rx_blocks[blk].
2284 block_virt_addr + off;
1da177e4
LT
2285
2286#ifndef CONFIG_2BUFF_MODE
2287 if (rxdp->Control_1 == END_OF_BLOCK) {
2288 rxdp =
2289 (RxD_t *) ((unsigned long) rxdp->
2290 Control_2);
2291 j++;
2292 blk++;
2293 }
2294#else
2295 if (rxdp->Host_Control == END_OF_BLOCK) {
2296 blk++;
2297 continue;
2298 }
2299#endif
2300
2301 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2302 memset(rxdp, 0, sizeof(RxD_t));
2303 continue;
2304 }
2305
2306 skb =
2307 (struct sk_buff *) ((unsigned long) rxdp->
2308 Host_Control);
2309 if (skb) {
2310#ifndef CONFIG_2BUFF_MODE
2311 pci_unmap_single(sp->pdev, (dma_addr_t)
2312 rxdp->Buffer0_ptr,
2313 dev->mtu +
2314 HEADER_ETHERNET_II_802_3_SIZE
2315 + HEADER_802_2_SIZE +
2316 HEADER_SNAP_SIZE,
2317 PCI_DMA_FROMDEVICE);
2318#else
20346722 2319 ba = &mac_control->rings[i].ba[blk][off];
1da177e4
LT
2320 pci_unmap_single(sp->pdev, (dma_addr_t)
2321 rxdp->Buffer0_ptr,
2322 BUF0_LEN,
2323 PCI_DMA_FROMDEVICE);
2324 pci_unmap_single(sp->pdev, (dma_addr_t)
2325 rxdp->Buffer1_ptr,
2326 BUF1_LEN,
2327 PCI_DMA_FROMDEVICE);
2328 pci_unmap_single(sp->pdev, (dma_addr_t)
2329 rxdp->Buffer2_ptr,
2330 dev->mtu + BUF0_LEN + 4,
2331 PCI_DMA_FROMDEVICE);
2332#endif
2333 dev_kfree_skb(skb);
2334 atomic_dec(&sp->rx_bufs_left[i]);
2335 buf_cnt++;
2336 }
2337 memset(rxdp, 0, sizeof(RxD_t));
2338 }
20346722 2339 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2340 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2341 mac_control->rings[i].rx_curr_put_info.offset = 0;
2342 mac_control->rings[i].rx_curr_get_info.offset = 0;
1da177e4
LT
2343 atomic_set(&sp->rx_bufs_left[i], 0);
2344 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2345 dev->name, buf_cnt, i);
2346 }
2347}
2348
2349/**
2350 * s2io_poll - Rx interrupt handler for NAPI support
2351 * @dev : pointer to the device structure.
20346722 2352 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
 2353 * during one pass through the 'Poll' function.
2354 * Description:
 2355 * Comes into the picture only if NAPI support has been incorporated. It does
 2356 * the same thing that rx_intr_handler does, but not in an interrupt context;
 2357 * also, it processes only a given number of packets.
2358 * Return value:
2359 * 0 on success and 1 if there are No Rx packets to be processed.
2360 */
2361
20346722 2362#if defined(CONFIG_S2IO_NAPI)
1da177e4
LT
2363static int s2io_poll(struct net_device *dev, int *budget)
2364{
2365 nic_t *nic = dev->priv;
20346722 2366 int pkt_cnt = 0, org_pkts_to_process;
1da177e4
LT
2367 mac_info_t *mac_control;
2368 struct config_param *config;
20346722 2369 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2370 u64 val64;
2371 int i;
1da177e4 2372
7ba013ac 2373 atomic_inc(&nic->isr_cnt);
1da177e4
LT
2374 mac_control = &nic->mac_control;
2375 config = &nic->config;
2376
20346722 2377 nic->pkts_to_process = *budget;
2378 if (nic->pkts_to_process > dev->quota)
2379 nic->pkts_to_process = dev->quota;
2380 org_pkts_to_process = nic->pkts_to_process;
1da177e4
LT
2381
2382 val64 = readq(&bar0->rx_traffic_int);
2383 writeq(val64, &bar0->rx_traffic_int);
2384
2385 for (i = 0; i < config->rx_ring_num; i++) {
20346722 2386 rx_intr_handler(&mac_control->rings[i]);
2387 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2388 if (!nic->pkts_to_process) {
2389 /* Quota for the current iteration has been met */
2390 goto no_rx;
1da177e4 2391 }
1da177e4
LT
2392 }
2393 if (!pkt_cnt)
2394 pkt_cnt = 1;
2395
2396 dev->quota -= pkt_cnt;
2397 *budget -= pkt_cnt;
2398 netif_rx_complete(dev);
2399
2400 for (i = 0; i < config->rx_ring_num; i++) {
2401 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2402 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2403 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2404 break;
2405 }
2406 }
2407 /* Re enable the Rx interrupts. */
2408 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
7ba013ac 2409 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2410 return 0;
2411
20346722 2412no_rx:
1da177e4
LT
2413 dev->quota -= pkt_cnt;
2414 *budget -= pkt_cnt;
2415
2416 for (i = 0; i < config->rx_ring_num; i++) {
2417 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2418 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2419 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2420 break;
2421 }
2422 }
7ba013ac 2423 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2424 return 1;
2425}
20346722 2426#endif
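
/*
 * The budget arithmetic used in s2io_poll() above, in isolation and
 * reusing its locals: the rings decrement nic->pkts_to_process as they
 * hand frames up, and whatever was consumed is charged against both the
 * per-device quota and the global softirq budget.  Sketch of the 2.6-era
 * NAPI contract, not additional code.
 */
#if 0	/* illustrative only */
	pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
	dev->quota -= pkt_cnt;		/* per-device allowance */
	*budget -= pkt_cnt;		/* global poll budget   */
	/* return 0 after netif_rx_complete(dev) when all Rx work is done;
	 * return 1 (stay on the poll list) when the quota ran out.       */
#endif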
2427
2428/**
1da177e4
LT
2429 * rx_intr_handler - Rx interrupt handler
2430 * @nic: device private variable.
20346722 2431 * Description:
2432 * If the interrupt is because of a received frame or if the
1da177e4 2433 * receive ring contains fresh, as yet un-processed frames, this function is
20346722 2434 * called. It picks out the RxD at which the last Rx processing had
2435 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2436 * the offset.
2437 * Return Value:
2438 * NONE.
2439 */
20346722 2440static void rx_intr_handler(ring_info_t *ring_data)
1da177e4 2441{
20346722 2442 nic_t *nic = ring_data->nic;
1da177e4 2443 struct net_device *dev = (struct net_device *) nic->dev;
20346722 2444 int get_block, get_offset, put_block, put_offset, ring_bufs;
1da177e4
LT
2445 rx_curr_get_info_t get_info, put_info;
2446 RxD_t *rxdp;
2447 struct sk_buff *skb;
20346722 2448#ifndef CONFIG_S2IO_NAPI
2449 int pkt_cnt = 0;
1da177e4 2450#endif
7ba013ac 2451 spin_lock(&nic->rx_lock);
2452 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2453 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2454 __FUNCTION__, dev->name);
 2455 spin_unlock(&nic->rx_lock);
 return;
 2456 }
2457
20346722 2458 get_info = ring_data->rx_curr_get_info;
2459 get_block = get_info.block_index;
2460 put_info = ring_data->rx_curr_put_info;
2461 put_block = put_info.block_index;
2462 ring_bufs = get_info.ring_len+1;
2463 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
1da177e4 2464 get_info.offset;
20346722 2465 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2466 get_info.offset;
2467#ifndef CONFIG_S2IO_NAPI
2468 spin_lock(&nic->put_lock);
2469 put_offset = ring_data->put_pos;
2470 spin_unlock(&nic->put_lock);
2471#else
2472 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2473 put_info.offset;
2474#endif
5e25b9dd 2475 while (RXD_IS_UP2DT(rxdp) &&
2476 (((get_offset + 1) % ring_bufs) != put_offset)) {
20346722 2477 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2478 if (skb == NULL) {
2479 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2480 dev->name);
2481 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
7ba013ac 2482 spin_unlock(&nic->rx_lock);
20346722 2483 return;
1da177e4 2484 }
20346722 2485#ifndef CONFIG_2BUFF_MODE
2486 pci_unmap_single(nic->pdev, (dma_addr_t)
2487 rxdp->Buffer0_ptr,
2488 dev->mtu +
2489 HEADER_ETHERNET_II_802_3_SIZE +
2490 HEADER_802_2_SIZE +
2491 HEADER_SNAP_SIZE,
2492 PCI_DMA_FROMDEVICE);
1da177e4 2493#else
20346722 2494 pci_unmap_single(nic->pdev, (dma_addr_t)
2495 rxdp->Buffer0_ptr,
2496 BUF0_LEN, PCI_DMA_FROMDEVICE);
2497 pci_unmap_single(nic->pdev, (dma_addr_t)
2498 rxdp->Buffer1_ptr,
2499 BUF1_LEN, PCI_DMA_FROMDEVICE);
2500 pci_unmap_single(nic->pdev, (dma_addr_t)
2501 rxdp->Buffer2_ptr,
2502 dev->mtu + BUF0_LEN + 4,
2503 PCI_DMA_FROMDEVICE);
2504#endif
2505 rx_osm_handler(ring_data, rxdp);
2506 get_info.offset++;
2507 ring_data->rx_curr_get_info.offset =
1da177e4 2508 get_info.offset;
20346722 2509 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2510 get_info.offset;
2511 if (get_info.offset &&
2512 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2513 get_info.offset = 0;
2514 ring_data->rx_curr_get_info.offset
2515 = get_info.offset;
2516 get_block++;
2517 get_block %= ring_data->block_count;
2518 ring_data->rx_curr_get_info.block_index
2519 = get_block;
2520 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2521 }
1da177e4 2522
20346722 2523 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1da177e4 2524 get_info.offset;
20346722 2525#ifdef CONFIG_S2IO_NAPI
2526 nic->pkts_to_process -= 1;
2527 if (!nic->pkts_to_process)
2528 break;
2529#else
2530 pkt_cnt++;
1da177e4
LT
2531 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2532 break;
20346722 2533#endif
1da177e4 2534 }
7ba013ac 2535 spin_unlock(&nic->rx_lock);
1da177e4 2536}
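
/*
 * The loop above stops one slot short of the put pointer: if the get
 * position were ever allowed to reach the put position, an empty ring and
 * a full ring would look identical.  Condensed form of the termination
 * test, reusing rx_intr_handler()'s locals, for reference only.
 */
#if 0	/* illustrative only */
	while (RXD_IS_UP2DT(rxdp) &&
	       (((get_offset + 1) % ring_bufs) != put_offset)) {
		/* frame is complete and owned by the host: unmap the
		 * buffer(s), pass the skb to rx_osm_handler(), then
		 * advance the get offset/block and re-read rxdp.      */
	}
#endif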
20346722 2537
2538/**
1da177e4
LT
2539 * tx_intr_handler - Transmit interrupt handler
2540 * @nic : device private variable
20346722 2541 * Description:
2542 * If an interrupt was raised to indicate DMA complete of the
2543 * Tx packet, this function is called. It identifies the last TxD
2544 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
 2545 * been DMA'ed into the NIC's internal memory.
2546 * Return Value:
2547 * NONE
2548 */
2549
20346722 2550static void tx_intr_handler(fifo_info_t *fifo_data)
1da177e4 2551{
20346722 2552 nic_t *nic = fifo_data->nic;
1da177e4
LT
2553 struct net_device *dev = (struct net_device *) nic->dev;
2554 tx_curr_get_info_t get_info, put_info;
2555 struct sk_buff *skb;
2556 TxD_t *txdlp;
1da177e4 2557 u16 j, frg_cnt;
1da177e4 2558
20346722 2559 get_info = fifo_data->tx_curr_get_info;
2560 put_info = fifo_data->tx_curr_put_info;
2561 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2562 list_virt_addr;
2563 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2564 (get_info.offset != put_info.offset) &&
2565 (txdlp->Host_Control)) {
2566 /* Check for TxD errors */
2567 if (txdlp->Control_1 & TXD_T_CODE) {
2568 unsigned long long err;
2569 err = txdlp->Control_1 & TXD_T_CODE;
2570 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2571 err);
2572 }
1da177e4 2573
20346722 2574 skb = (struct sk_buff *) ((unsigned long)
2575 txdlp->Host_Control);
2576 if (skb == NULL) {
2577 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2578 __FUNCTION__);
2579 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2580 return;
2581 }
2582
2583 frg_cnt = skb_shinfo(skb)->nr_frags;
2584 nic->tx_pkt_count++;
2585
2586 pci_unmap_single(nic->pdev, (dma_addr_t)
2587 txdlp->Buffer_Pointer,
2588 skb->len - skb->data_len,
2589 PCI_DMA_TODEVICE);
2590 if (frg_cnt) {
2591 TxD_t *temp;
2592 temp = txdlp;
2593 txdlp++;
2594 for (j = 0; j < frg_cnt; j++, txdlp++) {
2595 skb_frag_t *frag =
2596 &skb_shinfo(skb)->frags[j];
0b1f7ebe 2597 if (!txdlp->Buffer_Pointer)
2598 break;
20346722 2599 pci_unmap_page(nic->pdev,
2600 (dma_addr_t)
2601 txdlp->
2602 Buffer_Pointer,
2603 frag->size,
2604 PCI_DMA_TODEVICE);
1da177e4 2605 }
20346722 2606 txdlp = temp;
1da177e4 2607 }
20346722 2608 memset(txdlp, 0,
2609 (sizeof(TxD_t) * fifo_data->max_txds));
2610
2611 /* Updating the statistics block */
20346722 2612 nic->stats.tx_bytes += skb->len;
2613 dev_kfree_skb_irq(skb);
2614
2615 get_info.offset++;
2616 get_info.offset %= get_info.fifo_len + 1;
2617 txdlp = (TxD_t *) fifo_data->list_info
2618 [get_info.offset].list_virt_addr;
2619 fifo_data->tx_curr_get_info.offset =
2620 get_info.offset;
1da177e4
LT
2621 }
2622
2623 spin_lock(&nic->tx_lock);
2624 if (netif_queue_stopped(dev))
2625 netif_wake_queue(dev);
2626 spin_unlock(&nic->tx_lock);
2627}
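
/*
 * The reclaim condition used above, spelled out with tx_intr_handler()'s
 * locals: a TxD list may be freed only after the NIC has cleared the
 * ownership bit, the get pointer has not yet caught up with the put
 * pointer, and an skb is actually attached to the descriptor.
 */
#if 0	/* illustrative only */
	while (!(txdlp->Control_1 & TXD_LIST_OWN_XENA) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* unmap the linear part and any page fragments, free the
		 * skb with dev_kfree_skb_irq(), then advance the get offset */
	}
#endif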
2628
20346722 2629/**
1da177e4
LT
 2630 * alarm_intr_handler - Alarm Interrupt handler
2631 * @nic: device private variable
20346722 2632 * Description: If the interrupt was neither because of an Rx packet nor a
1da177e4 2633 * Tx completion, this function is called. If the interrupt was to indicate
20346722 2634 * a loss of link, the OSM link status handler is invoked. For any other
 2635 * alarm interrupt, the block that raised the interrupt is displayed
1da177e4
LT
2636 * and a H/W reset is issued.
2637 * Return Value:
2638 * NONE
2639*/
2640
2641static void alarm_intr_handler(struct s2io_nic *nic)
2642{
2643 struct net_device *dev = (struct net_device *) nic->dev;
2644 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2645 register u64 val64 = 0, err_reg = 0;
2646
2647 /* Handling link status change error Intr */
a371a07d 2648 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2649 err_reg = readq(&bar0->mac_rmac_err_reg);
2650 writeq(err_reg, &bar0->mac_rmac_err_reg);
2651 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2652 schedule_work(&nic->set_link_task);
2653 }
1da177e4
LT
2654 }
2655
5e25b9dd 2656 /* Handling Ecc errors */
2657 val64 = readq(&bar0->mc_err_reg);
2658 writeq(val64, &bar0->mc_err_reg);
2659 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2660 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
7ba013ac 2661 nic->mac_control.stats_info->sw_stat.
2662 double_ecc_errs++;
5e25b9dd 2663 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2664 dev->name);
2665 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2666 netif_stop_queue(dev);
2667 schedule_work(&nic->rst_timer_task);
2668 } else {
7ba013ac 2669 nic->mac_control.stats_info->sw_stat.
2670 single_ecc_errs++;
5e25b9dd 2671 }
2672 }
2673
1da177e4
LT
2674 /* In case of a serious error, the device will be Reset. */
2675 val64 = readq(&bar0->serr_source);
2676 if (val64 & SERR_SOURCE_ANY) {
2677 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2678 DBG_PRINT(ERR_DBG, "serious error!!\n");
2679 netif_stop_queue(dev);
2680 schedule_work(&nic->rst_timer_task);
2681 }
2682
2683 /*
2684 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2685 * Error occurs, the adapter will be recycled by disabling the
20346722 2686 * adapter enable bit and enabling it again after the device
1da177e4
LT
2687 * becomes Quiescent.
2688 */
2689 val64 = readq(&bar0->pcc_err_reg);
2690 writeq(val64, &bar0->pcc_err_reg);
2691 if (val64 & PCC_FB_ECC_DB_ERR) {
2692 u64 ac = readq(&bar0->adapter_control);
2693 ac &= ~(ADAPTER_CNTL_EN);
2694 writeq(ac, &bar0->adapter_control);
2695 ac = readq(&bar0->adapter_control);
2696 schedule_work(&nic->set_link_task);
2697 }
2698
2699 /* Other type of interrupts are not being handled now, TODO */
2700}
2701
20346722 2702/**
1da177e4 2703 * wait_for_cmd_complete - waits for a command to complete.
20346722 2704 * @sp : private member of the device structure, which is a pointer to the
1da177e4 2705 * s2io_nic structure.
20346722 2706 * Description: Waits for a command written to the RMAC ADDR/DATA
 2707 * registers to complete and returns either success or
 2708 * error depending on whether the command completed or not.
1da177e4
LT
2709 * Return value:
2710 * SUCCESS on success and FAILURE on failure.
2711 */
2712
20346722 2713int wait_for_cmd_complete(nic_t * sp)
1da177e4
LT
2714{
2715 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2716 int ret = FAILURE, cnt = 0;
2717 u64 val64;
2718
2719 while (TRUE) {
2720 val64 = readq(&bar0->rmac_addr_cmd_mem);
2721 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2722 ret = SUCCESS;
2723 break;
2724 }
2725 msleep(50);
2726 if (cnt++ > 10)
2727 break;
2728 }
2729
2730 return ret;
2731}
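
/*
 * The same "poll a strobe bit with a bounded number of retries" idea as a
 * generic helper.  A sketch under the assumption of a 64-bit register and
 * a 50 ms polling interval; the helper name is hypothetical and not part
 * of the driver.
 */
#if 0	/* illustrative only */
static int poll_until_clear(u64 __iomem *reg, u64 busy_bit, int max_tries)
{
	int cnt;

	for (cnt = 0; cnt < max_tries; cnt++) {
		if (!(readq(reg) & busy_bit))
			return SUCCESS;
		msleep(50);
	}
	return FAILURE;
}
#endif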
2732
20346722 2733/**
2734 * s2io_reset - Resets the card.
1da177e4
LT
2735 * @sp : private member of the device structure.
2736 * Description: Function to Reset the card. This function then also
20346722 2737 * restores the previously saved PCI configuration space registers as
1da177e4
LT
2738 * the card reset also resets the configuration space.
2739 * Return value:
2740 * void.
2741 */
2742
20346722 2743void s2io_reset(nic_t * sp)
1da177e4
LT
2744{
2745 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2746 u64 val64;
5e25b9dd 2747 u16 subid, pci_cmd;
1da177e4 2748
0b1f7ebe 2749 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
2750 if (sp->device_type == XFRAME_I_DEVICE)
2751 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2752
1da177e4
LT
2753 val64 = SW_RESET_ALL;
2754 writeq(val64, &bar0->sw_reset);
2755
20346722 2756 /*
2757 * At this stage, if the PCI write is indeed completed, the
2758 * card is reset and so is the PCI Config space of the device.
2759 * So a read cannot be issued at this stage on any of the
1da177e4
LT
2760 * registers to ensure the write into "sw_reset" register
2761 * has gone through.
2762 * Question: Is there any system call that will explicitly force
2763 * all the write commands still pending on the bus to be pushed
2764 * through?
 2765 * As of now I am just giving a 250ms delay and hoping that the
2766 * PCI write to sw_reset register is done by this time.
2767 */
2768 msleep(250);
2769
541ae68f 2770 if (!(sp->device_type & XFRAME_II_DEVICE)) {
0b1f7ebe 2771 /* Restore the PCI state saved during initialization. */
541ae68f 2772 pci_restore_state(sp->pdev);
0b1f7ebe 2773 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2774 pci_cmd);
541ae68f 2775 } else {
2776 pci_set_master(sp->pdev);
2777 }
1da177e4
LT
2778 s2io_init_pci(sp);
2779
2780 msleep(250);
2781
20346722 2782 /* Set swapper to enable I/O register access */
2783 s2io_set_swapper(sp);
2784
5e25b9dd 2785 /* Clear certain PCI/PCI-X fields after reset */
2786 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2787 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2788 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2789
2790 val64 = readq(&bar0->txpic_int_reg);
2791 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2792 writeq(val64, &bar0->txpic_int_reg);
2793
2794 /* Clearing PCIX Ecc status register */
2795 pci_write_config_dword(sp->pdev, 0x68, 0);
2796
20346722 2797 /* Reset device statistics maintained by OS */
2798 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2799
1da177e4
LT
2800 /* SXE-002: Configure link and activity LED to turn it off */
2801 subid = sp->pdev->subsystem_device;
541ae68f 2802 if (((subid & 0xFF) >= 0x07) &&
2803 (sp->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
2804 val64 = readq(&bar0->gpio_control);
2805 val64 |= 0x0000800000000000ULL;
2806 writeq(val64, &bar0->gpio_control);
2807 val64 = 0x0411040400000000ULL;
20346722 2808 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1da177e4
LT
2809 }
2810
541ae68f 2811 /*
 2812 * Clear spurious ECC interrupts that would have occurred on
2813 * XFRAME II cards after reset.
2814 */
2815 if (sp->device_type == XFRAME_II_DEVICE) {
2816 val64 = readq(&bar0->pcc_err_reg);
2817 writeq(val64, &bar0->pcc_err_reg);
2818 }
2819
1da177e4
LT
2820 sp->device_enabled_once = FALSE;
2821}
2822
2823/**
20346722 2824 * s2io_set_swapper - to set the swapper control on the card
2825 * @sp : private member of the device structure,
1da177e4 2826 * pointer to the s2io_nic structure.
20346722 2827 * Description: Function to set the swapper control on the card
1da177e4
LT
2828 * correctly depending on the 'endianness' of the system.
2829 * Return value:
2830 * SUCCESS on success and FAILURE on failure.
2831 */
2832
20346722 2833int s2io_set_swapper(nic_t * sp)
1da177e4
LT
2834{
2835 struct net_device *dev = sp->dev;
2836 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2837 u64 val64, valt, valr;
2838
20346722 2839 /*
1da177e4
LT
2840 * Set proper endian settings and verify the same by reading
2841 * the PIF Feed-back register.
2842 */
2843
2844 val64 = readq(&bar0->pif_rd_swapper_fb);
2845 if (val64 != 0x0123456789ABCDEFULL) {
2846 int i = 0;
2847 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2848 0x8100008181000081ULL, /* FE=1, SE=0 */
2849 0x4200004242000042ULL, /* FE=0, SE=1 */
2850 0}; /* FE=0, SE=0 */
2851
2852 while(i<4) {
2853 writeq(value[i], &bar0->swapper_ctrl);
2854 val64 = readq(&bar0->pif_rd_swapper_fb);
2855 if (val64 == 0x0123456789ABCDEFULL)
2856 break;
2857 i++;
2858 }
2859 if (i == 4) {
2860 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2861 dev->name);
2862 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2863 (unsigned long long) val64);
2864 return FAILURE;
2865 }
2866 valr = value[i];
2867 } else {
2868 valr = readq(&bar0->swapper_ctrl);
2869 }
2870
2871 valt = 0x0123456789ABCDEFULL;
2872 writeq(valt, &bar0->xmsi_address);
2873 val64 = readq(&bar0->xmsi_address);
2874
2875 if(val64 != valt) {
2876 int i = 0;
2877 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2878 0x0081810000818100ULL, /* FE=1, SE=0 */
2879 0x0042420000424200ULL, /* FE=0, SE=1 */
2880 0}; /* FE=0, SE=0 */
2881
2882 while(i<4) {
2883 writeq((value[i] | valr), &bar0->swapper_ctrl);
2884 writeq(valt, &bar0->xmsi_address);
2885 val64 = readq(&bar0->xmsi_address);
2886 if(val64 == valt)
2887 break;
2888 i++;
2889 }
2890 if(i == 4) {
20346722 2891 unsigned long long x = val64;
1da177e4 2892 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
20346722 2893 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
1da177e4
LT
2894 return FAILURE;
2895 }
2896 }
2897 val64 = readq(&bar0->swapper_ctrl);
2898 val64 &= 0xFFFF000000000000ULL;
2899
2900#ifdef __BIG_ENDIAN
20346722 2901 /*
 2902 * The device is by default set to a big endian format, so a
1da177e4
LT
2903 * big endian driver need not set anything.
2904 */
2905 val64 |= (SWAPPER_CTRL_TXP_FE |
2906 SWAPPER_CTRL_TXP_SE |
2907 SWAPPER_CTRL_TXD_R_FE |
2908 SWAPPER_CTRL_TXD_W_FE |
2909 SWAPPER_CTRL_TXF_R_FE |
2910 SWAPPER_CTRL_RXD_R_FE |
2911 SWAPPER_CTRL_RXD_W_FE |
2912 SWAPPER_CTRL_RXF_W_FE |
2913 SWAPPER_CTRL_XMSI_FE |
2914 SWAPPER_CTRL_XMSI_SE |
2915 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2916 writeq(val64, &bar0->swapper_ctrl);
2917#else
20346722 2918 /*
1da177e4 2919 * Initially we enable all bits to make it accessible by the
20346722 2920 * driver, then we selectively enable only those bits that
1da177e4
LT
2921 * we want to set.
2922 */
2923 val64 |= (SWAPPER_CTRL_TXP_FE |
2924 SWAPPER_CTRL_TXP_SE |
2925 SWAPPER_CTRL_TXD_R_FE |
2926 SWAPPER_CTRL_TXD_R_SE |
2927 SWAPPER_CTRL_TXD_W_FE |
2928 SWAPPER_CTRL_TXD_W_SE |
2929 SWAPPER_CTRL_TXF_R_FE |
2930 SWAPPER_CTRL_RXD_R_FE |
2931 SWAPPER_CTRL_RXD_R_SE |
2932 SWAPPER_CTRL_RXD_W_FE |
2933 SWAPPER_CTRL_RXD_W_SE |
2934 SWAPPER_CTRL_RXF_W_FE |
2935 SWAPPER_CTRL_XMSI_FE |
2936 SWAPPER_CTRL_XMSI_SE |
2937 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2938 writeq(val64, &bar0->swapper_ctrl);
2939#endif
2940 val64 = readq(&bar0->swapper_ctrl);
2941
20346722 2942 /*
2943 * Verifying if endian settings are accurate by reading a
1da177e4
LT
2944 * feedback register.
2945 */
2946 val64 = readq(&bar0->pif_rd_swapper_fb);
2947 if (val64 != 0x0123456789ABCDEFULL) {
 2948 /* Endian settings are incorrect, calls for another look. */
2949 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2950 dev->name);
2951 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2952 (unsigned long long) val64);
2953 return FAILURE;
2954 }
2955
2956 return SUCCESS;
2957}
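
/*
 * The verification trick above in one line: whatever FE/SE combination is
 * programmed into swapper_ctrl, the setting is correct only when the
 * read-only feedback register comes back as the canonical byte pattern.
 * Sketch reusing s2io_set_swapper()'s locals.
 */
#if 0	/* illustrative only */
	if (readq(&bar0->pif_rd_swapper_fb) != 0x0123456789ABCDEFULL)
		return FAILURE;	/* try the next swapper_ctrl candidate */
#endif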
2958
2959/* ********************************************************* *
2960 * Functions defined below concern the OS part of the driver *
2961 * ********************************************************* */
2962
20346722 2963/**
1da177e4
LT
2964 * s2io_open - open entry point of the driver
2965 * @dev : pointer to the device structure.
2966 * Description:
2967 * This function is the open entry point of the driver. It mainly calls a
2968 * function to allocate Rx buffers and inserts them into the buffer
20346722 2969 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
2970 * Return value:
2971 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2972 * file on failure.
2973 */
2974
20346722 2975int s2io_open(struct net_device *dev)
1da177e4
LT
2976{
2977 nic_t *sp = dev->priv;
2978 int err = 0;
2979
20346722 2980 /*
2981 * Make sure you have link off by default every time
1da177e4
LT
 2982 * the NIC is initialized
2983 */
2984 netif_carrier_off(dev);
0b1f7ebe 2985 sp->last_link_state = 0;
1da177e4
LT
2986
2987 /* Initialize H/W and enable interrupts */
2988 if (s2io_card_up(sp)) {
2989 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2990 dev->name);
20346722 2991 err = -ENODEV;
2992 goto hw_init_failed;
1da177e4
LT
2993 }
2994
2995 /* After proper initialization of H/W, register ISR */
20346722 2996 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
1da177e4
LT
2997 sp->name, dev);
2998 if (err) {
1da177e4
LT
2999 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3000 dev->name);
20346722 3001 goto isr_registration_failed;
1da177e4
LT
3002 }
3003
3004 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3005 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
20346722 3006 err = -ENODEV;
3007 goto setting_mac_address_failed;
1da177e4
LT
3008 }
3009
3010 netif_start_queue(dev);
3011 return 0;
20346722 3012
3013setting_mac_address_failed:
3014 free_irq(sp->pdev->irq, dev);
3015isr_registration_failed:
25fff88e 3016 del_timer_sync(&sp->alarm_timer);
20346722 3017 s2io_reset(sp);
3018hw_init_failed:
3019 return err;
1da177e4
LT
3020}
3021
3022/**
3023 * s2io_close -close entry point of the driver
3024 * @dev : device pointer.
3025 * Description:
3026 * This is the stop entry point of the driver. It needs to undo exactly
 3027 * whatever was done by the open entry point, thus it's usually referred to
 3028 * as the close function. Among other things this function mainly stops the
3029 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3030 * Return value:
3031 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3032 * file on failure.
3033 */
3034
20346722 3035int s2io_close(struct net_device *dev)
1da177e4
LT
3036{
3037 nic_t *sp = dev->priv;
1da177e4
LT
3038 flush_scheduled_work();
3039 netif_stop_queue(dev);
3040 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3041 s2io_card_down(sp);
3042
20346722 3043 free_irq(sp->pdev->irq, dev);
1da177e4
LT
3044 sp->device_close_flag = TRUE; /* Device is shut down. */
3045 return 0;
3046}
3047
3048/**
 3049 * s2io_xmit - Tx entry point of the driver
3050 * @skb : the socket buffer containing the Tx data.
3051 * @dev : device pointer.
3052 * Description :
3053 * This function is the Tx entry point of the driver. S2IO NIC supports
3054 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 3055 * NOTE: when the device can't queue the pkt, just the trans_start variable
 3056 * will not be updated.
3057 * Return value:
3058 * 0 on success & 1 on failure.
3059 */
3060
20346722 3061int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
3062{
3063 nic_t *sp = dev->priv;
3064 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3065 register u64 val64;
3066 TxD_t *txdp;
3067 TxFIFO_element_t __iomem *tx_fifo;
3068 unsigned long flags;
3069#ifdef NETIF_F_TSO
3070 int mss;
3071#endif
be3a6b02 3072 u16 vlan_tag = 0;
3073 int vlan_priority = 0;
1da177e4
LT
3074 mac_info_t *mac_control;
3075 struct config_param *config;
1da177e4
LT
3076
3077 mac_control = &sp->mac_control;
3078 config = &sp->config;
3079
20346722 3080 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
1da177e4 3081 spin_lock_irqsave(&sp->tx_lock, flags);
1da177e4 3082 if (atomic_read(&sp->card_state) == CARD_DOWN) {
20346722 3083 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4
LT
3084 dev->name);
3085 spin_unlock_irqrestore(&sp->tx_lock, flags);
20346722 3086 dev_kfree_skb(skb);
3087 return 0;
1da177e4
LT
3088 }
3089
3090 queue = 0;
1da177e4 3091
be3a6b02 3092 /* Get Fifo number to Transmit based on vlan priority */
3093 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3094 vlan_tag = vlan_tx_tag_get(skb);
3095 vlan_priority = vlan_tag >> 13;
3096 queue = config->fifo_mapping[vlan_priority];
3097 }
3098
20346722 3099 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3100 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3101 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3102 list_virt_addr;
3103
3104 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
1da177e4
LT
3105 /* Avoid "put" pointer going beyond "get" pointer */
3106 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3107 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
3108 netif_stop_queue(dev);
3109 dev_kfree_skb(skb);
3110 spin_unlock_irqrestore(&sp->tx_lock, flags);
3111 return 0;
3112 }
0b1f7ebe 3113
3114 /* A buffer with no data will be dropped */
3115 if (!skb->len) {
3116 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3117 dev_kfree_skb(skb);
3118 spin_unlock_irqrestore(&sp->tx_lock, flags);
3119 return 0;
3120 }
3121
1da177e4
LT
3122#ifdef NETIF_F_TSO
3123 mss = skb_shinfo(skb)->tso_size;
3124 if (mss) {
3125 txdp->Control_1 |= TXD_TCP_LSO_EN;
3126 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3127 }
3128#endif
3129
3130 frg_cnt = skb_shinfo(skb)->nr_frags;
3131 frg_len = skb->len - skb->data_len;
3132
1da177e4
LT
3133 txdp->Buffer_Pointer = pci_map_single
3134 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
20346722 3135 txdp->Host_Control = (unsigned long) skb;
1da177e4
LT
3136 if (skb->ip_summed == CHECKSUM_HW) {
3137 txdp->Control_2 |=
3138 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3139 TXD_TX_CKO_UDP_EN);
3140 }
3141
3142 txdp->Control_2 |= config->tx_intr_type;
d8892c6e 3143
be3a6b02 3144 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3145 txdp->Control_2 |= TXD_VLAN_ENABLE;
3146 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3147 }
3148
1da177e4
LT
3149 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3150 TXD_GATHER_CODE_FIRST);
3151 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3152
3153 /* For fragmented SKB. */
3154 for (i = 0; i < frg_cnt; i++) {
3155 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe 3156 /* A '0' length fragment will be ignored */
3157 if (!frag->size)
3158 continue;
1da177e4
LT
3159 txdp++;
3160 txdp->Buffer_Pointer = (u64) pci_map_page
3161 (sp->pdev, frag->page, frag->page_offset,
3162 frag->size, PCI_DMA_TODEVICE);
3163 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3164 }
3165 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3166
3167 tx_fifo = mac_control->tx_FIFO_start[queue];
20346722 3168 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
1da177e4
LT
3169 writeq(val64, &tx_fifo->TxDL_Pointer);
3170
fe113638 3171 wmb();
3172
1da177e4
LT
3173 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3174 TX_FIFO_LAST_LIST);
20346722 3175
1da177e4
LT
3176#ifdef NETIF_F_TSO
3177 if (mss)
3178 val64 |= TX_FIFO_SPECIAL_FUNC;
3179#endif
3180 writeq(val64, &tx_fifo->List_Control);
3181
1da177e4 3182 put_off++;
20346722 3183 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3184 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
1da177e4
LT
3185
3186 /* Avoid "put" pointer going beyond "get" pointer */
3187 if (((put_off + 1) % queue_len) == get_off) {
3188 DBG_PRINT(TX_DBG,
3189 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3190 put_off, get_off);
3191 netif_stop_queue(dev);
3192 }
3193
3194 dev->trans_start = jiffies;
3195 spin_unlock_irqrestore(&sp->tx_lock, flags);
3196
3197 return 0;
3198}
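
/*
 * Shape of the TxD list that s2io_xmit() builds, for reference, reusing
 * its locals (sketch only): the first descriptor carries the linear part
 * of the skb, one descriptor per page fragment follows, and the gather
 * codes bracket the list before it is posted to the FIFO.
 */
#if 0	/* illustrative only */
	/* TxD[0]          : linear data, TXD_GATHER_CODE_FIRST, OWN_XENA
	 * TxD[1..frg_cnt] : one pci_map_page()'d buffer per skb fragment
	 * TxD[frg_cnt]    : additionally flagged TXD_GATHER_CODE_LAST
	 * then the list's physical address is handed to the hardware:   */
	writeq(mac_control->fifos[queue].list_info[put_off].list_phy_addr,
	       &tx_fifo->TxDL_Pointer);
	writeq(TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
	       TX_FIFO_LAST_LIST, &tx_fifo->List_Control);
#endif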
3199
25fff88e 3200static void
3201s2io_alarm_handle(unsigned long data)
3202{
3203 nic_t *sp = (nic_t *)data;
3204
3205 alarm_intr_handler(sp);
3206 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3207}
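
/*
 * s2io_alarm_handle() re-arms itself every HZ/2 jiffies via mod_timer().
 * The initial arming is presumably done once during device bring-up (note
 * the del_timer_sync() in s2io_open()'s error path above); a typical
 * 2.6-era setup for a timer like sp->alarm_timer looks like this (sketch,
 * not code taken from the driver):
 */
#if 0	/* illustrative only */
	init_timer(&sp->alarm_timer);
	sp->alarm_timer.function = s2io_alarm_handle;
	sp->alarm_timer.data = (unsigned long) sp;
	sp->alarm_timer.expires = jiffies + HZ / 2;
	add_timer(&sp->alarm_timer);
#endif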
3208
a371a07d 3209static void s2io_txpic_intr_handle(nic_t *sp)
3210{
3211 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
3212 u64 val64;
3213
3214 val64 = readq(&bar0->pic_int_status);
3215 if (val64 & PIC_INT_GPIO) {
3216 val64 = readq(&bar0->gpio_int_reg);
3217 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3218 (val64 & GPIO_INT_REG_LINK_UP)) {
3219 val64 |= GPIO_INT_REG_LINK_DOWN;
3220 val64 |= GPIO_INT_REG_LINK_UP;
3221 writeq(val64, &bar0->gpio_int_reg);
3222 goto masking;
3223 }
3224
3225 if (((sp->last_link_state == LINK_UP) &&
3226 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3227 ((sp->last_link_state == LINK_DOWN) &&
3228 (val64 & GPIO_INT_REG_LINK_UP))) {
3229 val64 = readq(&bar0->gpio_int_mask);
3230 val64 |= GPIO_INT_MASK_LINK_DOWN;
3231 val64 |= GPIO_INT_MASK_LINK_UP;
3232 writeq(val64, &bar0->gpio_int_mask);
3233 s2io_set_link((unsigned long)sp);
3234 }
3235masking:
3236 if (sp->last_link_state == LINK_UP) {
3237 /*enable down interrupt */
3238 val64 = readq(&bar0->gpio_int_mask);
3239 /* unmasks link down intr */
3240 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3241 /* masks link up intr */
3242 val64 |= GPIO_INT_MASK_LINK_UP;
3243 writeq(val64, &bar0->gpio_int_mask);
3244 } else {
3245 /*enable UP Interrupt */
3246 val64 = readq(&bar0->gpio_int_mask);
3247 /* unmasks link up interrupt */
3248 val64 &= ~GPIO_INT_MASK_LINK_UP;
3249 /* masks link down interrupt */
3250 val64 |= GPIO_INT_MASK_LINK_DOWN;
3251 writeq(val64, &bar0->gpio_int_mask);
3252 }
3253 }
3254}
3255
1da177e4
LT
3256/**
3257 * s2io_isr - ISR handler of the device .
3258 * @irq: the irq of the device.
3259 * @dev_id: a void pointer to the dev structure of the NIC.
3260 * @pt_regs: pointer to the registers pushed on the stack.
20346722 3261 * Description: This function is the ISR handler of the device. It
3262 * identifies the reason for the interrupt and calls the relevant
 3263 * service routines. As a contingency measure, this ISR allocates the
1da177e4
LT
 3264 * recv buffers if their number is below the panic value, which is
3265 * presently set to 25% of the original number of rcv buffers allocated.
3266 * Return value:
20346722 3267 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
3268 * IRQ_NONE: will be returned if interrupt is not from our device
3269 */
3270static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3271{
3272 struct net_device *dev = (struct net_device *) dev_id;
3273 nic_t *sp = dev->priv;
3274 XENA_dev_config_t __iomem *bar0 = sp->bar0;
20346722 3275 int i;
fe113638 3276 u64 reason = 0, val64;
1da177e4
LT
3277 mac_info_t *mac_control;
3278 struct config_param *config;
3279
7ba013ac 3280 atomic_inc(&sp->isr_cnt);
1da177e4
LT
3281 mac_control = &sp->mac_control;
3282 config = &sp->config;
3283
20346722 3284 /*
1da177e4
LT
3285 * Identify the cause for interrupt and call the appropriate
3286 * interrupt handler. Causes for the interrupt could be;
3287 * 1. Rx of packet.
3288 * 2. Tx complete.
3289 * 3. Link down.
20346722 3290 * 4. Error in any functional blocks of the NIC.
1da177e4
LT
3291 */
3292 reason = readq(&bar0->general_int_status);
3293
3294 if (!reason) {
3295 /* The interrupt was not raised by Xena. */
7ba013ac 3296 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3297 return IRQ_NONE;
3298 }
3299
1da177e4
LT
3300#ifdef CONFIG_S2IO_NAPI
3301 if (reason & GEN_INTR_RXTRAFFIC) {
3302 if (netif_rx_schedule_prep(dev)) {
3303 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3304 DISABLE_INTRS);
3305 __netif_rx_schedule(dev);
3306 }
3307 }
3308#else
3309 /* If Intr is because of Rx Traffic */
3310 if (reason & GEN_INTR_RXTRAFFIC) {
fe113638 3311 /*
3312 * rx_traffic_int reg is an R1 register, writing all 1's
 3313 * will ensure that the actual interrupt causing bit gets
3314 * cleared and hence a read can be avoided.
3315 */
3316 val64 = 0xFFFFFFFFFFFFFFFFULL;
3317 writeq(val64, &bar0->rx_traffic_int);
20346722 3318 for (i = 0; i < config->rx_ring_num; i++) {
3319 rx_intr_handler(&mac_control->rings[i]);
3320 }
1da177e4
LT
3321 }
3322#endif
3323
20346722 3324 /* If Intr is because of Tx Traffic */
3325 if (reason & GEN_INTR_TXTRAFFIC) {
fe113638 3326 /*
3327 * tx_traffic_int reg is an R1 register, writing all 1's
 3328 * will ensure that the actual interrupt causing bit gets
3329 * cleared and hence a read can be avoided.
3330 */
3331 val64 = 0xFFFFFFFFFFFFFFFFULL;
3332 writeq(val64, &bar0->tx_traffic_int);
3333
20346722 3334 for (i = 0; i < config->tx_fifo_num; i++)
3335 tx_intr_handler(&mac_control->fifos[i]);
3336 }
3337
a371a07d 3338 if (reason & GEN_INTR_TXPIC)
3339 s2io_txpic_intr_handle(sp);
20346722 3340 /*
3341 * If the Rx buffer count is below the panic threshold then
3342 * reallocate the buffers from the interrupt handler itself,
1da177e4
LT
3343 * else schedule a tasklet to reallocate the buffers.
3344 */
3345#ifndef CONFIG_S2IO_NAPI
3346 for (i = 0; i < config->rx_ring_num; i++) {
20346722 3347 int ret;
1da177e4
LT
3348 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3349 int level = rx_buffer_level(sp, rxb_size, i);
3350
3351 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3352 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3353 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3354 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3355 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3356 dev->name);
3357 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3358 clear_bit(0, (&sp->tasklet_status));
7ba013ac 3359 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3360 return IRQ_HANDLED;
3361 }
3362 clear_bit(0, (&sp->tasklet_status));
3363 } else if (level == LOW) {
3364 tasklet_schedule(&sp->task);
3365 }
3366 }
3367#endif
3368
7ba013ac 3369 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3370 return IRQ_HANDLED;
3371}
3372
7ba013ac 3373/**
 3374 * s2io_updt_stats - Triggers an immediate statistics update from the adapter.
3375 */
3376static void s2io_updt_stats(nic_t *sp)
3377{
3378 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3379 u64 val64;
3380 int cnt = 0;
3381
3382 if (atomic_read(&sp->card_state) == CARD_UP) {
3383 /* Apprx 30us on a 133 MHz bus */
3384 val64 = SET_UPDT_CLICKS(10) |
3385 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3386 writeq(val64, &bar0->stat_cfg);
3387 do {
3388 udelay(100);
3389 val64 = readq(&bar0->stat_cfg);
3390 if (!(val64 & BIT(0)))
3391 break;
3392 cnt++;
3393 if (cnt == 5)
3394 break; /* Updt failed */
3395 } while(1);
3396 }
3397}
3398
1da177e4 3399/**
20346722 3400 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
3401 * @dev : pointer to the device structure.
3402 * Description:
20346722 3403 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
3404 * structure and returns a pointer to the same.
3405 * Return value:
3406 * pointer to the updated net_device_stats structure.
3407 */
3408
20346722 3409struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4
LT
3410{
3411 nic_t *sp = dev->priv;
3412 mac_info_t *mac_control;
3413 struct config_param *config;
3414
20346722 3415
1da177e4
LT
3416 mac_control = &sp->mac_control;
3417 config = &sp->config;
3418
7ba013ac 3419 /* Configure Stats for immediate updt */
3420 s2io_updt_stats(sp);
3421
3422 sp->stats.tx_packets =
3423 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722 3424 sp->stats.tx_errors =
3425 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3426 sp->stats.rx_errors =
3427 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3428 sp->stats.multicast =
3429 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 3430 sp->stats.rx_length_errors =
20346722 3431 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4
LT
3432
3433 return (&sp->stats);
3434}
3435
3436/**
3437 * s2io_set_multicast - entry point for multicast address enable/disable.
3438 * @dev : pointer to the device structure
3439 * Description:
20346722 3440 * This function is a driver entry point which gets called by the kernel
3441 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
 3442 * called to set/reset promiscuous mode. Depending on the device flags, it
 3443 * determines whether multicast addresses must be enabled or whether
 3444 * promiscuous mode is to be disabled, etc.
3445 * Return value:
3446 * void.
3447 */
3448
3449static void s2io_set_multicast(struct net_device *dev)
3450{
3451 int i, j, prev_cnt;
3452 struct dev_mc_list *mclist;
3453 nic_t *sp = dev->priv;
3454 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3455 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3456 0xfeffffffffffULL;
3457 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3458 void __iomem *add;
3459
3460 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3461 /* Enable all Multicast addresses */
3462 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3463 &bar0->rmac_addr_data0_mem);
3464 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3465 &bar0->rmac_addr_data1_mem);
3466 val64 = RMAC_ADDR_CMD_MEM_WE |
3467 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3468 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3469 writeq(val64, &bar0->rmac_addr_cmd_mem);
3470 /* Wait till command completes */
3471 wait_for_cmd_complete(sp);
3472
3473 sp->m_cast_flg = 1;
3474 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3475 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3476 /* Disable all Multicast addresses */
3477 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3478 &bar0->rmac_addr_data0_mem);
5e25b9dd 3479 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3480 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3481 val64 = RMAC_ADDR_CMD_MEM_WE |
3482 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3483 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3484 writeq(val64, &bar0->rmac_addr_cmd_mem);
3485 /* Wait till command completes */
3486 wait_for_cmd_complete(sp);
3487
3488 sp->m_cast_flg = 0;
3489 sp->all_multi_pos = 0;
3490 }
3491
3492 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3493 /* Put the NIC into promiscuous mode */
3494 add = &bar0->mac_cfg;
3495 val64 = readq(&bar0->mac_cfg);
3496 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3497
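		/*
		 * mac_cfg is a key-protected register: the unlock key 0x4C0D
		 * must be written to rmac_cfg_key before each 32-bit half of
		 * the 64-bit value is written, which is why the key write is
		 * repeated before each writel() below.
		 */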
3498 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3499 writel((u32) val64, add);
3500 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3501 writel((u32) (val64 >> 32), (add + 4));
3502
3503 val64 = readq(&bar0->mac_cfg);
3504 sp->promisc_flg = 1;
3505 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3506 dev->name);
3507 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3508 /* Remove the NIC from promiscuous mode */
3509 add = &bar0->mac_cfg;
3510 val64 = readq(&bar0->mac_cfg);
3511 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3512
3513 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3514 writel((u32) val64, add);
3515 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3516 writel((u32) (val64 >> 32), (add + 4));
3517
3518 val64 = readq(&bar0->mac_cfg);
3519 sp->promisc_flg = 0;
3520 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3521 dev->name);
3522 }
3523
3524 /* Update individual M_CAST address list */
3525 if ((!sp->m_cast_flg) && dev->mc_count) {
3526 if (dev->mc_count >
3527 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3528 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3529 dev->name);
3530 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3531 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3532 return;
3533 }
3534
3535 prev_cnt = sp->mc_addr_count;
3536 sp->mc_addr_count = dev->mc_count;
3537
3538 /* Clear out the previous list of Mc in the H/W. */
3539 for (i = 0; i < prev_cnt; i++) {
3540 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3541 &bar0->rmac_addr_data0_mem);
3542 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3543 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3544 val64 = RMAC_ADDR_CMD_MEM_WE |
3545 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3546 RMAC_ADDR_CMD_MEM_OFFSET
3547 (MAC_MC_ADDR_START_OFFSET + i);
3548 writeq(val64, &bar0->rmac_addr_cmd_mem);
3549
 3550 /* Wait till command completes */
3551 if (wait_for_cmd_complete(sp)) {
3552 DBG_PRINT(ERR_DBG, "%s: Adding ",
3553 dev->name);
3554 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3555 return;
3556 }
3557 }
3558
3559 /* Create the new Rx filter list and update the same in H/W. */
3560 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3561 i++, mclist = mclist->next) {
3562 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3563 ETH_ALEN);
3564 for (j = 0; j < ETH_ALEN; j++) {
3565 mac_addr |= mclist->dmi_addr[j];
3566 mac_addr <<= 8;
3567 }
3568 mac_addr >>= 8;
3569 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3570 &bar0->rmac_addr_data0_mem);
3571 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3572 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3573 val64 = RMAC_ADDR_CMD_MEM_WE |
3574 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3575 RMAC_ADDR_CMD_MEM_OFFSET
3576 (i + MAC_MC_ADDR_START_OFFSET);
3577 writeq(val64, &bar0->rmac_addr_cmd_mem);
3578
 3579 /* Wait till command completes */
3580 if (wait_for_cmd_complete(sp)) {
3581 DBG_PRINT(ERR_DBG, "%s: Adding ",
3582 dev->name);
3583 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3584 return;
3585 }
3586 }
3587 }
3588}
3589
3590/**
20346722 3591 * s2io_set_mac_addr - Programs the Xframe mac address
1da177e4
LT
3592 * @dev : pointer to the device structure.
3593 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 3594 * Description : This procedure will program the Xframe to receive
1da177e4 3595 * frames with new Mac Address
20346722 3596 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
3597 * as defined in errno.h file on failure.
3598 */
3599
3600int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3601{
3602 nic_t *sp = dev->priv;
3603 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3604 register u64 val64, mac_addr = 0;
3605 int i;
3606
20346722 3607 /*
1da177e4
LT
3608 * Set the new MAC address as the new unicast filter and reflect this
3609 * change on the device address registered with the OS. It will be
20346722 3610 * at offset 0.
1da177e4
LT
3611 */
3612 for (i = 0; i < ETH_ALEN; i++) {
3613 mac_addr <<= 8;
3614 mac_addr |= addr[i];
3615 }
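	/*
	 * Worked example: for a new address 00:11:22:33:44:55 the loop above
	 * packs the bytes MSB first, so mac_addr ends up as 0x001122334455ULL
	 * before being written to the RMAC address data0 register below.
	 */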
3616
3617 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3618 &bar0->rmac_addr_data0_mem);
3619
3620 val64 =
3621 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3622 RMAC_ADDR_CMD_MEM_OFFSET(0);
3623 writeq(val64, &bar0->rmac_addr_cmd_mem);
3624 /* Wait till command completes */
3625 if (wait_for_cmd_complete(sp)) {
3626 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3627 return FAILURE;
3628 }
3629
3630 return SUCCESS;
3631}
3632
3633/**
20346722 3634 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
 3635 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
3636 * @info: pointer to the structure with parameters given by ethtool to set
3637 * link information.
3638 * Description:
20346722 3639 * The function sets different link parameters provided by the user onto
1da177e4
LT
3640 * the NIC.
3641 * Return value:
3642 * 0 on success.
3643*/
3644
3645static int s2io_ethtool_sset(struct net_device *dev,
3646 struct ethtool_cmd *info)
3647{
3648 nic_t *sp = dev->priv;
3649 if ((info->autoneg == AUTONEG_ENABLE) ||
3650 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3651 return -EINVAL;
3652 else {
3653 s2io_close(sp->dev);
3654 s2io_open(sp->dev);
3655 }
3656
3657 return 0;
3658}
3659
3660/**
20346722 3661 * s2io_ethtool_gset - Return link specific information.
1da177e4
LT
3662 * @sp : private member of the device structure, pointer to the
3663 * s2io_nic structure.
3664 * @info : pointer to the structure with parameters given by ethtool
3665 * to return link information.
3666 * Description:
3667 * Returns link specific information like speed, duplex etc.. to ethtool.
3668 * Return value :
3669 * return 0 on success.
3670 */
3671
3672static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3673{
3674 nic_t *sp = dev->priv;
3675 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3676 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3677 info->port = PORT_FIBRE;
3678 /* info->transceiver?? TODO */
3679
3680 if (netif_carrier_ok(sp->dev)) {
3681 info->speed = 10000;
3682 info->duplex = DUPLEX_FULL;
3683 } else {
3684 info->speed = -1;
3685 info->duplex = -1;
3686 }
3687
3688 info->autoneg = AUTONEG_DISABLE;
3689 return 0;
3690}
3691
3692/**
20346722 3693 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3694 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3695 * s2io_nic structure.
3696 * @info : pointer to the structure with parameters given by ethtool to
3697 * return driver information.
3698 * Description:
 3699 * Returns driver specific information like name, version, etc. to ethtool.
3700 * Return value:
3701 * void
3702 */
3703
3704static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3705 struct ethtool_drvinfo *info)
3706{
3707 nic_t *sp = dev->priv;
3708
3709 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3710 strncpy(info->version, s2io_driver_version,
3711 sizeof(s2io_driver_version));
3712 strncpy(info->fw_version, "", 32);
3713 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3714 info->regdump_len = XENA_REG_SPACE;
3715 info->eedump_len = XENA_EEPROM_SPACE;
3716 info->testinfo_len = S2IO_TEST_LEN;
3717 info->n_stats = S2IO_STAT_LEN;
3718}
3719
3720/**
 3721 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
20346722 3722 * @sp: private member of the device structure, which is a pointer to the
1da177e4 3723 * s2io_nic structure.
20346722 3724 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
3725 * dumping the registers.
 3726 * @reg_space: The input argument into which all the registers are dumped.
 3727 * Description:
 3728 * Dumps the entire register space of the Xframe NIC into the user given
3729 * buffer area.
3730 * Return value :
3731 * void .
3732*/
3733
3734static void s2io_ethtool_gregs(struct net_device *dev,
3735 struct ethtool_regs *regs, void *space)
3736{
3737 int i;
3738 u64 reg;
3739 u8 *reg_space = (u8 *) space;
3740 nic_t *sp = dev->priv;
3741
3742 regs->len = XENA_REG_SPACE;
3743 regs->version = sp->pdev->subsystem_device;
3744
3745 for (i = 0; i < regs->len; i += 8) {
3746 reg = readq(sp->bar0 + i);
3747 memcpy((reg_space + i), &reg, 8);
3748 }
3749}
3750
3751/**
3752 * s2io_phy_id - timer function that alternates adapter LED.
20346722 3753 * @data : address of the private member of the device structure, which
1da177e4 3754 * is a pointer to the s2io_nic structure, provided as an unsigned long.
20346722 3755 * Description: This is actually the timer function that toggles the
 3756 * adapter LED bit in the adapter control register on every
 3757 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
1da177e4
LT
3758 * once every second.
3759*/
3760static void s2io_phy_id(unsigned long data)
3761{
3762 nic_t *sp = (nic_t *) data;
3763 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3764 u64 val64 = 0;
3765 u16 subid;
3766
3767 subid = sp->pdev->subsystem_device;
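	/*
	 * Xframe II adapters, and Xframe I cards whose subsystem ID byte is
	 * 0x07 or higher, blink the LED through GPIO 0; older cards toggle
	 * the LED bit in the adapter_control register instead.
	 */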
541ae68f 3768 if ((sp->device_type == XFRAME_II_DEVICE) ||
3769 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
3770 val64 = readq(&bar0->gpio_control);
3771 val64 ^= GPIO_CTRL_GPIO_0;
3772 writeq(val64, &bar0->gpio_control);
3773 } else {
3774 val64 = readq(&bar0->adapter_control);
3775 val64 ^= ADAPTER_LED_ON;
3776 writeq(val64, &bar0->adapter_control);
3777 }
3778
3779 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3780}
3781
3782/**
3783 * s2io_ethtool_idnic - To physically identify the nic on the system.
3784 * @sp : private member of the device structure, which is a pointer to the
3785 * s2io_nic structure.
20346722 3786 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
3787 * ethtool.
3788 * Description: Used to physically identify the NIC on the system.
20346722 3789 * The Link LED will blink for a time specified by the user for
1da177e4 3790 * identification.
20346722 3791 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
 3792 * identification is possible only if its link is up.
3793 * Return value:
3794 * int , returns 0 on success
3795 */
3796
3797static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3798{
3799 u64 val64 = 0, last_gpio_ctrl_val;
3800 nic_t *sp = dev->priv;
3801 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3802 u16 subid;
3803
3804 subid = sp->pdev->subsystem_device;
3805 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f 3806 if ((sp->device_type == XFRAME_I_DEVICE) &&
3807 ((subid & 0xFF) < 0x07)) {
1da177e4
LT
3808 val64 = readq(&bar0->adapter_control);
3809 if (!(val64 & ADAPTER_CNTL_EN)) {
3810 printk(KERN_ERR
3811 "Adapter Link down, cannot blink LED\n");
3812 return -EFAULT;
3813 }
3814 }
3815 if (sp->id_timer.function == NULL) {
3816 init_timer(&sp->id_timer);
3817 sp->id_timer.function = s2io_phy_id;
3818 sp->id_timer.data = (unsigned long) sp;
3819 }
3820 mod_timer(&sp->id_timer, jiffies);
3821 if (data)
20346722 3822 msleep_interruptible(data * HZ);
1da177e4 3823 else
20346722 3824 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
3825 del_timer_sync(&sp->id_timer);
3826
541ae68f 3827 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
1da177e4
LT
3828 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3829 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3830 }
3831
3832 return 0;
3833}
3834
3835/**
 3836 * s2io_ethtool_getpause_data - Pause frame generation and reception.
20346722 3837 * @sp : private member of the device structure, which is a pointer to the
3838 * s2io_nic structure.
1da177e4
LT
3839 * @ep : pointer to the structure with pause parameters given by ethtool.
3840 * Description:
3841 * Returns the Pause frame generation and reception capability of the NIC.
3842 * Return value:
3843 * void
3844 */
3845static void s2io_ethtool_getpause_data(struct net_device *dev,
3846 struct ethtool_pauseparam *ep)
3847{
3848 u64 val64;
3849 nic_t *sp = dev->priv;
3850 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3851
3852 val64 = readq(&bar0->rmac_pause_cfg);
3853 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3854 ep->tx_pause = TRUE;
3855 if (val64 & RMAC_PAUSE_RX_ENABLE)
3856 ep->rx_pause = TRUE;
3857 ep->autoneg = FALSE;
3858}
3859
3860/**
3861 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 3862 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3863 * s2io_nic structure.
3864 * @ep : pointer to the structure with pause parameters given by ethtool.
3865 * Description:
3866 * It can be used to set or reset Pause frame generation or reception
3867 * support of the NIC.
3868 * Return value:
3869 * int, returns 0 on Success
3870 */
3871
3872static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 3873 struct ethtool_pauseparam *ep)
1da177e4
LT
3874{
3875 u64 val64;
3876 nic_t *sp = dev->priv;
3877 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3878
3879 val64 = readq(&bar0->rmac_pause_cfg);
3880 if (ep->tx_pause)
3881 val64 |= RMAC_PAUSE_GEN_ENABLE;
3882 else
3883 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3884 if (ep->rx_pause)
3885 val64 |= RMAC_PAUSE_RX_ENABLE;
3886 else
3887 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3888 writeq(val64, &bar0->rmac_pause_cfg);
3889 return 0;
3890}
3891
3892/**
3893 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 3894 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3895 * s2io_nic structure.
 3896 * @off : offset from which the data is to be read
 3897 * @data : It is an output parameter where the data read at the given
20346722 3898 * offset is stored.
1da177e4 3899 * Description:
20346722 3900 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
3901 * read data.
3902 * NOTE: Will allow to read only part of the EEPROM visible through the
3903 * I2C bus.
3904 * Return value:
3905 * -1 on failure and 0 on success.
3906 */
3907
3908#define S2IO_DEV_ID 5
3909static int read_eeprom(nic_t * sp, int off, u32 * data)
3910{
3911 int ret = -1;
3912 u32 exit_cnt = 0;
3913 u64 val64;
3914 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3915
3916 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3917 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3918 I2C_CONTROL_CNTL_START;
3919 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3920
3921 while (exit_cnt < 5) {
3922 val64 = readq(&bar0->i2c_control);
3923 if (I2C_CONTROL_CNTL_END(val64)) {
3924 *data = I2C_CONTROL_GET_DATA(val64);
3925 ret = 0;
3926 break;
3927 }
3928 msleep(50);
3929 exit_cnt++;
3930 }
3931
3932 return ret;
3933}
3934
3935/**
3936 * write_eeprom - actually writes the relevant part of the data value.
3937 * @sp : private member of the device structure, which is a pointer to the
3938 * s2io_nic structure.
3939 * @off : offset at which the data must be written
3940 * @data : The data that is to be written
20346722 3941 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
3942 * the Eeprom. (max of 3)
3943 * Description:
3944 * Actually writes the relevant part of the data value into the Eeprom
3945 * through the I2C bus.
3946 * Return value:
3947 * 0 on success, -1 on failure.
3948 */
3949
3950static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3951{
3952 int exit_cnt = 0, ret = -1;
3953 u64 val64;
3954 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3955
3956 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3957 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3958 I2C_CONTROL_CNTL_START;
3959 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3960
3961 while (exit_cnt < 5) {
3962 val64 = readq(&bar0->i2c_control);
3963 if (I2C_CONTROL_CNTL_END(val64)) {
3964 if (!(val64 & I2C_CONTROL_NACK))
3965 ret = 0;
3966 break;
3967 }
3968 msleep(50);
3969 exit_cnt++;
3970 }
3971
3972 return ret;
3973}
3974
3975/**
3976 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 3977 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
20346722 3978 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
3979 * containing all relevant information.
3980 * @data_buf : user defined value to be written into Eeprom.
3981 * Description: Reads the values stored in the Eeprom at given offset
 3982 * for a given length. Stores these values in the input argument data
 3983 * buffer 'data_buf' and returns these to the caller (ethtool).
3984 * Return value:
3985 * int 0 on success
3986 */
3987
3988static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 3989 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4
LT
3990{
3991 u32 data, i, valid;
3992 nic_t *sp = dev->priv;
3993
3994 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3995
3996 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3997 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3998
3999 for (i = 0; i < eeprom->len; i += 4) {
4000 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4001 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4002 return -EFAULT;
4003 }
4004 valid = INV(data);
4005 memcpy((data_buf + i), &valid, 4);
4006 }
4007 return 0;
4008}
4009
4010/**
4011 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4012 * @sp : private member of the device structure, which is a pointer to the
4013 * s2io_nic structure.
20346722 4014 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
4015 * containing all relevant information.
 4016 * @data_buf : user defined value to be written into Eeprom.
4017 * Description:
4018 * Tries to write the user provided value in the Eeprom, at the offset
4019 * given by the user.
4020 * Return value:
4021 * 0 on success, -EFAULT on failure.
4022 */
4023
4024static int s2io_ethtool_seeprom(struct net_device *dev,
4025 struct ethtool_eeprom *eeprom,
4026 u8 * data_buf)
4027{
4028 int len = eeprom->len, cnt = 0;
4029 u32 valid = 0, data;
4030 nic_t *sp = dev->priv;
4031
4032 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4033 DBG_PRINT(ERR_DBG,
4034 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4035 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
4036 eeprom->magic);
4037 return -EFAULT;
4038 }
4039
4040 while (len) {
4041 data = (u32) data_buf[cnt] & 0x000000FF;
4042 if (data) {
4043 valid = (u32) (data << 24);
4044 } else
4045 valid = data;
4046
4047 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4048 DBG_PRINT(ERR_DBG,
4049 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4050 DBG_PRINT(ERR_DBG,
4051 "write into the specified offset\n");
4052 return -EFAULT;
4053 }
4054 cnt++;
4055 len--;
4056 }
4057
4058 return 0;
4059}
4060
4061/**
20346722 4062 * s2io_register_test - reads and writes into all clock domains.
4063 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4064 * s2io_nic structure.
 4065 * @data : variable that returns the result of each of the tests conducted
 4066 * by the driver.
4067 * Description:
 4068 * Read and write into all clock domains. The NIC has 3 clock domains;
 4069 * the test verifies that registers in all the three regions are accessible.
4070 * Return value:
4071 * 0 on success.
4072 */
4073
4074static int s2io_register_test(nic_t * sp, uint64_t * data)
4075{
4076 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4077 u64 val64 = 0;
4078 int fail = 0;
4079
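	/*
	 * Each readq below is compared against the value the register is
	 * expected to hold at this point (its default/initialised value), and
	 * the xmsi_data write/read-back pair exercises the write path; a
	 * mismatch suggests the corresponding clock domain is not accessible.
	 */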
20346722 4080 val64 = readq(&bar0->pif_rd_swapper_fb);
4081 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
4082 fail = 1;
4083 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4084 }
4085
4086 val64 = readq(&bar0->rmac_pause_cfg);
4087 if (val64 != 0xc000ffff00000000ULL) {
4088 fail = 1;
4089 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4090 }
4091
4092 val64 = readq(&bar0->rx_queue_cfg);
4093 if (val64 != 0x0808080808080808ULL) {
4094 fail = 1;
4095 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4096 }
4097
4098 val64 = readq(&bar0->xgxs_efifo_cfg);
4099 if (val64 != 0x000000001923141EULL) {
4100 fail = 1;
4101 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4102 }
4103
4104 val64 = 0x5A5A5A5A5A5A5A5AULL;
4105 writeq(val64, &bar0->xmsi_data);
4106 val64 = readq(&bar0->xmsi_data);
4107 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4108 fail = 1;
4109 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4110 }
4111
4112 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4113 writeq(val64, &bar0->xmsi_data);
4114 val64 = readq(&bar0->xmsi_data);
4115 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4116 fail = 1;
4117 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4118 }
4119
4120 *data = fail;
4121 return 0;
4122}
4123
4124/**
20346722 4125 * s2io_eeprom_test - to verify that the EEPROM in the xena can be programmed.
1da177e4
LT
4126 * @sp : private member of the device structure, which is a pointer to the
4127 * s2io_nic structure.
4128 * @data:variable that returns the result of each of the test conducted by
4129 * the driver.
4130 * Description:
20346722 4131 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
4132 * register.
4133 * Return value:
4134 * 0 on success.
4135 */
4136
4137static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4138{
4139 int fail = 0;
4140 u32 ret_data;
4141
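	/*
	 * Note the inverted logic on the "Write Error" cases below: those
	 * offsets are expected to reject the write, so a write_eeprom() call
	 * that succeeds there counts as a test failure.
	 */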
4142 /* Test Write Error at offset 0 */
4143 if (!write_eeprom(sp, 0, 0, 3))
4144 fail = 1;
4145
4146 /* Test Write at offset 4f0 */
4147 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4148 fail = 1;
4149 if (read_eeprom(sp, 0x4F0, &ret_data))
4150 fail = 1;
4151
4152 if (ret_data != 0x01234567)
4153 fail = 1;
4154
 4155 /* Reset the EEPROM data to FFFF */
4156 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4157
4158 /* Test Write Request Error at offset 0x7c */
4159 if (!write_eeprom(sp, 0x07C, 0, 3))
4160 fail = 1;
4161
4162 /* Test Write Request at offset 0x7fc */
4163 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4164 fail = 1;
4165 if (read_eeprom(sp, 0x7FC, &ret_data))
4166 fail = 1;
4167
4168 if (ret_data != 0x01234567)
4169 fail = 1;
4170
 4171 /* Reset the EEPROM data to FFFF */
4172 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4173
4174 /* Test Write Error at offset 0x80 */
4175 if (!write_eeprom(sp, 0x080, 0, 3))
4176 fail = 1;
4177
4178 /* Test Write Error at offset 0xfc */
4179 if (!write_eeprom(sp, 0x0FC, 0, 3))
4180 fail = 1;
4181
4182 /* Test Write Error at offset 0x100 */
4183 if (!write_eeprom(sp, 0x100, 0, 3))
4184 fail = 1;
4185
4186 /* Test Write Error at offset 4ec */
4187 if (!write_eeprom(sp, 0x4EC, 0, 3))
4188 fail = 1;
4189
4190 *data = fail;
4191 return 0;
4192}
4193
4194/**
4195 * s2io_bist_test - invokes the MemBist test of the card .
20346722 4196 * @sp : private member of the device structure, which is a pointer to the
1da177e4 4197 * s2io_nic structure.
20346722 4198 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
4199 * the driver.
4200 * Description:
4201 * This invokes the MemBist test of the card. We give around
 4202 * 2 secs for the test to complete. If it's still not complete
20346722 4203 * within this period, we consider that the test failed.
1da177e4
LT
4204 * Return value:
4205 * 0 on success and -1 on failure.
4206 */
4207
4208static int s2io_bist_test(nic_t * sp, uint64_t * data)
4209{
4210 u8 bist = 0;
4211 int cnt = 0, ret = -1;
4212
4213 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4214 bist |= PCI_BIST_START;
4215 pci_write_config_word(sp->pdev, PCI_BIST, bist);
4216
4217 while (cnt < 20) {
4218 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4219 if (!(bist & PCI_BIST_START)) {
4220 *data = (bist & PCI_BIST_CODE_MASK);
4221 ret = 0;
4222 break;
4223 }
4224 msleep(100);
4225 cnt++;
4226 }
4227
4228 return ret;
4229}
4230
4231/**
20346722 4232 * s2io_link_test - verifies the link state of the nic
 4233 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4234 * s2io_nic structure.
4235 * @data: variable that returns the result of each of the test conducted by
4236 * the driver.
4237 * Description:
20346722 4238 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
4239 * argument 'data' appropriately.
4240 * Return value:
4241 * 0 on success.
4242 */
4243
4244static int s2io_link_test(nic_t * sp, uint64_t * data)
4245{
4246 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4247 u64 val64;
4248
4249 val64 = readq(&bar0->adapter_status);
4250 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4251 *data = 1;
4252
4253 return 0;
4254}
4255
4256/**
20346722 4257 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
20346722 4258 * @sp : private member of the device structure, which is a pointer to the
1da177e4 4259 * s2io_nic structure.
20346722 4260 * @data : variable that returns the result of each of the tests
1da177e4
LT
4261 * conducted by the driver.
4262 * Description:
20346722 4263 * This is one of the offline tests; it tests the read and write
1da177e4
LT
4264 * access to the RldRam chip on the NIC.
4265 * Return value:
4266 * 0 on success.
4267 */
4268
4269static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4270{
4271 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4272 u64 val64;
4273 int cnt, iteration = 0, test_pass = 0;
4274
4275 val64 = readq(&bar0->adapter_control);
4276 val64 &= ~ADAPTER_ECC_EN;
4277 writeq(val64, &bar0->adapter_control);
4278
4279 val64 = readq(&bar0->mc_rldram_test_ctrl);
4280 val64 |= MC_RLDRAM_TEST_MODE;
4281 writeq(val64, &bar0->mc_rldram_test_ctrl);
4282
4283 val64 = readq(&bar0->mc_rldram_mrs);
4284 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4285 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4286
4287 val64 |= MC_RLDRAM_MRS_ENABLE;
4288 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4289
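	/*
	 * Two passes over the test sequence: the second pass repeats it with
	 * the upper 48 bits of every data pattern inverted (XORed with all
	 * ones), so both data polarities are exercised.
	 */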
4290 while (iteration < 2) {
4291 val64 = 0x55555555aaaa0000ULL;
4292 if (iteration == 1) {
4293 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4294 }
4295 writeq(val64, &bar0->mc_rldram_test_d0);
4296
4297 val64 = 0xaaaa5a5555550000ULL;
4298 if (iteration == 1) {
4299 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4300 }
4301 writeq(val64, &bar0->mc_rldram_test_d1);
4302
4303 val64 = 0x55aaaaaaaa5a0000ULL;
4304 if (iteration == 1) {
4305 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4306 }
4307 writeq(val64, &bar0->mc_rldram_test_d2);
4308
4309 val64 = (u64) (0x0000003fffff0000ULL);
4310 writeq(val64, &bar0->mc_rldram_test_add);
4311
4312
4313 val64 = MC_RLDRAM_TEST_MODE;
4314 writeq(val64, &bar0->mc_rldram_test_ctrl);
4315
4316 val64 |=
4317 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4318 MC_RLDRAM_TEST_GO;
4319 writeq(val64, &bar0->mc_rldram_test_ctrl);
4320
4321 for (cnt = 0; cnt < 5; cnt++) {
4322 val64 = readq(&bar0->mc_rldram_test_ctrl);
4323 if (val64 & MC_RLDRAM_TEST_DONE)
4324 break;
4325 msleep(200);
4326 }
4327
4328 if (cnt == 5)
4329 break;
4330
4331 val64 = MC_RLDRAM_TEST_MODE;
4332 writeq(val64, &bar0->mc_rldram_test_ctrl);
4333
4334 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4335 writeq(val64, &bar0->mc_rldram_test_ctrl);
4336
4337 for (cnt = 0; cnt < 5; cnt++) {
4338 val64 = readq(&bar0->mc_rldram_test_ctrl);
4339 if (val64 & MC_RLDRAM_TEST_DONE)
4340 break;
4341 msleep(500);
4342 }
4343
4344 if (cnt == 5)
4345 break;
4346
4347 val64 = readq(&bar0->mc_rldram_test_ctrl);
4348 if (val64 & MC_RLDRAM_TEST_PASS)
4349 test_pass = 1;
4350
4351 iteration++;
4352 }
4353
4354 if (!test_pass)
4355 *data = 1;
4356 else
4357 *data = 0;
4358
4359 return 0;
4360}
4361
4362/**
 4363 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
4364 * @sp : private member of the device structure, which is a pointer to the
4365 * s2io_nic structure.
 4366 * @ethtest : pointer to an ethtool command specific structure that will be
4367 * returned to the user.
20346722 4368 * @data : variable that returns the result of each of the test
1da177e4
LT
4369 * conducted by the driver.
4370 * Description:
 4371 * This function conducts 5 tests (4 offline and 1 online) to determine
4372 * the health of the card.
4373 * Return value:
4374 * void
4375 */
4376
4377static void s2io_ethtool_test(struct net_device *dev,
4378 struct ethtool_test *ethtest,
4379 uint64_t * data)
4380{
4381 nic_t *sp = dev->priv;
4382 int orig_state = netif_running(sp->dev);
4383
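	/*
	 * Result slots filled in below: data[0] register test, data[1] EEPROM
	 * test, data[2] link test, data[3] RLDRAM test, data[4] BIST.
	 */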
4384 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4385 /* Offline Tests. */
20346722 4386 if (orig_state)
1da177e4 4387 s2io_close(sp->dev);
1da177e4
LT
4388
4389 if (s2io_register_test(sp, &data[0]))
4390 ethtest->flags |= ETH_TEST_FL_FAILED;
4391
4392 s2io_reset(sp);
1da177e4
LT
4393
4394 if (s2io_rldram_test(sp, &data[3]))
4395 ethtest->flags |= ETH_TEST_FL_FAILED;
4396
4397 s2io_reset(sp);
1da177e4
LT
4398
4399 if (s2io_eeprom_test(sp, &data[1]))
4400 ethtest->flags |= ETH_TEST_FL_FAILED;
4401
4402 if (s2io_bist_test(sp, &data[4]))
4403 ethtest->flags |= ETH_TEST_FL_FAILED;
4404
4405 if (orig_state)
4406 s2io_open(sp->dev);
4407
4408 data[2] = 0;
4409 } else {
4410 /* Online Tests. */
4411 if (!orig_state) {
4412 DBG_PRINT(ERR_DBG,
4413 "%s: is not up, cannot run test\n",
4414 dev->name);
4415 data[0] = -1;
4416 data[1] = -1;
4417 data[2] = -1;
4418 data[3] = -1;
4419 data[4] = -1;
4420 }
4421
4422 if (s2io_link_test(sp, &data[2]))
4423 ethtest->flags |= ETH_TEST_FL_FAILED;
4424
4425 data[0] = 0;
4426 data[1] = 0;
4427 data[3] = 0;
4428 data[4] = 0;
4429 }
4430}
4431
4432static void s2io_get_ethtool_stats(struct net_device *dev,
4433 struct ethtool_stats *estats,
4434 u64 * tmp_stats)
4435{
4436 int i = 0;
4437 nic_t *sp = dev->priv;
4438 StatInfo_t *stat_info = sp->mac_control.stats_info;
4439
7ba013ac 4440 s2io_updt_stats(sp);
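	/*
	 * Many of the MAC counters are maintained by the hardware as a 32-bit
	 * base counter plus a 32-bit overflow counter; the two halves are
	 * stitched together below into a single 64-bit value for ethtool.
	 */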
541ae68f 4441 tmp_stats[i++] =
4442 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4443 le32_to_cpu(stat_info->tmac_frms);
4444 tmp_stats[i++] =
4445 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4446 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 4447 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f 4448 tmp_stats[i++] =
4449 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4450 le32_to_cpu(stat_info->tmac_mcst_frms);
4451 tmp_stats[i++] =
4452 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4453 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 4454 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
541ae68f 4455 tmp_stats[i++] =
4456 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4457 le32_to_cpu(stat_info->tmac_any_err_frms);
1da177e4 4458 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f 4459 tmp_stats[i++] =
4460 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4461 le32_to_cpu(stat_info->tmac_vld_ip);
4462 tmp_stats[i++] =
4463 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4464 le32_to_cpu(stat_info->tmac_drop_ip);
4465 tmp_stats[i++] =
4466 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4467 le32_to_cpu(stat_info->tmac_icmp);
4468 tmp_stats[i++] =
4469 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4470 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 4471 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f 4472 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4473 le32_to_cpu(stat_info->tmac_udp);
4474 tmp_stats[i++] =
4475 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4476 le32_to_cpu(stat_info->rmac_vld_frms);
4477 tmp_stats[i++] =
4478 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4479 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
4480 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4481 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f 4482 tmp_stats[i++] =
4483 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4484 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4485 tmp_stats[i++] =
4486 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4487 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4
LT
4488 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4489 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4490 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
541ae68f 4491 tmp_stats[i++] =
4492 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4493 le32_to_cpu(stat_info->rmac_discarded_frms);
4494 tmp_stats[i++] =
4495 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4496 le32_to_cpu(stat_info->rmac_usized_frms);
4497 tmp_stats[i++] =
4498 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4499 le32_to_cpu(stat_info->rmac_osized_frms);
4500 tmp_stats[i++] =
4501 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4502 le32_to_cpu(stat_info->rmac_frag_frms);
4503 tmp_stats[i++] =
4504 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4505 le32_to_cpu(stat_info->rmac_jabber_frms);
4506 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4507 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
4508 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4509 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
541ae68f 4510 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4511 le32_to_cpu(stat_info->rmac_drop_ip);
4512 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4513 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 4514 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
541ae68f 4515 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4516 le32_to_cpu(stat_info->rmac_udp);
4517 tmp_stats[i++] =
4518 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4519 le32_to_cpu(stat_info->rmac_err_drp_udp);
4520 tmp_stats[i++] =
4521 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4522 le32_to_cpu(stat_info->rmac_pause_cnt);
4523 tmp_stats[i++] =
4524 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4525 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 4526 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
7ba013ac 4527 tmp_stats[i++] = 0;
4528 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4529 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
1da177e4
LT
4530}
4531
20346722 4532int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
4533{
4534 return (XENA_REG_SPACE);
4535}
4536
4537
20346722 4538u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4
LT
4539{
4540 nic_t *sp = dev->priv;
4541
4542 return (sp->rx_csum);
4543}
20346722 4544int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4545{
4546 nic_t *sp = dev->priv;
4547
4548 if (data)
4549 sp->rx_csum = 1;
4550 else
4551 sp->rx_csum = 0;
4552
4553 return 0;
4554}
20346722 4555int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
4556{
4557 return (XENA_EEPROM_SPACE);
4558}
4559
20346722 4560int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
4561{
4562 return (S2IO_TEST_LEN);
4563}
20346722 4564void s2io_ethtool_get_strings(struct net_device *dev,
4565 u32 stringset, u8 * data)
1da177e4
LT
4566{
4567 switch (stringset) {
4568 case ETH_SS_TEST:
4569 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4570 break;
4571 case ETH_SS_STATS:
4572 memcpy(data, &ethtool_stats_keys,
4573 sizeof(ethtool_stats_keys));
4574 }
4575}
1da177e4
LT
4576static int s2io_ethtool_get_stats_count(struct net_device *dev)
4577{
4578 return (S2IO_STAT_LEN);
4579}
4580
20346722 4581int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4582{
4583 if (data)
4584 dev->features |= NETIF_F_IP_CSUM;
4585 else
4586 dev->features &= ~NETIF_F_IP_CSUM;
4587
4588 return 0;
4589}
4590
4591
4592static struct ethtool_ops netdev_ethtool_ops = {
4593 .get_settings = s2io_ethtool_gset,
4594 .set_settings = s2io_ethtool_sset,
4595 .get_drvinfo = s2io_ethtool_gdrvinfo,
4596 .get_regs_len = s2io_ethtool_get_regs_len,
4597 .get_regs = s2io_ethtool_gregs,
4598 .get_link = ethtool_op_get_link,
4599 .get_eeprom_len = s2io_get_eeprom_len,
4600 .get_eeprom = s2io_ethtool_geeprom,
4601 .set_eeprom = s2io_ethtool_seeprom,
4602 .get_pauseparam = s2io_ethtool_getpause_data,
4603 .set_pauseparam = s2io_ethtool_setpause_data,
4604 .get_rx_csum = s2io_ethtool_get_rx_csum,
4605 .set_rx_csum = s2io_ethtool_set_rx_csum,
4606 .get_tx_csum = ethtool_op_get_tx_csum,
4607 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4608 .get_sg = ethtool_op_get_sg,
4609 .set_sg = ethtool_op_set_sg,
4610#ifdef NETIF_F_TSO
4611 .get_tso = ethtool_op_get_tso,
4612 .set_tso = ethtool_op_set_tso,
4613#endif
4614 .self_test_count = s2io_ethtool_self_test_count,
4615 .self_test = s2io_ethtool_test,
4616 .get_strings = s2io_ethtool_get_strings,
4617 .phys_id = s2io_ethtool_idnic,
4618 .get_stats_count = s2io_ethtool_get_stats_count,
4619 .get_ethtool_stats = s2io_get_ethtool_stats
4620};
4621
4622/**
20346722 4623 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
4624 * @dev : Device pointer.
 4625 * @ifr : An IOCTL specific structure, that can contain a pointer to
4626 * a proprietary structure used to pass information to the driver.
4627 * @cmd : This is used to distinguish between the different commands that
4628 * can be passed to the IOCTL functions.
4629 * Description:
20346722 4630 * Currently no special functionality is supported in IOCTL, hence
 4631 * the function always returns -EOPNOTSUPP.
1da177e4
LT
4632 */
4633
20346722 4634int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
4635{
4636 return -EOPNOTSUPP;
4637}
4638
4639/**
4640 * s2io_change_mtu - entry point to change MTU size for the device.
4641 * @dev : device pointer.
4642 * @new_mtu : the new MTU size for the device.
4643 * Description: A driver entry point to change MTU size for the device.
4644 * Before changing the MTU the device must be stopped.
4645 * Return value:
4646 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4647 * file on failure.
4648 */
4649
20346722 4650int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4
LT
4651{
4652 nic_t *sp = dev->priv;
1da177e4
LT
4653
4654 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4655 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4656 dev->name);
4657 return -EPERM;
4658 }
4659
1da177e4 4660 dev->mtu = new_mtu;
d8892c6e 4661 if (netif_running(dev)) {
4662 s2io_card_down(sp);
4663 netif_stop_queue(dev);
4664 if (s2io_card_up(sp)) {
4665 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4666 __FUNCTION__);
4667 }
4668 if (netif_queue_stopped(dev))
4669 netif_wake_queue(dev);
4670 } else { /* Device is down */
4671 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4672 u64 val64 = new_mtu;
4673
4674 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
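		/*
		 * Assuming vBIT(val, loc, sz) places a sz-bit value 'loc' bits
		 * down from the MSB of the 64-bit register, the write above
		 * puts the new MTU into bits 2..15 of rmac_max_pyld_len.
		 */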
4675 }
1da177e4
LT
4676
4677 return 0;
4678}
4679
4680/**
4681 * s2io_tasklet - Bottom half of the ISR.
4682 * @dev_adr : address of the device structure in dma_addr_t format.
4683 * Description:
4684 * This is the tasklet or the bottom half of the ISR. This is
20346722 4685 * an extension of the ISR which is scheduled by the scheduler to be run
1da177e4 4686 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 4687 * be pushed into the tasklet. For now the tasklet is used only to
1da177e4
LT
4688 * replenish the Rx buffers in the Rx buffer descriptors.
4689 * Return value:
4690 * void.
4691 */
4692
4693static void s2io_tasklet(unsigned long dev_addr)
4694{
4695 struct net_device *dev = (struct net_device *) dev_addr;
4696 nic_t *sp = dev->priv;
4697 int i, ret;
4698 mac_info_t *mac_control;
4699 struct config_param *config;
4700
4701 mac_control = &sp->mac_control;
4702 config = &sp->config;
4703
4704 if (!TASKLET_IN_USE) {
4705 for (i = 0; i < config->rx_ring_num; i++) {
4706 ret = fill_rx_buffers(sp, i);
4707 if (ret == -ENOMEM) {
4708 DBG_PRINT(ERR_DBG, "%s: Out of ",
4709 dev->name);
4710 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4711 break;
4712 } else if (ret == -EFILL) {
4713 DBG_PRINT(ERR_DBG,
4714 "%s: Rx Ring %d is full\n",
4715 dev->name, i);
4716 break;
4717 }
4718 }
4719 clear_bit(0, (&sp->tasklet_status));
4720 }
4721}
4722
4723/**
 4724 * s2io_set_link - Set the Link status
 4725 * @data: long pointer to device private structure
4726 * Description: Sets the link status for the adapter
4727 */
4728
4729static void s2io_set_link(unsigned long data)
4730{
4731 nic_t *nic = (nic_t *) data;
4732 struct net_device *dev = nic->dev;
4733 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4734 register u64 val64;
4735 u16 subid;
4736
4737 if (test_and_set_bit(0, &(nic->link_state))) {
4738 /* The card is being reset, no point doing anything */
4739 return;
4740 }
4741
4742 subid = nic->pdev->subsystem_device;
a371a07d 4743 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4744 /*
4745 * Allow a small delay for the NICs self initiated
4746 * cleanup to complete.
4747 */
4748 msleep(100);
4749 }
1da177e4
LT
4750
4751 val64 = readq(&bar0->adapter_status);
20346722 4752 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
4753 if (LINK_IS_UP(val64)) {
4754 val64 = readq(&bar0->adapter_control);
4755 val64 |= ADAPTER_CNTL_EN;
4756 writeq(val64, &bar0->adapter_control);
541ae68f 4757 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4758 subid)) {
1da177e4
LT
4759 val64 = readq(&bar0->gpio_control);
4760 val64 |= GPIO_CTRL_GPIO_0;
4761 writeq(val64, &bar0->gpio_control);
4762 val64 = readq(&bar0->gpio_control);
4763 } else {
4764 val64 |= ADAPTER_LED_ON;
4765 writeq(val64, &bar0->adapter_control);
4766 }
a371a07d 4767 if (s2io_link_fault_indication(nic) ==
4768 MAC_RMAC_ERR_TIMER) {
4769 val64 = readq(&bar0->adapter_status);
4770 if (!LINK_IS_UP(val64)) {
4771 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4772 DBG_PRINT(ERR_DBG, " Link down");
4773 DBG_PRINT(ERR_DBG, "after ");
4774 DBG_PRINT(ERR_DBG, "enabling ");
4775 DBG_PRINT(ERR_DBG, "device \n");
4776 }
1da177e4
LT
4777 }
4778 if (nic->device_enabled_once == FALSE) {
4779 nic->device_enabled_once = TRUE;
4780 }
4781 s2io_link(nic, LINK_UP);
4782 } else {
541ae68f 4783 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4784 subid)) {
1da177e4
LT
4785 val64 = readq(&bar0->gpio_control);
4786 val64 &= ~GPIO_CTRL_GPIO_0;
4787 writeq(val64, &bar0->gpio_control);
4788 val64 = readq(&bar0->gpio_control);
4789 }
4790 s2io_link(nic, LINK_DOWN);
4791 }
4792 } else { /* NIC is not Quiescent. */
4793 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4794 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4795 netif_stop_queue(dev);
4796 }
4797 clear_bit(0, &(nic->link_state));
4798}
4799
4800static void s2io_card_down(nic_t * sp)
4801{
4802 int cnt = 0;
4803 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4804 unsigned long flags;
4805 register u64 val64 = 0;
4806
25fff88e 4807 del_timer_sync(&sp->alarm_timer);
1da177e4 4808 /* If s2io_set_link task is executing, wait till it completes. */
20346722 4809 while (test_and_set_bit(0, &(sp->link_state))) {
1da177e4 4810 msleep(50);
20346722 4811 }
1da177e4
LT
4812 atomic_set(&sp->card_state, CARD_DOWN);
4813
4814 /* disable Tx and Rx traffic on the NIC */
4815 stop_nic(sp);
4816
4817 /* Kill tasklet. */
4818 tasklet_kill(&sp->task);
4819
4820 /* Check if the device is Quiescent and then Reset the NIC */
4821 do {
4822 val64 = readq(&bar0->adapter_status);
20346722 4823 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
1da177e4
LT
4824 break;
4825 }
4826
4827 msleep(50);
4828 cnt++;
4829 if (cnt == 10) {
4830 DBG_PRINT(ERR_DBG,
4831 "s2io_close:Device not Quiescent ");
4832 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4833 (unsigned long long) val64);
4834 break;
4835 }
4836 } while (1);
1da177e4
LT
4837 s2io_reset(sp);
4838
7ba013ac 4839 /* Waiting till all Interrupt handlers are complete */
4840 cnt = 0;
4841 do {
4842 msleep(10);
4843 if (!atomic_read(&sp->isr_cnt))
4844 break;
4845 cnt++;
4846 } while(cnt < 5);
4847
4848 spin_lock_irqsave(&sp->tx_lock, flags);
4849 /* Free all Tx buffers */
1da177e4 4850 free_tx_buffers(sp);
7ba013ac 4851 spin_unlock_irqrestore(&sp->tx_lock, flags);
4852
4853 /* Free all Rx buffers */
4854 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 4855 free_rx_buffers(sp);
7ba013ac 4856 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 4857
1da177e4
LT
4858 clear_bit(0, &(sp->link_state));
4859}
4860
4861static int s2io_card_up(nic_t * sp)
4862{
4863 int i, ret;
4864 mac_info_t *mac_control;
4865 struct config_param *config;
4866 struct net_device *dev = (struct net_device *) sp->dev;
4867
4868 /* Initialize the H/W I/O registers */
4869 if (init_nic(sp) != 0) {
4870 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4871 dev->name);
4872 return -ENODEV;
4873 }
4874
20346722 4875 /*
4876 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
4877 * Rx ring and initializing buffers into 30 Rx blocks
4878 */
4879 mac_control = &sp->mac_control;
4880 config = &sp->config;
4881
4882 for (i = 0; i < config->rx_ring_num; i++) {
4883 if ((ret = fill_rx_buffers(sp, i))) {
4884 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4885 dev->name);
4886 s2io_reset(sp);
4887 free_rx_buffers(sp);
4888 return -ENOMEM;
4889 }
4890 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4891 atomic_read(&sp->rx_bufs_left[i]));
4892 }
4893
4894 /* Setting its receive mode */
4895 s2io_set_multicast(dev);
4896
4897 /* Enable tasklet for the device */
4898 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4899
4900 /* Enable Rx Traffic and interrupts on the NIC */
4901 if (start_nic(sp)) {
4902 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4903 tasklet_kill(&sp->task);
4904 s2io_reset(sp);
4905 free_irq(dev->irq, dev);
4906 free_rx_buffers(sp);
4907 return -ENODEV;
4908 }
4909
25fff88e 4910 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4911
1da177e4
LT
4912 atomic_set(&sp->card_state, CARD_UP);
4913 return 0;
4914}
4915
20346722 4916/**
1da177e4
LT
4917 * s2io_restart_nic - Resets the NIC.
4918 * @data : long pointer to the device private structure
4919 * Description:
4920 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 4921 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
4922 * the run time of the watch dog routine which is run holding a
4923 * spin lock.
4924 */
4925
4926static void s2io_restart_nic(unsigned long data)
4927{
4928 struct net_device *dev = (struct net_device *) data;
4929 nic_t *sp = dev->priv;
4930
4931 s2io_card_down(sp);
4932 if (s2io_card_up(sp)) {
4933 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4934 dev->name);
4935 }
4936 netif_wake_queue(dev);
4937 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4938 dev->name);
20346722 4939
1da177e4
LT
4940}
4941
20346722 4942/**
4943 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
4944 * @dev : Pointer to net device structure
4945 * Description:
4946 * This function is triggered if the Tx Queue is stopped
4947 * for a pre-defined amount of time when the Interface is still up.
4948 * If the Interface is jammed in such a situation, the hardware is
4949 * reset (by s2io_close) and restarted again (by s2io_open) to
4950 * overcome any problem that might have been caused in the hardware.
4951 * Return value:
4952 * void
4953 */
4954
4955static void s2io_tx_watchdog(struct net_device *dev)
4956{
4957 nic_t *sp = dev->priv;
4958
4959 if (netif_carrier_ok(dev)) {
4960 schedule_work(&sp->rst_timer_task);
4961 }
4962}
4963
4964/**
4965 * rx_osm_handler - To perform some OS related operations on SKB.
4966 * @sp: private member of the device structure,pointer to s2io_nic structure.
4967 * @skb : the socket buffer pointer.
4968 * @len : length of the packet
4969 * @cksum : FCS checksum of the frame.
4970 * @ring_no : the ring from which this RxD was extracted.
20346722 4971 * Description:
1da177e4
LT
 4972 * This function is called by the Rx interrupt service routine to perform
4973 * some OS related operations on the SKB before passing it to the upper
4974 * layers. It mainly checks if the checksum is OK, if so adds it to the
4975 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4976 * to the upper layer. If the checksum is wrong, it increments the Rx
4977 * packet error count, frees the SKB and returns error.
4978 * Return value:
4979 * SUCCESS on success and -1 on failure.
4980 */
20346722 4981static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
1da177e4 4982{
20346722 4983 nic_t *sp = ring_data->nic;
1da177e4 4984 struct net_device *dev = (struct net_device *) sp->dev;
20346722 4985 struct sk_buff *skb = (struct sk_buff *)
4986 ((unsigned long) rxdp->Host_Control);
4987 int ring_no = ring_data->ring_no;
1da177e4
LT
4988 u16 l3_csum, l4_csum;
4989#ifdef CONFIG_2BUFF_MODE
20346722 4990 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4991 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4992 int get_block = ring_data->rx_curr_get_info.block_index;
4993 int get_off = ring_data->rx_curr_get_info.offset;
4994 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
1da177e4 4995 unsigned char *buff;
20346722 4996#else
20346722 4997 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
1da177e4 4998#endif
20346722 4999 skb->dev = dev;
5000 if (rxdp->Control_1 & RXD_T_CODE) {
5001 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5002 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5003 dev->name, err);
1ddc50d4 5004 dev_kfree_skb(skb);
5005 sp->stats.rx_crc_errors++;
5006 atomic_dec(&sp->rx_bufs_left[ring_no]);
5007 rxdp->Host_Control = 0;
5008 return 0;
20346722 5009 }
1da177e4 5010
20346722 5011 /* Updating statistics */
5012 rxdp->Host_Control = 0;
5013 sp->rx_pkt_count++;
5014 sp->stats.rx_packets++;
5015#ifndef CONFIG_2BUFF_MODE
5016 sp->stats.rx_bytes += len;
5017#else
5018 sp->stats.rx_bytes += buf0_len + buf2_len;
5019#endif
5020
5021#ifndef CONFIG_2BUFF_MODE
5022 skb_put(skb, len);
5023#else
5024 buff = skb_push(skb, buf0_len);
5025 memcpy(buff, ba->ba_0, buf0_len);
5026 skb_put(skb, buf2_len);
5027#endif
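	/*
	 * In 2-buffer mode buffer 0 (staged in ba->ba_0, presumably the frame
	 * headers) is copied in front of the skb data with skb_push(), while
	 * buffer 2, already resident in the skb data area, is accounted for
	 * with skb_put().
	 */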
5028
5029 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5030 (sp->rx_csum)) {
5031 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
5032 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5033 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 5034 /*
1da177e4
LT
5035 * NIC verifies if the Checksum of the received
5036 * frame is Ok or not and accordingly returns
5037 * a flag in the RxD.
5038 */
5039 skb->ip_summed = CHECKSUM_UNNECESSARY;
5040 } else {
20346722 5041 /*
5042 * Packet with erroneous checksum, let the
1da177e4
LT
5043 * upper layers deal with it.
5044 */
5045 skb->ip_summed = CHECKSUM_NONE;
5046 }
5047 } else {
5048 skb->ip_summed = CHECKSUM_NONE;
5049 }
5050
1da177e4 5051 skb->protocol = eth_type_trans(skb, dev);
1da177e4 5052#ifdef CONFIG_S2IO_NAPI
be3a6b02 5053 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5054 /* Queueing the vlan frame to the upper layer */
5055 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5056 RXD_GET_VLAN_TAG(rxdp->Control_2));
5057 } else {
5058 netif_receive_skb(skb);
5059 }
1da177e4 5060#else
be3a6b02 5061 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5062 /* Queueing the vlan frame to the upper layer */
5063 vlan_hwaccel_rx(skb, sp->vlgrp,
5064 RXD_GET_VLAN_TAG(rxdp->Control_2));
5065 } else {
5066 netif_rx(skb);
5067 }
1da177e4 5068#endif
1da177e4 5069 dev->last_rx = jiffies;
1da177e4 5070 atomic_dec(&sp->rx_bufs_left[ring_no]);
1da177e4
LT
5071 return SUCCESS;
5072}
5073
5074/**
5075 * s2io_link - stops/starts the Tx queue.
5076 * @sp : private member of the device structure, which is a pointer to the
5077 * s2io_nic structure.
 5078 * @link : indicates whether link is UP/DOWN.
5079 * Description:
5080 * This function stops/starts the Tx queue depending on whether the link
20346722 5081 * status of the NIC is down or up. This is called by the Alarm
5082 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
5083 * Return value:
5084 * void.
5085 */
5086
20346722 5087void s2io_link(nic_t * sp, int link)
1da177e4
LT
5088{
5089 struct net_device *dev = (struct net_device *) sp->dev;
5090
5091 if (link != sp->last_link_state) {
5092 if (link == LINK_DOWN) {
5093 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5094 netif_carrier_off(dev);
5095 } else {
5096 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5097 netif_carrier_on(dev);
5098 }
5099 }
5100 sp->last_link_state = link;
5101}
5102
5103/**
20346722 5104 * get_xena_rev_id - to identify revision ID of xena.
5105 * @pdev : PCI Dev structure
5106 * Description:
5107 * Function to identify the Revision ID of xena.
5108 * Return value:
5109 * returns the revision ID of the device.
5110 */
5111
5112int get_xena_rev_id(struct pci_dev *pdev)
5113{
5114 u8 id = 0;
5115 int ret;
5116 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5117 return id;
5118}
5119
5120/**
5121 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5122 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5123 * s2io_nic structure.
5124 * Description:
5125 * This function initializes a few of the PCI and PCI-X configuration registers
5126 * with recommended values.
5127 * Return value:
5128 * void
5129 */
5130
5131static void s2io_init_pci(nic_t * sp)
5132{
20346722 5133 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
5134
5135 /* Enable Data Parity Error Recovery in PCI-X command register. */
5136 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5137 &(pcix_cmd));
1da177e4 5138 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5139 (pcix_cmd | 1));
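	/*
	 * Per the PCI-X specification, bit 0 of the PCI-X command register is
	 * the Data Parity Error Recovery Enable bit (set by the "| 1" above)
	 * and bit 1 is Enable Relaxed Ordering (cleared further down by the
	 * 0xfffd mask).
	 */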
1da177e4 5140 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5141 &(pcix_cmd));
1da177e4
LT
5142
5143 /* Set the PErr Response bit in PCI command register. */
5144 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5145 pci_write_config_word(sp->pdev, PCI_COMMAND,
5146 (pci_cmd | PCI_COMMAND_PARITY));
5147 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5148
1da177e4 5149 /* Forcibly disabling relaxed ordering capability of the card. */
20346722 5150 pcix_cmd &= 0xfffd;
1da177e4 5151 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5152 pcix_cmd);
1da177e4 5153 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5154 &(pcix_cmd));
1da177e4
LT
5155}

MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
module_param(tx_fifo_num, int, 0);
module_param(rx_ring_num, int, 0);
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
module_param(use_continuous_tx_intrs, int, 1);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
module_param(bimodal, bool, 0);
#ifndef CONFIG_S2IO_NAPI
module_param(indicate_max_pkts, int, 0);
#endif
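/*
 * Illustrative load-time usage (parameter values are examples only):
 *   modprobe s2io tx_fifo_num=2 rx_ring_num=2 tx_fifo_len=512,512
 */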

/**
 * s2io_init_nic - Initialization of the adapter.
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initialization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Return value:
 * returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	nic_t *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	XENA_dev_config_t __iomem *bar0 = NULL;
	u16 subid;
	mac_info_t *mac_control;
	struct config_param *config;
	int mode;

#ifdef CONFIG_S2IO_NAPI
	DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
#endif

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

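	/* Prefer a 64-bit DMA mask and fall back to 32-bit; abort the probe if neither can be set. */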
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for "
				  "consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	if (pci_request_regions(pdev, s2io_driver_name)) {
		DBG_PRINT(ERR_DBG, "Request Regions failed\n");
		pci_disable_device(pdev);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(nic_t));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(nic_t));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;

	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Tx side parameters. */
	if (tx_fifo_len[0] == 0)
		tx_fifo_len[0] = DEFAULT_FIFO_LEN;	/* Default value. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* Mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	config->max_txds = MAX_SKB_FRAGS;

	/* Rx side parameters. */
	if (rx_ring_sz[0] == 0)
		rx_ring_sz[0] = SMALL_BLK_CNT;	/* Default value. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (MAX_RXDS_PER_BLOCK + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/* Initialize the number of ISRs currently running */
	atomic_set(&sp->isr_cnt, 0);

	/* Initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  __FUNCTION__);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
			   pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}
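	/* Each Tx FIFO's register window in BAR1 is spaced 0x20000 (128 KiB) apart. */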

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;
	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;

	/*
	 * Will use eth_mac_addr() for dev->set_mac_address;
	 * the mac address will be set every time dev->open() is called.
	 */
#if defined(CONFIG_S2IO_NAPI)
	dev->poll = s2io_poll;
	dev->weight = 32;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef NETIF_F_TSO
	dev->features |= NETIF_F_TSO;
#endif

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task,
		  (void (*)(void *)) s2io_restart_nic, dev);
	INIT_WORK(&sp->set_link_task,
		  (void (*)(void *)) s2io_set_link, sp);

	if (!(sp->device_type & XFRAME_II_DEVICE)) {
		pci_save_state(sp->pdev);
	}

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify that the Herc works in the slot it is placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(sp);

	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);

	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
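	/*
	 * The MAC address occupies the upper 48 bits of rmac_addr_data0_mem.
	 * For an illustrative readback of 0x0011223344550000ULL, mac_up is
	 * 0x00112233, mac_down is 0x44550000, and the address becomes
	 * 00:11:22:33:44:55.
	 */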

	/* Set the factory defined MAC address initially */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter.
	 */
	atomic_set(&(sp->card_state), 0);
	sp->tasklet_status = 0;
	sp->link_state = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);
#ifndef CONFIG_S2IO_NAPI
	spin_lock_init(&sp->put_lock);
#endif
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}

	if (sp->device_type & XFRAME_II_DEVICE) {
		DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
			  get_xena_rev_id(sp->pdev),
			  s2io_driver_version);
		DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
			  sp->def_mac_addr[0].mac_addr[0],
			  sp->def_mac_addr[0].mac_addr[1],
			  sp->def_mac_addr[0].mac_addr[2],
			  sp->def_mac_addr[0].mac_addr[3],
			  sp->def_mac_addr[0].mac_addr[4],
			  sp->def_mac_addr[0].mac_addr[5]);
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	} else {
		DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
			  get_xena_rev_id(sp->pdev),
			  s2io_driver_version);
		DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
			  sp->def_mac_addr[0].mac_addr[0],
			  sp->def_mac_addr[0].mac_addr[1],
			  sp->def_mac_addr[0].mac_addr[2],
			  sp->def_mac_addr[0].mac_addr[3],
			  sp->def_mac_addr[0].mac_addr[4],
			  sp->def_mac_addr[0].mac_addr[5]);
	}

	/* Initialize device name */
	strcpy(sp->name, dev->name);
	if (sp->device_type & XFRAME_II_DEVICE)
		strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
	else
		strcat(sp->name, ": Neterion Xframe I 10GbE adapter");

	/* Initialize bimodal Interrupts */
	sp->config.bimodal = bimodal;
	if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
		sp->config.bimodal = 0;
		DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
			  dev->name);
	}

	/*
	 * Mark the link state as off at this point; when the link change
	 * interrupt comes, the state will automatically be changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

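	/*
	 * Error unwinding: later failures fall through the earlier labels so
	 * that every resource acquired before the failure point is released.
	 */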
      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}

/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a hot-plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	nic_t *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = dev->priv;
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

int __init s2io_starter(void)
{
	return pci_module_init(&s2io_driver);
}

/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);