1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watchdog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code parts that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
34 * values are 1, 2 and 3.
35 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
36 * tx_fifo_len: This too is an array of 8. Each element defines the number of
37 * Tx descriptors that can be associated with each corresponding FIFO.
38 ************************************************************************/
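/*
 * A hypothetical load-time example (parameter names as documented above,
 * values chosen purely for illustration):
 *
 *	insmod s2io.ko tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=2
 *	rx_ring_sz=512,512 rx_ring_mode=1
 */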
39
40 #include <linux/config.h>
41 #include <linux/module.h>
42 #include <linux/types.h>
43 #include <linux/errno.h>
44 #include <linux/ioport.h>
45 #include <linux/pci.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/kernel.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/skbuff.h>
51 #include <linux/init.h>
52 #include <linux/delay.h>
53 #include <linux/stddef.h>
54 #include <linux/ioctl.h>
55 #include <linux/timex.h>
56 #include <linux/sched.h>
57 #include <linux/ethtool.h>
58 #include <linux/version.h>
59 #include <linux/workqueue.h>
60 #include <linux/if_vlan.h>
61
62 #include <asm/system.h>
63 #include <asm/uaccess.h>
64 #include <asm/io.h>
65
66 /* local include */
67 #include "s2io.h"
68 #include "s2io-regs.h"
69
70 #define DRV_VERSION "Version 2.0.9.3"
71
72 /* S2io Driver name & version. */
73 static char s2io_driver_name[] = "Neterion";
74 static char s2io_driver_version[] = DRV_VERSION;
75
76 int rxd_size[4] = {32,48,48,64};
77 int rxd_count[4] = {127,85,85,63};
78
79 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
80 {
81 int ret;
82
83 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
84 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
85
86 return ret;
87 }
88
89 /*
90 * Cards with following subsystem_id have a link state indication
91 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
92 * macro below identifies these cards given the subsystem_id.
93 */
94 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
95 (dev_type == XFRAME_I_DEVICE) ? \
96 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
97 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
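/*
 * For example, CARDS_WITH_FAULTY_LINK_INDICATORS(XFRAME_I_DEVICE, 0x600C)
 * evaluates to 1, while any subsystem id on an Xframe II device
 * evaluates to 0.
 */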
98
99 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
100 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
101 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
102 #define PANIC 1
103 #define LOW 2
104 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
105 {
106 int level = 0;
107 mac_info_t *mac_control;
108
109 mac_control = &sp->mac_control;
110 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
111 level = LOW;
112 if (rxb_size <= rxd_count[sp->rxd_mode]) {
113 level = PANIC;
114 }
115 }
116
117 return level;
118 }
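/*
 * Illustrative sketch (a hypothetical helper, not used by the driver):
 * how a caller might translate the watermark above into a refill
 * urgency, replenishing immediately on PANIC and lazily on LOW.
 */
static inline int rx_refill_urgency(nic_t *sp, int rxb_size, int ring)
{
	switch (rx_buffer_level(sp, rxb_size, ring)) {
	case PANIC:
		return 2;	/* refill from the interrupt path itself */
	case LOW:
		return 1;	/* refill can be deferred to the tasklet */
	default:
		return 0;	/* enough buffers posted, nothing to do */
	}
}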
119
120 /* Ethtool related variables and Macros. */
121 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
122 "Register test\t(offline)",
123 "Eeprom test\t(offline)",
124 "Link test\t(online)",
125 "RLDRAM test\t(offline)",
126 "BIST Test\t(offline)"
127 };
128
129 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
130 {"tmac_frms"},
131 {"tmac_data_octets"},
132 {"tmac_drop_frms"},
133 {"tmac_mcst_frms"},
134 {"tmac_bcst_frms"},
135 {"tmac_pause_ctrl_frms"},
136 {"tmac_any_err_frms"},
137 {"tmac_vld_ip_octets"},
138 {"tmac_vld_ip"},
139 {"tmac_drop_ip"},
140 {"tmac_icmp"},
141 {"tmac_rst_tcp"},
142 {"tmac_tcp"},
143 {"tmac_udp"},
144 {"rmac_vld_frms"},
145 {"rmac_data_octets"},
146 {"rmac_fcs_err_frms"},
147 {"rmac_drop_frms"},
148 {"rmac_vld_mcst_frms"},
149 {"rmac_vld_bcst_frms"},
150 {"rmac_in_rng_len_err_frms"},
151 {"rmac_long_frms"},
152 {"rmac_pause_ctrl_frms"},
153 {"rmac_discarded_frms"},
154 {"rmac_usized_frms"},
155 {"rmac_osized_frms"},
156 {"rmac_frag_frms"},
157 {"rmac_jabber_frms"},
158 {"rmac_ip"},
159 {"rmac_ip_octets"},
160 {"rmac_hdr_err_ip"},
161 {"rmac_drop_ip"},
162 {"rmac_icmp"},
163 {"rmac_tcp"},
164 {"rmac_udp"},
165 {"rmac_err_drp_udp"},
166 {"rmac_pause_cnt"},
167 {"rmac_accepted_ip"},
168 {"rmac_err_tcp"},
169 {"\n DRIVER STATISTICS"},
170 {"single_bit_ecc_errs"},
171 {"double_bit_ecc_errs"},
172 };
173
174 #define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
175 #define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
176
177 #define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
178 #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
179
180 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
181 init_timer(&timer); \
182 timer.function = handle; \
183 timer.data = (unsigned long) arg; \
184 mod_timer(&timer, (jiffies + exp)) \
185
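/*
 * A usage sketch for the macro above (handler and argument are
 * illustrative, not prescribed by this file): arm a timer that
 * fires after roughly half a second.
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle,
 *			(unsigned long)sp, (HZ / 2));
 */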
186 /* Register the vlan group */
187 static void s2io_vlan_rx_register(struct net_device *dev,
188 struct vlan_group *grp)
189 {
190 nic_t *nic = dev->priv;
191 unsigned long flags;
192
193 spin_lock_irqsave(&nic->tx_lock, flags);
194 nic->vlgrp = grp;
195 spin_unlock_irqrestore(&nic->tx_lock, flags);
196 }
197
198 /* Unregister the vlan */
199 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
200 {
201 nic_t *nic = dev->priv;
202 unsigned long flags;
203
204 spin_lock_irqsave(&nic->tx_lock, flags);
205 if (nic->vlgrp)
206 nic->vlgrp->vlan_devices[vid] = NULL;
207 spin_unlock_irqrestore(&nic->tx_lock, flags);
208 }
209
210 /*
211 * Constants to be programmed into the Xena's registers, to configure
212 * the XAUI.
213 */
214
215 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
216 #define END_SIGN 0x0
217
218 static u64 herc_act_dtx_cfg[] = {
219 /* Set address */
220 0x8000051536750000ULL, 0x80000515367500E0ULL,
221 /* Write data */
222 0x8000051536750004ULL, 0x80000515367500E4ULL,
223 /* Set address */
224 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
225 /* Write data */
226 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
227 /* Set address */
228 0x801205150D440000ULL, 0x801205150D4400E0ULL,
229 /* Write data */
230 0x801205150D440004ULL, 0x801205150D4400E4ULL,
231 /* Set address */
232 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
233 /* Write data */
234 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
235 /* Done */
236 END_SIGN
237 };
238
239 static u64 xena_mdio_cfg[] = {
240 /* Reset PMA PLL */
241 0xC001010000000000ULL, 0xC0010100000000E0ULL,
242 0xC0010100008000E4ULL,
243 /* Remove Reset from PMA PLL */
244 0xC001010000000000ULL, 0xC0010100000000E0ULL,
245 0xC0010100000000E4ULL,
246 END_SIGN
247 };
248
249 static u64 xena_dtx_cfg[] = {
250 0x8000051500000000ULL, 0x80000515000000E0ULL,
251 0x80000515D93500E4ULL, 0x8001051500000000ULL,
252 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
253 0x8002051500000000ULL, 0x80020515000000E0ULL,
254 0x80020515F21000E4ULL,
255 /* Set PADLOOPBACKN */
256 0x8002051500000000ULL, 0x80020515000000E0ULL,
257 0x80020515B20000E4ULL, 0x8003051500000000ULL,
258 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
259 0x8004051500000000ULL, 0x80040515000000E0ULL,
260 0x80040515B20000E4ULL, 0x8005051500000000ULL,
261 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
262 SWITCH_SIGN,
263 /* Remove PADLOOPBACKN */
264 0x8002051500000000ULL, 0x80020515000000E0ULL,
265 0x80020515F20000E4ULL, 0x8003051500000000ULL,
266 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
267 0x8004051500000000ULL, 0x80040515000000E0ULL,
268 0x80040515F20000E4ULL, 0x8005051500000000ULL,
269 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
270 END_SIGN
271 };
272
273 /*
274 * Constants for Fixing the MacAddress problem seen mostly on
275 * Alpha machines.
276 */
277 static u64 fix_mac[] = {
278 0x0060000000000000ULL, 0x0060600000000000ULL,
279 0x0040600000000000ULL, 0x0000600000000000ULL,
280 0x0020600000000000ULL, 0x0060600000000000ULL,
281 0x0020600000000000ULL, 0x0060600000000000ULL,
282 0x0020600000000000ULL, 0x0060600000000000ULL,
283 0x0020600000000000ULL, 0x0060600000000000ULL,
284 0x0020600000000000ULL, 0x0060600000000000ULL,
285 0x0020600000000000ULL, 0x0060600000000000ULL,
286 0x0020600000000000ULL, 0x0060600000000000ULL,
287 0x0020600000000000ULL, 0x0060600000000000ULL,
288 0x0020600000000000ULL, 0x0060600000000000ULL,
289 0x0020600000000000ULL, 0x0060600000000000ULL,
290 0x0020600000000000ULL, 0x0000600000000000ULL,
291 0x0040600000000000ULL, 0x0060600000000000ULL,
292 END_SIGN
293 };
294
295 /* Module Loadable parameters. */
296 static unsigned int tx_fifo_num = 1;
297 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
298 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
299 static unsigned int rx_ring_num = 1;
300 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
301 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
302 static unsigned int rts_frm_len[MAX_RX_RINGS] =
303 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
304 static unsigned int rx_ring_mode = 1;
305 static unsigned int use_continuous_tx_intrs = 1;
306 static unsigned int rmac_pause_time = 65535;
307 static unsigned int mc_pause_threshold_q0q3 = 187;
308 static unsigned int mc_pause_threshold_q4q7 = 187;
309 static unsigned int shared_splits;
310 static unsigned int tmac_util_period = 5;
311 static unsigned int rmac_util_period = 5;
312 static unsigned int bimodal = 0;
313 static unsigned int l3l4hdr_size = 128;
314 #ifndef CONFIG_S2IO_NAPI
315 static unsigned int indicate_max_pkts;
316 #endif
317 /* Frequency of Rx desc syncs expressed as power of 2 */
318 static unsigned int rxsync_frequency = 3;
319 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
320 static unsigned int intr_type = 0;
321
322 /*
323 * S2IO device table.
324 * This table lists all the devices that this driver supports.
325 */
326 static struct pci_device_id s2io_tbl[] __devinitdata = {
327 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
328 PCI_ANY_ID, PCI_ANY_ID},
329 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
330 PCI_ANY_ID, PCI_ANY_ID},
331 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
332 PCI_ANY_ID, PCI_ANY_ID},
333 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
334 PCI_ANY_ID, PCI_ANY_ID},
335 {0,}
336 };
337
338 MODULE_DEVICE_TABLE(pci, s2io_tbl);
339
340 static struct pci_driver s2io_driver = {
341 .name = "S2IO",
342 .id_table = s2io_tbl,
343 .probe = s2io_init_nic,
344 .remove = __devexit_p(s2io_rem_nic),
345 };
346
347 /* A helper macro, used by both init_shared_mem() and free_shared_mem(). */
348 #define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
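/*
 * TXD_MEM_PAGE_CNT() is a plain ceiling division. As a worked example
 * (sizes assumed purely for illustration): with PAGE_SIZE = 4096 and a
 * TxD list size of 512 bytes, lst_per_page is 8, so a FIFO of 100 lists
 * needs TXD_MEM_PAGE_CNT(100, 8) = (100 + 8 - 1) / 8 = 13 pages.
 */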
349
350 /**
351 * init_shared_mem - Allocation and Initialization of Memory
352 * @nic: Device private variable.
353 * Description: The function allocates all the memory areas shared
354 * between the NIC and the driver. This includes Tx descriptors,
355 * Rx descriptors and the statistics block.
356 */
357
358 static int init_shared_mem(struct s2io_nic *nic)
359 {
360 u32 size;
361 void *tmp_v_addr, *tmp_v_addr_next;
362 dma_addr_t tmp_p_addr, tmp_p_addr_next;
363 RxD_block_t *pre_rxd_blk = NULL;
364 int i, j, blk_cnt, rx_sz, tx_sz;
365 int lst_size, lst_per_page;
366 struct net_device *dev = nic->dev;
367 unsigned long tmp;
368 buffAdd_t *ba;
369
370 mac_info_t *mac_control;
371 struct config_param *config;
372
373 mac_control = &nic->mac_control;
374 config = &nic->config;
375
376
377 /* Allocation and initialization of TXDLs in FIFOs */
378 size = 0;
379 for (i = 0; i < config->tx_fifo_num; i++) {
380 size += config->tx_cfg[i].fifo_len;
381 }
382 if (size > MAX_AVAILABLE_TXDS) {
383 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
384 __FUNCTION__);
385 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: %d\n", size, MAX_AVAILABLE_TXDS);
386 return FAILURE;
387 }
388
389 lst_size = (sizeof(TxD_t) * config->max_txds);
390 tx_sz = lst_size * size;
391 lst_per_page = PAGE_SIZE / lst_size;
392
393 for (i = 0; i < config->tx_fifo_num; i++) {
394 int fifo_len = config->tx_cfg[i].fifo_len;
395 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
396 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
397 GFP_KERNEL);
398 if (!mac_control->fifos[i].list_info) {
399 DBG_PRINT(ERR_DBG,
400 "Malloc failed for list_info\n");
401 return -ENOMEM;
402 }
403 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
404 }
405 for (i = 0; i < config->tx_fifo_num; i++) {
406 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
407 lst_per_page);
408 mac_control->fifos[i].tx_curr_put_info.offset = 0;
409 mac_control->fifos[i].tx_curr_put_info.fifo_len =
410 config->tx_cfg[i].fifo_len - 1;
411 mac_control->fifos[i].tx_curr_get_info.offset = 0;
412 mac_control->fifos[i].tx_curr_get_info.fifo_len =
413 config->tx_cfg[i].fifo_len - 1;
414 mac_control->fifos[i].fifo_no = i;
415 mac_control->fifos[i].nic = nic;
416 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
417
418 for (j = 0; j < page_num; j++) {
419 int k = 0;
420 dma_addr_t tmp_p;
421 void *tmp_v;
422 tmp_v = pci_alloc_consistent(nic->pdev,
423 PAGE_SIZE, &tmp_p);
424 if (!tmp_v) {
425 DBG_PRINT(ERR_DBG,
426 "pci_alloc_consistent ");
427 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
428 return -ENOMEM;
429 }
430 /* If we got a zero DMA address (this can happen on
431 * certain platforms like PPC), reallocate.
432 * Store the virtual address of the page we don't want,
433 * to be freed later.
434 */
435 if (!tmp_p) {
436 mac_control->zerodma_virt_addr = tmp_v;
437 DBG_PRINT(INIT_DBG,
438 "%s: Zero DMA address for TxDL. ", dev->name);
439 DBG_PRINT(INIT_DBG,
440 "Virtual address %p\n", tmp_v);
441 tmp_v = pci_alloc_consistent(nic->pdev,
442 PAGE_SIZE, &tmp_p);
443 if (!tmp_v) {
444 DBG_PRINT(ERR_DBG,
445 "pci_alloc_consistent ");
446 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
447 return -ENOMEM;
448 }
449 }
450 while (k < lst_per_page) {
451 int l = (j * lst_per_page) + k;
452 if (l == config->tx_cfg[i].fifo_len)
453 break;
454 mac_control->fifos[i].list_info[l].list_virt_addr =
455 tmp_v + (k * lst_size);
456 mac_control->fifos[i].list_info[l].list_phy_addr =
457 tmp_p + (k * lst_size);
458 k++;
459 }
460 }
461 }
462
463 /* Allocation and initialization of RXDs in Rings */
464 size = 0;
465 for (i = 0; i < config->rx_ring_num; i++) {
466 if (config->rx_cfg[i].num_rxd %
467 (rxd_count[nic->rxd_mode] + 1)) {
468 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
469 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
470 i);
471 DBG_PRINT(ERR_DBG, "RxDs per Block");
472 return FAILURE;
473 }
474 size += config->rx_cfg[i].num_rxd;
475 mac_control->rings[i].block_count =
476 config->rx_cfg[i].num_rxd /
477 (rxd_count[nic->rxd_mode] + 1 );
478 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
479 mac_control->rings[i].block_count;
480 }
481 if (nic->rxd_mode == RXD_MODE_1)
482 size = (size * (sizeof(RxD1_t)));
483 else
484 size = (size * (sizeof(RxD3_t)));
485 rx_sz = size;
486
487 for (i = 0; i < config->rx_ring_num; i++) {
488 mac_control->rings[i].rx_curr_get_info.block_index = 0;
489 mac_control->rings[i].rx_curr_get_info.offset = 0;
490 mac_control->rings[i].rx_curr_get_info.ring_len =
491 config->rx_cfg[i].num_rxd - 1;
492 mac_control->rings[i].rx_curr_put_info.block_index = 0;
493 mac_control->rings[i].rx_curr_put_info.offset = 0;
494 mac_control->rings[i].rx_curr_put_info.ring_len =
495 config->rx_cfg[i].num_rxd - 1;
496 mac_control->rings[i].nic = nic;
497 mac_control->rings[i].ring_no = i;
498
499 blk_cnt = config->rx_cfg[i].num_rxd /
500 (rxd_count[nic->rxd_mode] + 1);
501 /* Allocating all the Rx blocks */
502 for (j = 0; j < blk_cnt; j++) {
503 rx_block_info_t *rx_blocks;
504 int l;
505
506 rx_blocks = &mac_control->rings[i].rx_blocks[j];
507 size = SIZE_OF_BLOCK; /* size is always page size */
508 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
509 &tmp_p_addr);
510 if (tmp_v_addr == NULL) {
511 /*
512 * In case of failure, free_shared_mem()
513 * is called, which should free any
514 * memory that was allocated up to the
515 * point of failure.
516 */
517 rx_blocks->block_virt_addr = tmp_v_addr;
518 return -ENOMEM;
519 }
520 memset(tmp_v_addr, 0, size);
521 rx_blocks->block_virt_addr = tmp_v_addr;
522 rx_blocks->block_dma_addr = tmp_p_addr;
523 rx_blocks->rxds = kmalloc(sizeof(rxd_info_t) *
524 rxd_count[nic->rxd_mode], GFP_KERNEL);
525 if (!rx_blocks->rxds)
	return -ENOMEM;
526 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
527 rx_blocks->rxds[l].virt_addr =
528 rx_blocks->block_virt_addr +
529 (rxd_size[nic->rxd_mode] * l);
530 rx_blocks->rxds[l].dma_addr =
531 rx_blocks->block_dma_addr +
532 (rxd_size[nic->rxd_mode] * l);
533 }
534
535 mac_control->rings[i].rx_blocks[j].block_virt_addr =
536 tmp_v_addr;
537 mac_control->rings[i].rx_blocks[j].block_dma_addr =
538 tmp_p_addr;
539 }
540 /* Interlinking all Rx Blocks */
541 for (j = 0; j < blk_cnt; j++) {
542 tmp_v_addr =
543 mac_control->rings[i].rx_blocks[j].block_virt_addr;
544 tmp_v_addr_next =
545 mac_control->rings[i].rx_blocks[(j + 1) %
546 blk_cnt].block_virt_addr;
547 tmp_p_addr =
548 mac_control->rings[i].rx_blocks[j].block_dma_addr;
549 tmp_p_addr_next =
550 mac_control->rings[i].rx_blocks[(j + 1) %
551 blk_cnt].block_dma_addr;
552
553 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
554 pre_rxd_blk->reserved_2_pNext_RxD_block =
555 (unsigned long) tmp_v_addr_next;
556 pre_rxd_blk->pNext_RxD_Blk_physical =
557 (u64) tmp_p_addr_next;
558 }
559 }
560 if (nic->rxd_mode >= RXD_MODE_3A) {
561 /*
562 * Allocation of storage for the buffer addresses in 2BUFF mode,
563 * and of the buffers themselves.
564 */
565 for (i = 0; i < config->rx_ring_num; i++) {
566 blk_cnt = config->rx_cfg[i].num_rxd /
567 (rxd_count[nic->rxd_mode]+ 1);
568 mac_control->rings[i].ba =
569 kmalloc((sizeof(buffAdd_t *) * blk_cnt),
570 GFP_KERNEL);
571 if (!mac_control->rings[i].ba)
572 return -ENOMEM;
573 for (j = 0; j < blk_cnt; j++) {
574 int k = 0;
575 mac_control->rings[i].ba[j] =
576 kmalloc((sizeof(buffAdd_t) *
577 (rxd_count[nic->rxd_mode] + 1)),
578 GFP_KERNEL);
579 if (!mac_control->rings[i].ba[j])
580 return -ENOMEM;
581 while (k != rxd_count[nic->rxd_mode]) {
582 ba = &mac_control->rings[i].ba[j][k];
583
584 ba->ba_0_org = (void *) kmalloc
585 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
586 if (!ba->ba_0_org)
587 return -ENOMEM;
588 tmp = (unsigned long)ba->ba_0_org;
589 tmp += ALIGN_SIZE;
590 tmp &= ~((unsigned long) ALIGN_SIZE);
591 ba->ba_0 = (void *) tmp;
592
593 ba->ba_1_org = (void *) kmalloc
594 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
595 if (!ba->ba_1_org)
596 return -ENOMEM;
597 tmp = (unsigned long) ba->ba_1_org;
598 tmp += ALIGN_SIZE;
599 tmp &= ~((unsigned long) ALIGN_SIZE);
600 ba->ba_1 = (void *) tmp;
601 k++;
602 }
603 }
604 }
605 }
606
607 /* Allocation and initialization of Statistics block */
608 size = sizeof(StatInfo_t);
609 mac_control->stats_mem = pci_alloc_consistent
610 (nic->pdev, size, &mac_control->stats_mem_phy);
611
612 if (!mac_control->stats_mem) {
613 /*
614 * In case of failure, free_shared_mem() is called, which
615 * should free any memory that was allocated up to the
616 * point of failure.
617 */
618 return -ENOMEM;
619 }
620 mac_control->stats_mem_sz = size;
621
622 tmp_v_addr = mac_control->stats_mem;
623 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
624 memset(tmp_v_addr, 0, size);
625 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
626 (unsigned long long) tmp_p_addr);
627
628 return SUCCESS;
629 }
630
631 /**
632 * free_shared_mem - Free the allocated Memory
633 * @nic: Device private variable.
634 * Description: This function frees all the memory allocated by the
635 * init_shared_mem() function and returns it to the kernel.
636 */
637
638 static void free_shared_mem(struct s2io_nic *nic)
639 {
640 int i, j, blk_cnt, size;
641 void *tmp_v_addr;
642 dma_addr_t tmp_p_addr;
643 mac_info_t *mac_control;
644 struct config_param *config;
645 int lst_size, lst_per_page;
646 struct net_device *dev;
647
648 if (!nic)
649 return;
650 dev = nic->dev;
651 mac_control = &nic->mac_control;
652 config = &nic->config;
653
654 lst_size = (sizeof(TxD_t) * config->max_txds);
655 lst_per_page = PAGE_SIZE / lst_size;
656
657 for (i = 0; i < config->tx_fifo_num; i++) {
658 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
659 lst_per_page);
660 for (j = 0; j < page_num; j++) {
661 int mem_blks = (j * lst_per_page);
662 if (!mac_control->fifos[i].list_info)
663 return;
664 if (!mac_control->fifos[i].list_info[mem_blks].
665 list_virt_addr)
666 break;
667 pci_free_consistent(nic->pdev, PAGE_SIZE,
668 mac_control->fifos[i].
669 list_info[mem_blks].
670 list_virt_addr,
671 mac_control->fifos[i].
672 list_info[mem_blks].
673 list_phy_addr);
674 }
675 /* If we got a zero DMA address during allocation,
676 * free the page now
677 */
678 if (mac_control->zerodma_virt_addr) {
679 pci_free_consistent(nic->pdev, PAGE_SIZE,
680 mac_control->zerodma_virt_addr,
681 (dma_addr_t)0);
682 DBG_PRINT(INIT_DBG,
683 "%s: Freeing TxDL with zero DMA addr. ",
684 dev->name);
685 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
686 mac_control->zerodma_virt_addr);
687 }
688 kfree(mac_control->fifos[i].list_info);
689 }
690
691 size = SIZE_OF_BLOCK;
692 for (i = 0; i < config->rx_ring_num; i++) {
693 blk_cnt = mac_control->rings[i].block_count;
694 for (j = 0; j < blk_cnt; j++) {
695 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
696 block_virt_addr;
697 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
698 block_dma_addr;
699 if (tmp_v_addr == NULL)
700 break;
701 pci_free_consistent(nic->pdev, size,
702 tmp_v_addr, tmp_p_addr);
703 kfree(mac_control->rings[i].rx_blocks[j].rxds);
704 }
705 }
706
707 if (nic->rxd_mode >= RXD_MODE_3A) {
708 /* Freeing buffer storage addresses in 2BUFF mode. */
709 for (i = 0; i < config->rx_ring_num; i++) {
710 blk_cnt = config->rx_cfg[i].num_rxd /
711 (rxd_count[nic->rxd_mode] + 1);
712 for (j = 0; j < blk_cnt; j++) {
713 int k = 0;
714 if (!mac_control->rings[i].ba[j])
715 continue;
716 while (k != rxd_count[nic->rxd_mode]) {
717 buffAdd_t *ba =
718 &mac_control->rings[i].ba[j][k];
719 kfree(ba->ba_0_org);
720 kfree(ba->ba_1_org);
721 k++;
722 }
723 kfree(mac_control->rings[i].ba[j]);
724 }
725 kfree(mac_control->rings[i].ba);
726 }
727 }
728
729 if (mac_control->stats_mem) {
730 pci_free_consistent(nic->pdev,
731 mac_control->stats_mem_sz,
732 mac_control->stats_mem,
733 mac_control->stats_mem_phy);
734 }
735 }
736
737 /**
738 * s2io_verify_pci_mode - Verify the PCI/PCI-X bus mode the adapter is in.
739 */
740
741 static int s2io_verify_pci_mode(nic_t *nic)
742 {
743 XENA_dev_config_t __iomem *bar0 = nic->bar0;
744 register u64 val64 = 0;
745 int mode;
746
747 val64 = readq(&bar0->pci_mode);
748 mode = (u8)GET_PCI_MODE(val64);
749
750 if (val64 & PCI_MODE_UNKNOWN_MODE)
751 return -1; /* Unknown PCI mode */
752 return mode;
753 }
754
755
756 /**
757 * s2io_print_pci_mode - Print the bus width and speed the adapter is on.
758 */
759 static int s2io_print_pci_mode(nic_t *nic)
760 {
761 XENA_dev_config_t __iomem *bar0 = nic->bar0;
762 register u64 val64 = 0;
763 int mode;
764 struct config_param *config = &nic->config;
765
766 val64 = readq(&bar0->pci_mode);
767 mode = (u8)GET_PCI_MODE(val64);
768
769 if (val64 & PCI_MODE_UNKNOWN_MODE)
770 return -1; /* Unknown PCI mode */
771
772 if (val64 & PCI_MODE_32_BITS) {
773 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
774 } else {
775 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
776 }
777
778 switch (mode) {
779 case PCI_MODE_PCI_33:
780 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
781 config->bus_speed = 33;
782 break;
783 case PCI_MODE_PCI_66:
784 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
785 config->bus_speed = 133;
786 break;
787 case PCI_MODE_PCIX_M1_66:
788 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
789 config->bus_speed = 133; /* Herc doubles the clock rate */
790 break;
791 case PCI_MODE_PCIX_M1_100:
792 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
793 config->bus_speed = 200;
794 break;
795 case PCI_MODE_PCIX_M1_133:
796 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
797 config->bus_speed = 266;
798 break;
799 case PCI_MODE_PCIX_M2_66:
800 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
801 config->bus_speed = 133;
802 break;
803 case PCI_MODE_PCIX_M2_100:
804 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
805 config->bus_speed = 200;
806 break;
807 case PCI_MODE_PCIX_M2_133:
808 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
809 config->bus_speed = 266;
810 break;
811 default:
812 return -1; /* Unsupported bus speed */
813 }
814
815 return mode;
816 }
817
818 /**
819 * init_nic - Initialization of hardware
820 * @nic: device private variable
821 * Description: The function sequentially configures every block
822 * of the H/W from their reset values.
823 * Return Value: SUCCESS on success and
824 * '-1' on failure (endian settings incorrect).
825 */
826
827 static int init_nic(struct s2io_nic *nic)
828 {
829 XENA_dev_config_t __iomem *bar0 = nic->bar0;
830 struct net_device *dev = nic->dev;
831 register u64 val64 = 0;
832 void __iomem *add;
833 u32 time;
834 int i, j;
835 mac_info_t *mac_control;
836 struct config_param *config;
837 int mdio_cnt = 0, dtx_cnt = 0;
838 unsigned long long mem_share;
839 int mem_size;
840
841 mac_control = &nic->mac_control;
842 config = &nic->config;
843
844 /* Set the swapper control on the card */
845 if (s2io_set_swapper(nic)) {
846 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
847 return -1;
848 }
849
850 /*
851 * Herc requires EOI to be removed from reset before XGXS, so do that first.
852 */
853 if (nic->device_type & XFRAME_II_DEVICE) {
854 val64 = 0xA500000000ULL;
855 writeq(val64, &bar0->sw_reset);
856 msleep(500);
857 val64 = readq(&bar0->sw_reset);
858 }
859
860 /* Remove XGXS from reset state */
861 val64 = 0;
862 writeq(val64, &bar0->sw_reset);
863 msleep(500);
864 val64 = readq(&bar0->sw_reset);
865
866 /* Enable Receiving broadcasts */
867 add = &bar0->mac_cfg;
868 val64 = readq(&bar0->mac_cfg);
869 val64 |= MAC_RMAC_BCAST_ENABLE;
870 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
871 writel((u32) val64, add);
872 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
873 writel((u32) (val64 >> 32), (add + 4));
874
875 /* Read registers in all blocks */
876 val64 = readq(&bar0->mac_int_mask);
877 val64 = readq(&bar0->mc_int_mask);
878 val64 = readq(&bar0->xgxs_int_mask);
879
880 /* Set MTU */
881 val64 = dev->mtu;
882 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
883
884 /*
885 * Configuring the XAUI Interface of Xena.
886 * ***************************************
887 * To Configure the Xena's XAUI, one has to write a series
888 * of 64 bit values into two registers in a particular
889 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
890 * which will be defined in the array of configuration values
891 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
892 * to switch writing from one register to another. We continue
893 * writing these values until we encounter the 'END_SIGN' macro.
894 * For example, after making a series of 21 writes into
895 * dtx_control register the 'SWITCH_SIGN' appears and hence we
896 * start writing into mdio_control until we encounter END_SIGN.
897 */
898 if (nic->device_type & XFRAME_II_DEVICE) {
899 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
900 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
901 &bar0->dtx_control, UF);
902 if (dtx_cnt & 0x1)
903 msleep(1); /* Necessary!! */
904 dtx_cnt++;
905 }
906 } else {
907 while (1) {
908 dtx_cfg:
909 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
910 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
911 dtx_cnt++;
912 goto mdio_cfg;
913 }
914 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
915 &bar0->dtx_control, UF);
916 val64 = readq(&bar0->dtx_control);
917 dtx_cnt++;
918 }
919 mdio_cfg:
920 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
921 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
922 mdio_cnt++;
923 goto dtx_cfg;
924 }
925 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
926 &bar0->mdio_control, UF);
927 val64 = readq(&bar0->mdio_control);
928 mdio_cnt++;
929 }
930 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
931 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
932 break;
933 } else {
934 goto dtx_cfg;
935 }
936 }
937 }
938
939 /* Tx DMA Initialization */
940 val64 = 0;
941 writeq(val64, &bar0->tx_fifo_partition_0);
942 writeq(val64, &bar0->tx_fifo_partition_1);
943 writeq(val64, &bar0->tx_fifo_partition_2);
944 writeq(val64, &bar0->tx_fifo_partition_3);
945
946
947 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
948 val64 |=
949 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
950 13) | vBIT(config->tx_cfg[i].fifo_priority,
951 ((i * 32) + 5), 3);
952
953 if (i == (config->tx_fifo_num - 1)) {
954 if (i % 2 == 0)
955 i++;
956 }
957
958 switch (i) {
959 case 1:
960 writeq(val64, &bar0->tx_fifo_partition_0);
961 val64 = 0;
962 break;
963 case 3:
964 writeq(val64, &bar0->tx_fifo_partition_1);
965 val64 = 0;
966 break;
967 case 5:
968 writeq(val64, &bar0->tx_fifo_partition_2);
969 val64 = 0;
970 break;
971 case 7:
972 writeq(val64, &bar0->tx_fifo_partition_3);
973 break;
974 }
975 }
976
977 /* Enable Tx FIFO partition 0. */
978 val64 = readq(&bar0->tx_fifo_partition_0);
979 val64 |= BIT(0); /* To enable the FIFO partition. */
980 writeq(val64, &bar0->tx_fifo_partition_0);
981
982 /*
983 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
984 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
985 */
986 if ((nic->device_type == XFRAME_I_DEVICE) &&
987 (get_xena_rev_id(nic->pdev) < 4))
988 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
989
990 val64 = readq(&bar0->tx_fifo_partition_0);
991 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
992 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
993
994 /*
995 * Initialization of Tx_PA_CONFIG register to ignore packet
996 * integrity checking.
997 */
998 val64 = readq(&bar0->tx_pa_cfg);
999 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1000 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1001 writeq(val64, &bar0->tx_pa_cfg);
1002
1003 /* Rx DMA initialization. */
1004 val64 = 0;
1005 for (i = 0; i < config->rx_ring_num; i++) {
1006 val64 |=
1007 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1008 3);
1009 }
1010 writeq(val64, &bar0->rx_queue_priority);
1011
1012 /*
1013 * Allocating equal share of memory to all the
1014 * configured Rings.
1015 */
1016 val64 = 0;
1017 if (nic->device_type & XFRAME_II_DEVICE)
1018 mem_size = 32;
1019 else
1020 mem_size = 64;
1021
1022 for (i = 0; i < config->rx_ring_num; i++) {
1023 switch (i) {
1024 case 0:
1025 mem_share = (mem_size / config->rx_ring_num +
1026 mem_size % config->rx_ring_num);
1027 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1028 continue;
1029 case 1:
1030 mem_share = (mem_size / config->rx_ring_num);
1031 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1032 continue;
1033 case 2:
1034 mem_share = (mem_size / config->rx_ring_num);
1035 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1036 continue;
1037 case 3:
1038 mem_share = (mem_size / config->rx_ring_num);
1039 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1040 continue;
1041 case 4:
1042 mem_share = (mem_size / config->rx_ring_num);
1043 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1044 continue;
1045 case 5:
1046 mem_share = (mem_size / config->rx_ring_num);
1047 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1048 continue;
1049 case 6:
1050 mem_share = (mem_size / config->rx_ring_num);
1051 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1052 continue;
1053 case 7:
1054 mem_share = (mem_size / config->rx_ring_num);
1055 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1056 continue;
1057 }
1058 }
1059 writeq(val64, &bar0->rx_queue_cfg);
1060
1061 /*
1062 * Filling Tx round robin registers
1063 * as per the number of FIFOs
1064 */
1065 switch (config->tx_fifo_num) {
1066 case 1:
1067 val64 = 0x0000000000000000ULL;
1068 writeq(val64, &bar0->tx_w_round_robin_0);
1069 writeq(val64, &bar0->tx_w_round_robin_1);
1070 writeq(val64, &bar0->tx_w_round_robin_2);
1071 writeq(val64, &bar0->tx_w_round_robin_3);
1072 writeq(val64, &bar0->tx_w_round_robin_4);
1073 break;
1074 case 2:
1075 val64 = 0x0000010000010000ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_0);
1077 val64 = 0x0100000100000100ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_1);
1079 val64 = 0x0001000001000001ULL;
1080 writeq(val64, &bar0->tx_w_round_robin_2);
1081 val64 = 0x0000010000010000ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_3);
1083 val64 = 0x0100000000000000ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_4);
1085 break;
1086 case 3:
1087 val64 = 0x0001000102000001ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_0);
1089 val64 = 0x0001020000010001ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_1);
1091 val64 = 0x0200000100010200ULL;
1092 writeq(val64, &bar0->tx_w_round_robin_2);
1093 val64 = 0x0001000102000001ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_3);
1095 val64 = 0x0001020000000000ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_4);
1097 break;
1098 case 4:
1099 val64 = 0x0001020300010200ULL;
1100 writeq(val64, &bar0->tx_w_round_robin_0);
1101 val64 = 0x0100000102030001ULL;
1102 writeq(val64, &bar0->tx_w_round_robin_1);
1103 val64 = 0x0200010000010203ULL;
1104 writeq(val64, &bar0->tx_w_round_robin_2);
1105 val64 = 0x0001020001000001ULL;
1106 writeq(val64, &bar0->tx_w_round_robin_3);
1107 val64 = 0x0203000100000000ULL;
1108 writeq(val64, &bar0->tx_w_round_robin_4);
1109 break;
1110 case 5:
1111 val64 = 0x0001000203000102ULL;
1112 writeq(val64, &bar0->tx_w_round_robin_0);
1113 val64 = 0x0001020001030004ULL;
1114 writeq(val64, &bar0->tx_w_round_robin_1);
1115 val64 = 0x0001000203000102ULL;
1116 writeq(val64, &bar0->tx_w_round_robin_2);
1117 val64 = 0x0001020001030004ULL;
1118 writeq(val64, &bar0->tx_w_round_robin_3);
1119 val64 = 0x0001000000000000ULL;
1120 writeq(val64, &bar0->tx_w_round_robin_4);
1121 break;
1122 case 6:
1123 val64 = 0x0001020304000102ULL;
1124 writeq(val64, &bar0->tx_w_round_robin_0);
1125 val64 = 0x0304050001020001ULL;
1126 writeq(val64, &bar0->tx_w_round_robin_1);
1127 val64 = 0x0203000100000102ULL;
1128 writeq(val64, &bar0->tx_w_round_robin_2);
1129 val64 = 0x0304000102030405ULL;
1130 writeq(val64, &bar0->tx_w_round_robin_3);
1131 val64 = 0x0001000200000000ULL;
1132 writeq(val64, &bar0->tx_w_round_robin_4);
1133 break;
1134 case 7:
1135 val64 = 0x0001020001020300ULL;
1136 writeq(val64, &bar0->tx_w_round_robin_0);
1137 val64 = 0x0102030400010203ULL;
1138 writeq(val64, &bar0->tx_w_round_robin_1);
1139 val64 = 0x0405060001020001ULL;
1140 writeq(val64, &bar0->tx_w_round_robin_2);
1141 val64 = 0x0304050000010200ULL;
1142 writeq(val64, &bar0->tx_w_round_robin_3);
1143 val64 = 0x0102030000000000ULL;
1144 writeq(val64, &bar0->tx_w_round_robin_4);
1145 break;
1146 case 8:
1147 val64 = 0x0001020300040105ULL;
1148 writeq(val64, &bar0->tx_w_round_robin_0);
1149 val64 = 0x0200030106000204ULL;
1150 writeq(val64, &bar0->tx_w_round_robin_1);
1151 val64 = 0x0103000502010007ULL;
1152 writeq(val64, &bar0->tx_w_round_robin_2);
1153 val64 = 0x0304010002060500ULL;
1154 writeq(val64, &bar0->tx_w_round_robin_3);
1155 val64 = 0x0103020400000000ULL;
1156 writeq(val64, &bar0->tx_w_round_robin_4);
1157 break;
1158 }
1159
1160 /* Filling the Rx round robin registers as per the
1161 * number of Rings and steering based on QoS.
1162 */
1163 switch (config->rx_ring_num) {
1164 case 1:
1165 val64 = 0x8080808080808080ULL;
1166 writeq(val64, &bar0->rts_qos_steering);
1167 break;
1168 case 2:
1169 val64 = 0x0000010000010000ULL;
1170 writeq(val64, &bar0->rx_w_round_robin_0);
1171 val64 = 0x0100000100000100ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_1);
1173 val64 = 0x0001000001000001ULL;
1174 writeq(val64, &bar0->rx_w_round_robin_2);
1175 val64 = 0x0000010000010000ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_3);
1177 val64 = 0x0100000000000000ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_4);
1179
1180 val64 = 0x8080808040404040ULL;
1181 writeq(val64, &bar0->rts_qos_steering);
1182 break;
1183 case 3:
1184 val64 = 0x0001000102000001ULL;
1185 writeq(val64, &bar0->rx_w_round_robin_0);
1186 val64 = 0x0001020000010001ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_1);
1188 val64 = 0x0200000100010200ULL;
1189 writeq(val64, &bar0->rx_w_round_robin_2);
1190 val64 = 0x0001000102000001ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_3);
1192 val64 = 0x0001020000000000ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_4);
1194
1195 val64 = 0x8080804040402020ULL;
1196 writeq(val64, &bar0->rts_qos_steering);
1197 break;
1198 case 4:
1199 val64 = 0x0001020300010200ULL;
1200 writeq(val64, &bar0->rx_w_round_robin_0);
1201 val64 = 0x0100000102030001ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_1);
1203 val64 = 0x0200010000010203ULL;
1204 writeq(val64, &bar0->rx_w_round_robin_2);
1205 val64 = 0x0001020001000001ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_3);
1207 val64 = 0x0203000100000000ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_4);
1209
1210 val64 = 0x8080404020201010ULL;
1211 writeq(val64, &bar0->rts_qos_steering);
1212 break;
1213 case 5:
1214 val64 = 0x0001000203000102ULL;
1215 writeq(val64, &bar0->rx_w_round_robin_0);
1216 val64 = 0x0001020001030004ULL;
1217 writeq(val64, &bar0->rx_w_round_robin_1);
1218 val64 = 0x0001000203000102ULL;
1219 writeq(val64, &bar0->rx_w_round_robin_2);
1220 val64 = 0x0001020001030004ULL;
1221 writeq(val64, &bar0->rx_w_round_robin_3);
1222 val64 = 0x0001000000000000ULL;
1223 writeq(val64, &bar0->rx_w_round_robin_4);
1224
1225 val64 = 0x8080404020201008ULL;
1226 writeq(val64, &bar0->rts_qos_steering);
1227 break;
1228 case 6:
1229 val64 = 0x0001020304000102ULL;
1230 writeq(val64, &bar0->rx_w_round_robin_0);
1231 val64 = 0x0304050001020001ULL;
1232 writeq(val64, &bar0->rx_w_round_robin_1);
1233 val64 = 0x0203000100000102ULL;
1234 writeq(val64, &bar0->rx_w_round_robin_2);
1235 val64 = 0x0304000102030405ULL;
1236 writeq(val64, &bar0->rx_w_round_robin_3);
1237 val64 = 0x0001000200000000ULL;
1238 writeq(val64, &bar0->rx_w_round_robin_4);
1239
1240 val64 = 0x8080404020100804ULL;
1241 writeq(val64, &bar0->rts_qos_steering);
1242 break;
1243 case 7:
1244 val64 = 0x0001020001020300ULL;
1245 writeq(val64, &bar0->rx_w_round_robin_0);
1246 val64 = 0x0102030400010203ULL;
1247 writeq(val64, &bar0->rx_w_round_robin_1);
1248 val64 = 0x0405060001020001ULL;
1249 writeq(val64, &bar0->rx_w_round_robin_2);
1250 val64 = 0x0304050000010200ULL;
1251 writeq(val64, &bar0->rx_w_round_robin_3);
1252 val64 = 0x0102030000000000ULL;
1253 writeq(val64, &bar0->rx_w_round_robin_4);
1254
1255 val64 = 0x8080402010080402ULL;
1256 writeq(val64, &bar0->rts_qos_steering);
1257 break;
1258 case 8:
1259 val64 = 0x0001020300040105ULL;
1260 writeq(val64, &bar0->rx_w_round_robin_0);
1261 val64 = 0x0200030106000204ULL;
1262 writeq(val64, &bar0->rx_w_round_robin_1);
1263 val64 = 0x0103000502010007ULL;
1264 writeq(val64, &bar0->rx_w_round_robin_2);
1265 val64 = 0x0304010002060500ULL;
1266 writeq(val64, &bar0->rx_w_round_robin_3);
1267 val64 = 0x0103020400000000ULL;
1268 writeq(val64, &bar0->rx_w_round_robin_4);
1269
1270 val64 = 0x8040201008040201ULL;
1271 writeq(val64, &bar0->rts_qos_steering);
1272 break;
1273 }
1274
1275 /* UDP Fix */
1276 val64 = 0;
1277 for (i = 0; i < 8; i++)
1278 writeq(val64, &bar0->rts_frm_len_n[i]);
1279
1280 /* Set the default rts frame length for the rings configured */
1281 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1282 for (i = 0 ; i < config->rx_ring_num ; i++)
1283 writeq(val64, &bar0->rts_frm_len_n[i]);
1284
1285 /* Set the frame length for the configured rings
1286 * as desired by the user.
1287 */
1288 for (i = 0; i < config->rx_ring_num; i++) {
1289 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1290 * not specified frame length steering.
1291 * If the user provides a frame length then program
1292 * the rts_frm_len register with those values; otherwise
1293 * leave it as it is.
1294 */
1295 if (rts_frm_len[i] != 0) {
1296 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1297 &bar0->rts_frm_len_n[i]);
1298 }
1299 }
1300
1301 /* Program statistics memory */
1302 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1303
1304 if (nic->device_type == XFRAME_II_DEVICE) {
1305 val64 = STAT_BC(0x320);
1306 writeq(val64, &bar0->stat_byte_cnt);
1307 }
1308
1309 /*
1310 * Initializing the sampling rate for the device to calculate the
1311 * bandwidth utilization.
1312 */
1313 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1314 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1315 writeq(val64, &bar0->mac_link_util);
1316
1317
1318 /*
1319 * Initializing the Transmit and Receive Traffic Interrupt
1320 * Scheme.
1321 */
1322 /*
1323 * TTI Initialization. Default Tx timer gets us about
1324 * 250 interrupts per sec. Continuous interrupts are enabled
1325 * by default.
1326 */
1327 if (nic->device_type == XFRAME_II_DEVICE) {
1328 int count = (nic->config.bus_speed * 125)/2;
1329 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1330 } else {
1331
1332 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1333 }
1334 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1335 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1336 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1337 if (use_continuous_tx_intrs)
1338 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1339 writeq(val64, &bar0->tti_data1_mem);
1340
1341 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1342 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1343 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1344 writeq(val64, &bar0->tti_data2_mem);
1345
1346 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1347 writeq(val64, &bar0->tti_command_mem);
1348
1349 /*
1350 * Once the operation completes, the Strobe bit of the command
1351 * register will be reset. We poll for this particular condition.
1352 * We wait for a maximum of 500ms for the operation to complete;
1353 * if it's not complete by then we return an error.
1354 */
1355 time = 0;
1356 while (TRUE) {
1357 val64 = readq(&bar0->tti_command_mem);
1358 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1359 break;
1360 }
1361 if (time > 10) {
1362 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1363 dev->name);
1364 return -1;
1365 }
1366 msleep(50);
1367 time++;
1368 }
1369
1370 if (nic->config.bimodal) {
1371 int k = 0;
1372 for (k = 0; k < config->rx_ring_num; k++) {
1373 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1374 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1375 writeq(val64, &bar0->tti_command_mem);
1376
1377 /*
1378 * Once the operation completes, the Strobe bit of the command
1379 * register will be reset. We poll for this particular condition.
1380 * We wait for a maximum of 500ms for the operation to complete;
1381 * if it's not complete by then we return an error.
1382 */
1383 time = 0;
1384 while (TRUE) {
1385 val64 = readq(&bar0->tti_command_mem);
1386 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1387 break;
1388 }
1389 if (time > 10) {
1390 DBG_PRINT(ERR_DBG,
1391 "%s: TTI init Failed\n",
1392 dev->name);
1393 return -1;
1394 }
1395 time++;
1396 msleep(50);
1397 }
1398 }
1399 } else {
1400
1401 /* RTI Initialization */
1402 if (nic->device_type == XFRAME_II_DEVICE) {
1403 /*
1404 * Programmed to generate approx. 500 interrupts per
1405 * second
1406 */
1407 int count = (nic->config.bus_speed * 125)/4;
1408 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1409 } else {
1410 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1411 }
1412 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1413 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1414 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1415
1416 writeq(val64, &bar0->rti_data1_mem);
1417
1418 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1419 RTI_DATA2_MEM_RX_UFC_B(0x2);
1420 if (nic->intr_type == MSI_X)
1421 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1422 RTI_DATA2_MEM_RX_UFC_D(0x40));
1423 else
1424 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1425 RTI_DATA2_MEM_RX_UFC_D(0x80));
1426 writeq(val64, &bar0->rti_data2_mem);
1427
1428 for (i = 0; i < config->rx_ring_num; i++) {
1429 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1430 | RTI_CMD_MEM_OFFSET(i);
1431 writeq(val64, &bar0->rti_command_mem);
1432
1433 /*
1434 * Once the operation completes, the Strobe bit of the
1435 * command register will be reset. We poll for this
1436 * particular condition. We wait for a maximum of 500ms
1437 * for the operation to complete; if it's not complete
1438 * by then we return an error.
1439 */
1440 time = 0;
1441 while (TRUE) {
1442 val64 = readq(&bar0->rti_command_mem);
1443 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1444 break;
1445 }
1446 if (time > 10) {
1447 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1448 dev->name);
1449 return -1;
1450 }
1451 time++;
1452 msleep(50);
1453 }
1454 }
1455 }
1456
1457 /*
1458 * Initializing proper values as the pause threshold for all
1459 * the 8 Queues on the Rx side.
1460 */
1461 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1462 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1463
1464 /* Disable RMAC PAD STRIPPING */
1465 add = &bar0->mac_cfg;
1466 val64 = readq(&bar0->mac_cfg);
1467 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1468 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1469 writel((u32) (val64), add);
1470 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1471 writel((u32) (val64 >> 32), (add + 4));
1472 val64 = readq(&bar0->mac_cfg);
1473
1474 /*
1475 * Set the time value to be inserted in the pause frame
1476 * generated by xena.
1477 */
1478 val64 = readq(&bar0->rmac_pause_cfg);
1479 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1480 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1481 writeq(val64, &bar0->rmac_pause_cfg);
1482
1483 /*
1484 * Set the threshold limit for generating the pause frame.
1485 * If the amount of data in any Queue exceeds the ratio
1486 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
1487 * pause frame is generated.
1488 */
1489 val64 = 0;
1490 for (i = 0; i < 4; i++) {
1491 val64 |=
1492 (((u64) 0xFF00 | nic->mac_control.
1493 mc_pause_threshold_q0q3)
1494 << (i * 2 * 8));
1495 }
1496 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1497
1498 val64 = 0;
1499 for (i = 0; i < 4; i++) {
1500 val64 |=
1501 (((u64) 0xFF00 | nic->mac_control.
1502 mc_pause_threshold_q4q7)
1503 << (i * 2 * 8));
1504 }
1505 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1506
1507 /*
1508 * TxDMA will stop issuing read requests if the number of read
1509 * splits exceeds the limit set by shared_splits.
1510 */
1511 val64 = readq(&bar0->pic_control);
1512 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1513 writeq(val64, &bar0->pic_control);
1514
1515 /*
1516 * Programming the Herc to split every write transaction
1517 * that does not start on an ADB to reduce disconnects.
1518 */
1519 if (nic->device_type == XFRAME_II_DEVICE) {
1520 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1521 writeq(val64, &bar0->wreq_split_mask);
1522 }
1523
1524 /* Setting Link stability period to 64 ms */
1525 if (nic->device_type == XFRAME_II_DEVICE) {
1526 val64 = MISC_LINK_STABILITY_PRD(3);
1527 writeq(val64, &bar0->misc_control);
1528 }
1529
1530 return SUCCESS;
1531 }
1532 #define LINK_UP_DOWN_INTERRUPT 1
1533 #define MAC_RMAC_ERR_TIMER 2
1534
1535 int s2io_link_fault_indication(nic_t *nic)
1536 {
1537 if (nic->intr_type != INTA)
1538 return MAC_RMAC_ERR_TIMER;
1539 if (nic->device_type == XFRAME_II_DEVICE)
1540 return LINK_UP_DOWN_INTERRUPT;
1541 else
1542 return MAC_RMAC_ERR_TIMER;
1543 }
1544
1545 /**
1546 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1547 * @nic: device private variable.
1548 * @mask: A mask indicating which Intr block must be modified.
1549 * @flag: A flag indicating whether to enable or disable the Intrs.
1550 * Description: This function will either disable or enable the interrupts
1551 * depending on the flag argument. The mask argument can be used to
1552 * enable/disable any Intr block.
1553 * Return Value: NONE.
1554 */
1555
1556 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1557 {
1558 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1559 register u64 val64 = 0, temp64 = 0;
1560
1561 /* Top level interrupt classification */
1562 /* PIC Interrupts */
1563 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1564 /* Enable PIC Intrs in the general intr mask register */
1565 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1566 if (flag == ENABLE_INTRS) {
1567 temp64 = readq(&bar0->general_int_mask);
1568 temp64 &= ~((u64) val64);
1569 writeq(temp64, &bar0->general_int_mask);
1570 /*
1571 * If this is a Hercules adapter, enable GPIO; otherwise
1572 * keep all PCIX, Flash, MDIO, IIC and GPIO
1573 * interrupts disabled for now.
1574 * TODO
1575 */
1576 if (s2io_link_fault_indication(nic) ==
1577 LINK_UP_DOWN_INTERRUPT ) {
1578 temp64 = readq(&bar0->pic_int_mask);
1579 temp64 &= ~((u64) PIC_INT_GPIO);
1580 writeq(temp64, &bar0->pic_int_mask);
1581 temp64 = readq(&bar0->gpio_int_mask);
1582 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1583 writeq(temp64, &bar0->gpio_int_mask);
1584 } else {
1585 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1586 }
1587 /*
1588 * No MSI Support is available presently, so TTI and
1589 * RTI interrupts are also disabled.
1590 */
1591 } else if (flag == DISABLE_INTRS) {
1592 /*
1593 * Disable PIC Intrs in the general
1594 * intr mask register
1595 */
1596 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1597 temp64 = readq(&bar0->general_int_mask);
1598 val64 |= temp64;
1599 writeq(val64, &bar0->general_int_mask);
1600 }
1601 }
1602
1603 /* DMA Interrupts */
1604 /* Enabling/Disabling Tx DMA interrupts */
1605 if (mask & TX_DMA_INTR) {
1606 /* Enable TxDMA Intrs in the general intr mask register */
1607 val64 = TXDMA_INT_M;
1608 if (flag == ENABLE_INTRS) {
1609 temp64 = readq(&bar0->general_int_mask);
1610 temp64 &= ~((u64) val64);
1611 writeq(temp64, &bar0->general_int_mask);
1612 /*
1613 * Keep all interrupts other than PFC interrupt
1614 * and PCC interrupt disabled in DMA level.
1615 */
1616 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1617 TXDMA_PCC_INT_M);
1618 writeq(val64, &bar0->txdma_int_mask);
1619 /*
1620 * Enable only the MISC error 1 interrupt in PFC block
1621 */
1622 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1623 writeq(val64, &bar0->pfc_err_mask);
1624 /*
1625 * Enable only the FB_ECC error interrupt in PCC block
1626 */
1627 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1628 writeq(val64, &bar0->pcc_err_mask);
1629 } else if (flag == DISABLE_INTRS) {
1630 /*
1631 * Disable TxDMA Intrs in the general intr mask
1632 * register
1633 */
1634 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1635 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1636 temp64 = readq(&bar0->general_int_mask);
1637 val64 |= temp64;
1638 writeq(val64, &bar0->general_int_mask);
1639 }
1640 }
1641
1642 /* Enabling/Disabling Rx DMA interrupts */
1643 if (mask & RX_DMA_INTR) {
1644 /* Enable RxDMA Intrs in the general intr mask register */
1645 val64 = RXDMA_INT_M;
1646 if (flag == ENABLE_INTRS) {
1647 temp64 = readq(&bar0->general_int_mask);
1648 temp64 &= ~((u64) val64);
1649 writeq(temp64, &bar0->general_int_mask);
1650 /*
1651 * All RxDMA block interrupts are disabled for now
1652 * TODO
1653 */
1654 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1655 } else if (flag == DISABLE_INTRS) {
1656 /*
1657 * Disable RxDMA Intrs in the general intr mask
1658 * register
1659 */
1660 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1661 temp64 = readq(&bar0->general_int_mask);
1662 val64 |= temp64;
1663 writeq(val64, &bar0->general_int_mask);
1664 }
1665 }
1666
1667 /* MAC Interrupts */
1668 /* Enabling/Disabling MAC interrupts */
1669 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1670 val64 = TXMAC_INT_M | RXMAC_INT_M;
1671 if (flag == ENABLE_INTRS) {
1672 temp64 = readq(&bar0->general_int_mask);
1673 temp64 &= ~((u64) val64);
1674 writeq(temp64, &bar0->general_int_mask);
1675 /*
1676 * All MAC block error interrupts are disabled for now
1677 * TODO
1678 */
1679 } else if (flag == DISABLE_INTRS) {
1680 /*
1681 * Disable MAC Intrs in the general intr mask register
1682 */
1683 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1684 writeq(DISABLE_ALL_INTRS,
1685 &bar0->mac_rmac_err_mask);
1686
1687 temp64 = readq(&bar0->general_int_mask);
1688 val64 |= temp64;
1689 writeq(val64, &bar0->general_int_mask);
1690 }
1691 }
1692
1693 /* XGXS Interrupts */
1694 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1695 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1696 if (flag == ENABLE_INTRS) {
1697 temp64 = readq(&bar0->general_int_mask);
1698 temp64 &= ~((u64) val64);
1699 writeq(temp64, &bar0->general_int_mask);
1700 /*
1701 * All XGXS block error interrupts are disabled for now
1702 * TODO
1703 */
1704 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1705 } else if (flag == DISABLE_INTRS) {
1706 /*
1707 * Disable MC Intrs in the general intr mask register
1708 */
1709 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1710 temp64 = readq(&bar0->general_int_mask);
1711 val64 |= temp64;
1712 writeq(val64, &bar0->general_int_mask);
1713 }
1714 }
1715
1716 /* Memory Controller(MC) interrupts */
1717 if (mask & MC_INTR) {
1718 val64 = MC_INT_M;
1719 if (flag == ENABLE_INTRS) {
1720 temp64 = readq(&bar0->general_int_mask);
1721 temp64 &= ~((u64) val64);
1722 writeq(temp64, &bar0->general_int_mask);
1723 /*
1724 * Enable all MC Intrs.
1725 */
1726 writeq(0x0, &bar0->mc_int_mask);
1727 writeq(0x0, &bar0->mc_err_mask);
1728 } else if (flag == DISABLE_INTRS) {
1729 /*
1730 * Disable MC Intrs in the general intr mask register
1731 */
1732 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1733 temp64 = readq(&bar0->general_int_mask);
1734 val64 |= temp64;
1735 writeq(val64, &bar0->general_int_mask);
1736 }
1737 }
1738
1739
1740 /* Tx traffic interrupts */
1741 if (mask & TX_TRAFFIC_INTR) {
1742 val64 = TXTRAFFIC_INT_M;
1743 if (flag == ENABLE_INTRS) {
1744 temp64 = readq(&bar0->general_int_mask);
1745 temp64 &= ~((u64) val64);
1746 writeq(temp64, &bar0->general_int_mask);
1747 /*
1748 * Enable all the Tx side interrupts
1749 * writing 0 Enables all 64 TX interrupt levels
1750 */
1751 writeq(0x0, &bar0->tx_traffic_mask);
1752 } else if (flag == DISABLE_INTRS) {
1753 /*
1754 * Disable Tx Traffic Intrs in the general intr mask
1755 * register.
1756 */
1757 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1758 temp64 = readq(&bar0->general_int_mask);
1759 val64 |= temp64;
1760 writeq(val64, &bar0->general_int_mask);
1761 }
1762 }
1763
1764 /* Rx traffic interrupts */
1765 if (mask & RX_TRAFFIC_INTR) {
1766 val64 = RXTRAFFIC_INT_M;
1767 if (flag == ENABLE_INTRS) {
1768 temp64 = readq(&bar0->general_int_mask);
1769 temp64 &= ~((u64) val64);
1770 writeq(temp64, &bar0->general_int_mask);
1771 /* writing 0 Enables all 8 RX interrupt levels */
1772 writeq(0x0, &bar0->rx_traffic_mask);
1773 } else if (flag == DISABLE_INTRS) {
1774 /*
1775 * Disable Rx Traffic Intrs in the general intr mask
1776 * register.
1777 */
1778 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1779 temp64 = readq(&bar0->general_int_mask);
1780 val64 |= temp64;
1781 writeq(val64, &bar0->general_int_mask);
1782 }
1783 }
1784 }
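/*
 * A call sketch for the helper above (the mask composition is
 * illustrative): enable only the Tx and Rx traffic interrupts,
 * leaving the error blocks masked.
 *
 *	en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *			      ENABLE_INTRS);
 */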
1785
1786 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1787 {
1788 int ret = 0;
1789
1790 if (flag == FALSE) {
1791 if ((!herc && (rev_id >= 4)) || herc) {
1792 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1793 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1794 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1795 ret = 1;
1796 }
1797 } else {
1798 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1799 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1800 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1801 ret = 1;
1802 }
1803 }
1804 } else {
1805 if ((!herc && (rev_id >= 4)) || herc) {
1806 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1807 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1808 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1809 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1810 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1811 ret = 1;
1812 }
1813 } else {
1814 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1815 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1816 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1817 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1818 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1819 ret = 1;
1820 }
1821 }
1822 }
1823
1824 return ret;
1825 }
1826 /**
1827 * verify_xena_quiescence - Checks whether the H/W is ready
1828 * @val64 : Value read from adapter status register.
1829 * @flag : indicates if the adapter enable bit was ever written once
1830 * before.
1831 * Description: Returns whether the H/W is ready to go or not. Depending
1832 * on whether adapter enable bit was written or not the comparison
1833 * differs and the calling function passes the input argument flag to
1834 * indicate this.
1835  * Return: 1 if Xena is quiescent
1836  *         0 if Xena is not quiescent
1837 */
1838
1839 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1840 {
1841 int ret = 0, herc;
1842 u64 tmp64 = ~((u64) val64);
1843 int rev_id = get_xena_rev_id(sp->pdev);
1844
1845 herc = (sp->device_type == XFRAME_II_DEVICE);
1846 	if (!(tmp64 & (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1847 		       ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1848 		       ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1849 		       ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1850 		       ADAPTER_STATUS_P_PLL_LOCK))) {
1853 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1854 }
1855
1856 return ret;
1857 }
1858
1859 /**
1860 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1861  * @sp: Pointer to device specific structure
1862 * Description :
1863 * New procedure to clear mac address reading problems on Alpha platforms
1864 *
1865 */
1866
1867 void fix_mac_address(nic_t * sp)
1868 {
1869 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1870 u64 val64;
1871 int i = 0;
1872
1873 while (fix_mac[i] != END_SIGN) {
1874 writeq(fix_mac[i++], &bar0->gpio_control);
1875 udelay(10);
1876 val64 = readq(&bar0->gpio_control);
1877 }
1878 }
1879
1880 /**
1881 * start_nic - Turns the device on
1882 * @nic : device private variable.
1883 * Description:
1884 * This function actually turns the device on. Before this function is
1885  * called, all registers are configured from their reset states
1886 * and shared memory is allocated but the NIC is still quiescent. On
1887 * calling this function, the device interrupts are cleared and the NIC is
1888 * literally switched on by writing into the adapter control register.
1889 * Return Value:
1890 * SUCCESS on success and -1 on failure.
1891 */
1892
1893 static int start_nic(struct s2io_nic *nic)
1894 {
1895 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1896 struct net_device *dev = nic->dev;
1897 register u64 val64 = 0;
1898 u16 interruptible;
1899 u16 subid, i;
1900 mac_info_t *mac_control;
1901 struct config_param *config;
1902
1903 mac_control = &nic->mac_control;
1904 config = &nic->config;
1905
1906 /* PRC Initialization and configuration */
1907 for (i = 0; i < config->rx_ring_num; i++) {
1908 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1909 &bar0->prc_rxd0_n[i]);
1910
1911 val64 = readq(&bar0->prc_ctrl_n[i]);
1912 if (nic->config.bimodal)
1913 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1914 if (nic->rxd_mode == RXD_MODE_1)
1915 val64 |= PRC_CTRL_RC_ENABLED;
1916 else
1917 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1918 writeq(val64, &bar0->prc_ctrl_n[i]);
1919 }
1920
1921 if (nic->rxd_mode == RXD_MODE_3B) {
1922 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1923 val64 = readq(&bar0->rx_pa_cfg);
1924 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1925 writeq(val64, &bar0->rx_pa_cfg);
1926 }
1927
1928 /*
1929 	 * Enabling MC-RLDRAM. After enabling the device, we wait
1930 	 * for around 100ms, which is approximately the time required
1931 * for the device to be ready for operation.
1932 */
1933 val64 = readq(&bar0->mc_rldram_mrs);
1934 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1935 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1936 val64 = readq(&bar0->mc_rldram_mrs);
1937
1938 msleep(100); /* Delay by around 100 ms. */
1939
1940 /* Enabling ECC Protection. */
1941 val64 = readq(&bar0->adapter_control);
1942 val64 &= ~ADAPTER_ECC_EN;
1943 writeq(val64, &bar0->adapter_control);
1944
1945 /*
1946 * Clearing any possible Link state change interrupts that
1947 * could have popped up just before Enabling the card.
1948 */
1949 val64 = readq(&bar0->mac_rmac_err_reg);
1950 if (val64)
1951 writeq(val64, &bar0->mac_rmac_err_reg);
1952
1953 /*
1954 * Verify if the device is ready to be enabled, if so enable
1955 * it.
1956 */
1957 val64 = readq(&bar0->adapter_status);
1958 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1959 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1960 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1961 (unsigned long long) val64);
1962 return FAILURE;
1963 }
1964
1965 /* Enable select interrupts */
1966 if (nic->intr_type != INTA)
1967 en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
1968 else {
1969 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1970 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1971 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1972 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1973 }
1974
1975 /*
1976 * With some switches, link might be already up at this point.
1977 * Because of this weird behavior, when we enable laser,
1978 * we may not get link. We need to handle this. We cannot
1979 * figure out which switch is misbehaving. So we are forced to
1980 * make a global change.
1981 */
1982
1983 /* Enabling Laser. */
1984 val64 = readq(&bar0->adapter_control);
1985 val64 |= ADAPTER_EOI_TX_ON;
1986 writeq(val64, &bar0->adapter_control);
1987
1988 /* SXE-002: Initialize link and activity LED */
1989 subid = nic->pdev->subsystem_device;
1990 if (((subid & 0xFF) >= 0x07) &&
1991 (nic->device_type == XFRAME_I_DEVICE)) {
1992 val64 = readq(&bar0->gpio_control);
1993 val64 |= 0x0000800000000000ULL;
1994 writeq(val64, &bar0->gpio_control);
1995 val64 = 0x0411040400000000ULL;
1996 writeq(val64, (void __iomem *)bar0 + 0x2700);
1997 }
1998
1999 /*
2000 	 * We don't see link state interrupts on certain switches, so
2001 	 * we directly schedule a link state task from here.
2002 */
2003 schedule_work(&nic->set_link_task);
2004
2005 return SUCCESS;
2006 }
2007
2008 /**
2009 * free_tx_buffers - Free all queued Tx buffers
2010 * @nic : device private variable.
2011 * Description:
2012 * Free all queued Tx buffers.
2013 * Return Value: void
2014 */
2015
2016 static void free_tx_buffers(struct s2io_nic *nic)
2017 {
2018 struct net_device *dev = nic->dev;
2019 struct sk_buff *skb;
2020 TxD_t *txdp;
2021 	int i, j, k;
2022 mac_info_t *mac_control;
2023 struct config_param *config;
2024 int cnt = 0, frg_cnt;
2025
2026 mac_control = &nic->mac_control;
2027 config = &nic->config;
2028
2029 for (i = 0; i < config->tx_fifo_num; i++) {
2030 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2031 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2032 list_virt_addr;
2033 skb =
2034 (struct sk_buff *) ((unsigned long) txdp->
2035 Host_Control);
2036 if (skb == NULL) {
2037 memset(txdp, 0, sizeof(TxD_t) *
2038 config->max_txds);
2039 continue;
2040 }
2041 frg_cnt = skb_shinfo(skb)->nr_frags;
2042 pci_unmap_single(nic->pdev, (dma_addr_t)
2043 txdp->Buffer_Pointer,
2044 skb->len - skb->data_len,
2045 PCI_DMA_TODEVICE);
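			/*
			 * Remember the first TxD so the whole descriptor
			 * list can be cleared after the fragment pages are
			 * unmapped; a separate index 'k' walks the frags so
			 * the outer loop counter 'j' is not clobbered.
			 */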
2046 if (frg_cnt) {
2047 TxD_t *temp;
2048 temp = txdp;
2049 txdp++;
2050 				for (k = 0; k < frg_cnt; k++, txdp++) {
2051 					skb_frag_t *frag =
2052 					    &skb_shinfo(skb)->frags[k];
2053 pci_unmap_page(nic->pdev,
2054 (dma_addr_t)
2055 txdp->
2056 Buffer_Pointer,
2057 frag->size,
2058 PCI_DMA_TODEVICE);
2059 }
2060 txdp = temp;
2061 }
2062 dev_kfree_skb(skb);
2063 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2064 cnt++;
2065 }
2066 DBG_PRINT(INTR_DBG,
2067 "%s:forcibly freeing %d skbs on FIFO%d\n",
2068 dev->name, cnt, i);
2069 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2070 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2071 }
2072 }
2073
2074 /**
2075 * stop_nic - To stop the nic
2076  * @nic : device private variable.
2077 * Description:
2078 * This function does exactly the opposite of what the start_nic()
2079 * function does. This function is called to stop the device.
2080 * Return Value:
2081 * void.
2082 */
2083
2084 static void stop_nic(struct s2io_nic *nic)
2085 {
2086 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2087 register u64 val64 = 0;
2088 u16 interruptible, i;
2089 mac_info_t *mac_control;
2090 struct config_param *config;
2091
2092 mac_control = &nic->mac_control;
2093 config = &nic->config;
2094
2095 /* Disable all interrupts */
2096 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2097 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2098 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2099 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2100
2101 /* Disable PRCs */
2102 for (i = 0; i < config->rx_ring_num; i++) {
2103 val64 = readq(&bar0->prc_ctrl_n[i]);
2104 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2105 writeq(val64, &bar0->prc_ctrl_n[i]);
2106 }
2107 }
2108
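/*
 * fill_rxd_3buf - populates a 3-buffer mode RxD.
 * Buffer-1 is pointed at the L3/L4 header area of @skb, while a
 * freshly allocated skb hung off skb_shinfo(skb)->frag_list
 * receives the L4 data payload.
 * Returns SUCCESS, or -ENOMEM if the payload skb cannot be
 * allocated.
 */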
2109 int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2110 {
2111 struct net_device *dev = nic->dev;
2112 struct sk_buff *frag_list;
2113 void *tmp;
2114
2115 /* Buffer-1 receives L3/L4 headers */
2116 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2117 (nic->pdev, skb->data, l3l4hdr_size + 4,
2118 PCI_DMA_FROMDEVICE);
2119
2120 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2121 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2122 if (skb_shinfo(skb)->frag_list == NULL) {
2123 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2124 return -ENOMEM ;
2125 }
2126 frag_list = skb_shinfo(skb)->frag_list;
2127 frag_list->next = NULL;
2128 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2129 frag_list->data = tmp;
2130 frag_list->tail = tmp;
2131
2132 /* Buffer-2 receives L4 data payload */
2133 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2134 frag_list->data, dev->mtu,
2135 PCI_DMA_FROMDEVICE);
2136 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2137 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2138
2139 return SUCCESS;
2140 }
2141
2142 /**
2143 * fill_rx_buffers - Allocates the Rx side skbs
2144 * @nic: device private variable
2145 * @ring_no: ring number
2146 * Description:
2147 * The function allocates Rx side skbs and puts the physical
2148 * address of these buffers into the RxD buffer pointers, so that the NIC
2149 * can DMA the received frame into these locations.
2150 * The NIC supports 3 receive modes, viz
2151 * 1. single buffer,
2152 * 2. three buffer and
2153  *  3. five buffer modes.
2154  *  Each mode defines how many fragments the received frame will be split
2155  *  up into by the NIC. In three buffer mode the frame is split into L3
2156  *  header, L4 header and L4 payload; in five buffer mode the L4 payload
2157  *  itself is further split into 3 fragments. As of now only single
2158  *  buffer mode is supported.
2159 * Return Value:
2160 * SUCCESS on success or an appropriate -ve value on failure.
2161 */
2162
2163 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2164 {
2165 struct net_device *dev = nic->dev;
2166 struct sk_buff *skb;
2167 RxD_t *rxdp;
2168 int off, off1, size, block_no, block_no1;
2169 u32 alloc_tab = 0;
2170 u32 alloc_cnt;
2171 mac_info_t *mac_control;
2172 struct config_param *config;
2173 u64 tmp;
2174 buffAdd_t *ba;
2175 #ifndef CONFIG_S2IO_NAPI
2176 unsigned long flags;
2177 #endif
2178 RxD_t *first_rxdp = NULL;
2179
2180 mac_control = &nic->mac_control;
2181 config = &nic->config;
2182 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2183 atomic_read(&nic->rx_bufs_left[ring_no]);
2184
2185 while (alloc_tab < alloc_cnt) {
2186 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2187 block_index;
2188 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2189 block_index;
2190 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2191 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2192
2193 rxdp = mac_control->rings[ring_no].
2194 rx_blocks[block_no].rxds[off].virt_addr;
2195
2196 if ((block_no == block_no1) && (off == off1) &&
2197 (rxdp->Host_Control)) {
2198 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2199 dev->name);
2200 DBG_PRINT(INTR_DBG, " info equated\n");
2201 goto end;
2202 }
2203 if (off && (off == rxd_count[nic->rxd_mode])) {
2204 mac_control->rings[ring_no].rx_curr_put_info.
2205 block_index++;
2206 if (mac_control->rings[ring_no].rx_curr_put_info.
2207 block_index == mac_control->rings[ring_no].
2208 block_count)
2209 mac_control->rings[ring_no].rx_curr_put_info.
2210 block_index = 0;
2211 block_no = mac_control->rings[ring_no].
2212 rx_curr_put_info.block_index;
2213 if (off == rxd_count[nic->rxd_mode])
2214 off = 0;
2215 mac_control->rings[ring_no].rx_curr_put_info.
2216 offset = off;
2217 rxdp = mac_control->rings[ring_no].
2218 rx_blocks[block_no].block_virt_addr;
2219 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2220 dev->name, rxdp);
2221 }
2222 #ifndef CONFIG_S2IO_NAPI
2223 spin_lock_irqsave(&nic->put_lock, flags);
2224 mac_control->rings[ring_no].put_pos =
2225 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2226 spin_unlock_irqrestore(&nic->put_lock, flags);
2227 #endif
2228 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2229 ((nic->rxd_mode >= RXD_MODE_3A) &&
2230 (rxdp->Control_2 & BIT(0)))) {
2231 mac_control->rings[ring_no].rx_curr_put_info.
2232 offset = off;
2233 goto end;
2234 }
2235 /* calculate size of skb based on ring mode */
2236 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2237 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2238 if (nic->rxd_mode == RXD_MODE_1)
2239 size += NET_IP_ALIGN;
2240 else if (nic->rxd_mode == RXD_MODE_3B)
2241 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2242 else
2243 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2244
2245 /* allocate skb */
2246 skb = dev_alloc_skb(size);
2247 if(!skb) {
2248 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2249 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2250 if (first_rxdp) {
2251 wmb();
2252 first_rxdp->Control_1 |= RXD_OWN_XENA;
2253 }
2254 return -ENOMEM ;
2255 }
2256 if (nic->rxd_mode == RXD_MODE_1) {
2257 /* 1 buffer mode - normal operation mode */
2258 memset(rxdp, 0, sizeof(RxD1_t));
2259 skb_reserve(skb, NET_IP_ALIGN);
2260 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2261 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2262 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
2263 rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);
2264
2265 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2266 /*
2267 * 2 or 3 buffer mode -
2268 			 * Both 2 buffer mode and 3 buffer mode provide 128
2269 			 * byte aligned receive buffers.
2270 			 *
2271 			 * 3 buffer mode provides header separation wherein
2272 			 * skb->data will have the L3/L4 headers whereas
2273 * skb_shinfo(skb)->frag_list will have the L4 data
2274 * payload
2275 */
2276
2277 memset(rxdp, 0, sizeof(RxD3_t));
2278 ba = &mac_control->rings[ring_no].ba[block_no][off];
2279 skb_reserve(skb, BUF0_LEN);
2280 tmp = (u64)(unsigned long) skb->data;
2281 tmp += ALIGN_SIZE;
2282 tmp &= ~ALIGN_SIZE;
2283 skb->data = (void *) (unsigned long)tmp;
2284 skb->tail = (void *) (unsigned long)tmp;
2285
2286 ((RxD3_t*)rxdp)->Buffer0_ptr =
2287 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2288 PCI_DMA_FROMDEVICE);
2289 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2290 if (nic->rxd_mode == RXD_MODE_3B) {
2291 /* Two buffer mode */
2292
2293 /*
2294 * Buffer2 will have L3/L4 header plus
2295 * L4 payload
2296 */
2297 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2298 (nic->pdev, skb->data, dev->mtu + 4,
2299 PCI_DMA_FROMDEVICE);
2300
2301 				/* Buffer-1 will be a dummy buffer, not used */
2302 ((RxD3_t*)rxdp)->Buffer1_ptr =
2303 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2304 PCI_DMA_FROMDEVICE);
2305 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2306 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2307 (dev->mtu + 4);
2308 } else {
2309 /* 3 buffer mode */
2310 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2311 dev_kfree_skb_irq(skb);
2312 if (first_rxdp) {
2313 wmb();
2314 first_rxdp->Control_1 |=
2315 RXD_OWN_XENA;
2316 }
2317 return -ENOMEM ;
2318 }
2319 }
2320 rxdp->Control_2 |= BIT(0);
2321 }
2322 rxdp->Host_Control = (unsigned long) (skb);
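		/*
		 * Ownership is handed to the NIC in batches of
		 * 2^rxsync_frequency descriptors: RxDs within a batch are
		 * marked RXD_OWN_XENA right away, while the first RxD of
		 * each batch is held back and flipped only after a wmb(),
		 * so the adapter never sees a partially initialized batch.
		 */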
2323 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2324 rxdp->Control_1 |= RXD_OWN_XENA;
2325 off++;
2326 if (off == (rxd_count[nic->rxd_mode] + 1))
2327 off = 0;
2328 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2329
2330 rxdp->Control_2 |= SET_RXD_MARKER;
2331 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2332 if (first_rxdp) {
2333 wmb();
2334 first_rxdp->Control_1 |= RXD_OWN_XENA;
2335 }
2336 first_rxdp = rxdp;
2337 }
2338 atomic_inc(&nic->rx_bufs_left[ring_no]);
2339 alloc_tab++;
2340 }
2341
2342 end:
2343 /* Transfer ownership of first descriptor to adapter just before
2344 * exiting. Before that, use memory barrier so that ownership
2345 * and other fields are seen by adapter correctly.
2346 */
2347 if (first_rxdp) {
2348 wmb();
2349 first_rxdp->Control_1 |= RXD_OWN_XENA;
2350 }
2351
2352 return SUCCESS;
2353 }
2354
2355 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2356 {
2357 struct net_device *dev = sp->dev;
2358 int j;
2359 struct sk_buff *skb;
2360 RxD_t *rxdp;
2361 mac_info_t *mac_control;
2362 buffAdd_t *ba;
2363
2364 mac_control = &sp->mac_control;
2365 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2366 rxdp = mac_control->rings[ring_no].
2367 rx_blocks[blk].rxds[j].virt_addr;
2368 skb = (struct sk_buff *)
2369 ((unsigned long) rxdp->Host_Control);
2370 if (!skb) {
2371 continue;
2372 }
2373 if (sp->rxd_mode == RXD_MODE_1) {
2374 pci_unmap_single(sp->pdev, (dma_addr_t)
2375 ((RxD1_t*)rxdp)->Buffer0_ptr,
2376 dev->mtu +
2377 HEADER_ETHERNET_II_802_3_SIZE
2378 + HEADER_802_2_SIZE +
2379 HEADER_SNAP_SIZE,
2380 PCI_DMA_FROMDEVICE);
2381 memset(rxdp, 0, sizeof(RxD1_t));
2382 } else if(sp->rxd_mode == RXD_MODE_3B) {
2383 ba = &mac_control->rings[ring_no].
2384 ba[blk][j];
2385 pci_unmap_single(sp->pdev, (dma_addr_t)
2386 ((RxD3_t*)rxdp)->Buffer0_ptr,
2387 BUF0_LEN,
2388 PCI_DMA_FROMDEVICE);
2389 pci_unmap_single(sp->pdev, (dma_addr_t)
2390 ((RxD3_t*)rxdp)->Buffer1_ptr,
2391 BUF1_LEN,
2392 PCI_DMA_FROMDEVICE);
2393 pci_unmap_single(sp->pdev, (dma_addr_t)
2394 ((RxD3_t*)rxdp)->Buffer2_ptr,
2395 dev->mtu + 4,
2396 PCI_DMA_FROMDEVICE);
2397 memset(rxdp, 0, sizeof(RxD3_t));
2398 } else {
2399 pci_unmap_single(sp->pdev, (dma_addr_t)
2400 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2401 PCI_DMA_FROMDEVICE);
2402 pci_unmap_single(sp->pdev, (dma_addr_t)
2403 ((RxD3_t*)rxdp)->Buffer1_ptr,
2404 l3l4hdr_size + 4,
2405 PCI_DMA_FROMDEVICE);
2406 pci_unmap_single(sp->pdev, (dma_addr_t)
2407 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2408 PCI_DMA_FROMDEVICE);
2409 memset(rxdp, 0, sizeof(RxD3_t));
2410 }
2411 dev_kfree_skb(skb);
2412 atomic_dec(&sp->rx_bufs_left[ring_no]);
2413 }
2414 }
2415
2416 /**
2417 * free_rx_buffers - Frees all Rx buffers
2418 * @sp: device private variable.
2419 * Description:
2420 * This function will free all Rx buffers allocated by host.
2421 * Return Value:
2422 * NONE.
2423 */
2424
2425 static void free_rx_buffers(struct s2io_nic *sp)
2426 {
2427 struct net_device *dev = sp->dev;
2428 int i, blk = 0, buf_cnt = 0;
2429 mac_info_t *mac_control;
2430 struct config_param *config;
2431
2432 mac_control = &sp->mac_control;
2433 config = &sp->config;
2434
2435 for (i = 0; i < config->rx_ring_num; i++) {
2436 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2437 free_rxd_blk(sp,i,blk);
2438
2439 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2440 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2441 mac_control->rings[i].rx_curr_put_info.offset = 0;
2442 mac_control->rings[i].rx_curr_get_info.offset = 0;
2443 atomic_set(&sp->rx_bufs_left[i], 0);
2444 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2445 dev->name, buf_cnt, i);
2446 }
2447 }
2448
2449 /**
2450 * s2io_poll - Rx interrupt handler for NAPI support
2451 * @dev : pointer to the device structure.
2452 * @budget : The number of packets that were budgeted to be processed
2453  * during one pass through the 'Poll' function.
2454  * Description:
2455  * Comes into the picture only if NAPI support has been incorporated. It does
2456  * the same thing that rx_intr_handler does, but not in an interrupt context,
2457  * and it will process only a given number of packets.
2458 * Return value:
2459  * 0 on success and 1 if there are no Rx packets to be processed.
2460 */
2461
2462 #if defined(CONFIG_S2IO_NAPI)
2463 static int s2io_poll(struct net_device *dev, int *budget)
2464 {
2465 nic_t *nic = dev->priv;
2466 int pkt_cnt = 0, org_pkts_to_process;
2467 mac_info_t *mac_control;
2468 struct config_param *config;
2469 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2470 u64 val64;
2471 int i;
2472
2473 atomic_inc(&nic->isr_cnt);
2474 mac_control = &nic->mac_control;
2475 config = &nic->config;
2476
2477 nic->pkts_to_process = *budget;
2478 if (nic->pkts_to_process > dev->quota)
2479 nic->pkts_to_process = dev->quota;
2480 org_pkts_to_process = nic->pkts_to_process;
2481
2482 val64 = readq(&bar0->rx_traffic_int);
2483 writeq(val64, &bar0->rx_traffic_int);
2484
2485 for (i = 0; i < config->rx_ring_num; i++) {
2486 rx_intr_handler(&mac_control->rings[i]);
2487 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2488 if (!nic->pkts_to_process) {
2489 /* Quota for the current iteration has been met */
2490 goto no_rx;
2491 }
2492 }
2493 if (!pkt_cnt)
2494 pkt_cnt = 1;
2495
2496 dev->quota -= pkt_cnt;
2497 *budget -= pkt_cnt;
2498 netif_rx_complete(dev);
2499
2500 for (i = 0; i < config->rx_ring_num; i++) {
2501 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2502 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2503 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2504 break;
2505 }
2506 }
2507 /* Re enable the Rx interrupts. */
2508 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2509 atomic_dec(&nic->isr_cnt);
2510 return 0;
2511
2512 no_rx:
2513 dev->quota -= pkt_cnt;
2514 *budget -= pkt_cnt;
2515
2516 for (i = 0; i < config->rx_ring_num; i++) {
2517 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2518 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2519 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2520 break;
2521 }
2522 }
2523 atomic_dec(&nic->isr_cnt);
2524 return 1;
2525 }
2526 #endif
2527
2528 /**
2529 * rx_intr_handler - Rx interrupt handler
2530  *  @ring_data: per-ring data structure.
2531 * Description:
2532 * If the interrupt is because of a received frame or if the
2533  *  receive ring contains fresh, as yet un-processed frames, this function is
2534  *  called. It picks out the RxD at which the last Rx processing stopped,
2535  *  sends the skb to the OSM's Rx handler and then increments
2536 * the offset.
2537 * Return Value:
2538 * NONE.
2539 */
2540 static void rx_intr_handler(ring_info_t *ring_data)
2541 {
2542 nic_t *nic = ring_data->nic;
2543 struct net_device *dev = (struct net_device *) nic->dev;
2544 int get_block, put_block, put_offset;
2545 rx_curr_get_info_t get_info, put_info;
2546 RxD_t *rxdp;
2547 struct sk_buff *skb;
2548 #ifndef CONFIG_S2IO_NAPI
2549 int pkt_cnt = 0;
2550 #endif
2551 spin_lock(&nic->rx_lock);
2552 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2553 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2554 __FUNCTION__, dev->name);
2555 spin_unlock(&nic->rx_lock);
2556 return;
2557 }
2558
2559 get_info = ring_data->rx_curr_get_info;
2560 get_block = get_info.block_index;
2561 put_info = ring_data->rx_curr_put_info;
2562 put_block = put_info.block_index;
2563 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2564 #ifndef CONFIG_S2IO_NAPI
2565 spin_lock(&nic->put_lock);
2566 put_offset = ring_data->put_pos;
2567 spin_unlock(&nic->put_lock);
2568 #else
2569 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
2570 put_info.offset;
2571 #endif
2572 while (RXD_IS_UP2DT(rxdp)) {
2573 		/* If we are next to the put index, the ring is full */
2574 if ((get_block == put_block) &&
2575 (get_info.offset + 1) == put_info.offset) {
2576 DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
2577 break;
2578 }
2579 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2580 if (skb == NULL) {
2581 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2582 dev->name);
2583 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2584 spin_unlock(&nic->rx_lock);
2585 return;
2586 }
2587 if (nic->rxd_mode == RXD_MODE_1) {
2588 pci_unmap_single(nic->pdev, (dma_addr_t)
2589 ((RxD1_t*)rxdp)->Buffer0_ptr,
2590 dev->mtu +
2591 HEADER_ETHERNET_II_802_3_SIZE +
2592 HEADER_802_2_SIZE +
2593 HEADER_SNAP_SIZE,
2594 PCI_DMA_FROMDEVICE);
2595 } else if (nic->rxd_mode == RXD_MODE_3B) {
2596 pci_unmap_single(nic->pdev, (dma_addr_t)
2597 ((RxD3_t*)rxdp)->Buffer0_ptr,
2598 BUF0_LEN, PCI_DMA_FROMDEVICE);
2599 pci_unmap_single(nic->pdev, (dma_addr_t)
2600 ((RxD3_t*)rxdp)->Buffer1_ptr,
2601 BUF1_LEN, PCI_DMA_FROMDEVICE);
2602 pci_unmap_single(nic->pdev, (dma_addr_t)
2603 ((RxD3_t*)rxdp)->Buffer2_ptr,
2604 dev->mtu + 4,
2605 PCI_DMA_FROMDEVICE);
2606 } else {
2607 pci_unmap_single(nic->pdev, (dma_addr_t)
2608 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2609 PCI_DMA_FROMDEVICE);
2610 pci_unmap_single(nic->pdev, (dma_addr_t)
2611 ((RxD3_t*)rxdp)->Buffer1_ptr,
2612 l3l4hdr_size + 4,
2613 PCI_DMA_FROMDEVICE);
2614 pci_unmap_single(nic->pdev, (dma_addr_t)
2615 ((RxD3_t*)rxdp)->Buffer2_ptr,
2616 dev->mtu, PCI_DMA_FROMDEVICE);
2617 }
2618 rx_osm_handler(ring_data, rxdp);
2619 get_info.offset++;
2620 ring_data->rx_curr_get_info.offset = get_info.offset;
2621 rxdp = ring_data->rx_blocks[get_block].
2622 rxds[get_info.offset].virt_addr;
2623 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2624 get_info.offset = 0;
2625 ring_data->rx_curr_get_info.offset = get_info.offset;
2626 get_block++;
2627 if (get_block == ring_data->block_count)
2628 get_block = 0;
2629 ring_data->rx_curr_get_info.block_index = get_block;
2630 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2631 }
2632
2633 #ifdef CONFIG_S2IO_NAPI
2634 nic->pkts_to_process -= 1;
2635 if (!nic->pkts_to_process)
2636 break;
2637 #else
2638 pkt_cnt++;
2639 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2640 break;
2641 #endif
2642 }
2643 spin_unlock(&nic->rx_lock);
2644 }
2645
2646 /**
2647 * tx_intr_handler - Transmit interrupt handler
2648  *  @fifo_data : per-FIFO data structure
2649 * Description:
2650 * If an interrupt was raised to indicate DMA complete of the
2651 * Tx packet, this function is called. It identifies the last TxD
2652  *  whose buffer was freed and frees all skbs whose data have already
2653  *  been DMA'ed into the NIC's internal memory.
2654 * Return Value:
2655 * NONE
2656 */
2657
2658 static void tx_intr_handler(fifo_info_t *fifo_data)
2659 {
2660 nic_t *nic = fifo_data->nic;
2661 struct net_device *dev = (struct net_device *) nic->dev;
2662 tx_curr_get_info_t get_info, put_info;
2663 struct sk_buff *skb;
2664 TxD_t *txdlp;
2665 u16 j, frg_cnt;
2666
2667 get_info = fifo_data->tx_curr_get_info;
2668 put_info = fifo_data->tx_curr_put_info;
2669 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2670 list_virt_addr;
2671 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2672 (get_info.offset != put_info.offset) &&
2673 (txdlp->Host_Control)) {
2674 /* Check for TxD errors */
2675 if (txdlp->Control_1 & TXD_T_CODE) {
2676 unsigned long long err;
2677 err = txdlp->Control_1 & TXD_T_CODE;
2678 if ((err >> 48) == 0xA) {
2679 				DBG_PRINT(TX_DBG, "TxD returned due "
2680 					  "to loss of link\n");
2681 }
2682 else {
2683 				DBG_PRINT(ERR_DBG, "***TxD error "
2684 					  "%llx\n", err);
2685 }
2686 }
2687
2688 skb = (struct sk_buff *) ((unsigned long)
2689 txdlp->Host_Control);
2690 if (skb == NULL) {
2691 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2692 __FUNCTION__);
2693 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2694 return;
2695 }
2696
2697 frg_cnt = skb_shinfo(skb)->nr_frags;
2698 nic->tx_pkt_count++;
2699
2700 pci_unmap_single(nic->pdev, (dma_addr_t)
2701 txdlp->Buffer_Pointer,
2702 skb->len - skb->data_len,
2703 PCI_DMA_TODEVICE);
2704 if (frg_cnt) {
2705 TxD_t *temp;
2706 temp = txdlp;
2707 txdlp++;
2708 for (j = 0; j < frg_cnt; j++, txdlp++) {
2709 skb_frag_t *frag =
2710 &skb_shinfo(skb)->frags[j];
2711 if (!txdlp->Buffer_Pointer)
2712 break;
2713 pci_unmap_page(nic->pdev,
2714 (dma_addr_t)
2715 txdlp->
2716 Buffer_Pointer,
2717 frag->size,
2718 PCI_DMA_TODEVICE);
2719 }
2720 txdlp = temp;
2721 }
2722 memset(txdlp, 0,
2723 (sizeof(TxD_t) * fifo_data->max_txds));
2724
2725 /* Updating the statistics block */
2726 nic->stats.tx_bytes += skb->len;
2727 dev_kfree_skb_irq(skb);
2728
2729 get_info.offset++;
2730 get_info.offset %= get_info.fifo_len + 1;
2731 txdlp = (TxD_t *) fifo_data->list_info
2732 [get_info.offset].list_virt_addr;
2733 fifo_data->tx_curr_get_info.offset =
2734 get_info.offset;
2735 }
2736
2737 spin_lock(&nic->tx_lock);
2738 if (netif_queue_stopped(dev))
2739 netif_wake_queue(dev);
2740 spin_unlock(&nic->tx_lock);
2741 }
2742
2743 /**
2744  *  alarm_intr_handler - Alarm Interrupt handler
2745  *  @nic: device private variable
2746  *  Description: If the interrupt was neither because of an Rx packet nor a
2747  *  Tx completion, this function is called. If the interrupt indicated a
2748  *  loss of link, the OSM link status handler is invoked; for any other
2749  *  alarm interrupt, the block that raised the interrupt is displayed and
2750  *  a H/W reset is issued.
2751 * Return Value:
2752 * NONE
2753 */
2754
2755 static void alarm_intr_handler(struct s2io_nic *nic)
2756 {
2757 struct net_device *dev = (struct net_device *) nic->dev;
2758 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2759 register u64 val64 = 0, err_reg = 0;
2760
2761 /* Handling link status change error Intr */
2762 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2763 err_reg = readq(&bar0->mac_rmac_err_reg);
2764 writeq(err_reg, &bar0->mac_rmac_err_reg);
2765 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2766 schedule_work(&nic->set_link_task);
2767 }
2768 }
2769
2770 /* Handling Ecc errors */
2771 val64 = readq(&bar0->mc_err_reg);
2772 writeq(val64, &bar0->mc_err_reg);
2773 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2774 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2775 nic->mac_control.stats_info->sw_stat.
2776 double_ecc_errs++;
2777 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
2778 dev->name);
2779 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
2780 if (nic->device_type != XFRAME_II_DEVICE) {
2781 /* Reset XframeI only if critical error */
2782 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
2783 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
2784 netif_stop_queue(dev);
2785 schedule_work(&nic->rst_timer_task);
2786 }
2787 }
2788 } else {
2789 nic->mac_control.stats_info->sw_stat.
2790 single_ecc_errs++;
2791 }
2792 }
2793
2794 /* In case of a serious error, the device will be Reset. */
2795 val64 = readq(&bar0->serr_source);
2796 if (val64 & SERR_SOURCE_ANY) {
2797 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2798 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
2799 (unsigned long long)val64);
2800 netif_stop_queue(dev);
2801 schedule_work(&nic->rst_timer_task);
2802 }
2803
2804 /*
2805 	 * Also, as mentioned in the latest Errata sheets, if a PCC_FB_ECC
2806 	 * error occurs, the adapter will be recycled by disabling the
2807 * adapter enable bit and enabling it again after the device
2808 * becomes Quiescent.
2809 */
2810 val64 = readq(&bar0->pcc_err_reg);
2811 writeq(val64, &bar0->pcc_err_reg);
2812 if (val64 & PCC_FB_ECC_DB_ERR) {
2813 u64 ac = readq(&bar0->adapter_control);
2814 ac &= ~(ADAPTER_CNTL_EN);
2815 writeq(ac, &bar0->adapter_control);
2816 ac = readq(&bar0->adapter_control);
2817 schedule_work(&nic->set_link_task);
2818 }
2819
2820 /* Other type of interrupts are not being handled now, TODO */
2821 }
2822
2823 /**
2824 * wait_for_cmd_complete - waits for a command to complete.
2825 * @sp : private member of the device structure, which is a pointer to the
2826 * s2io_nic structure.
2827  *  Description: Function that waits for a command written to the RMAC
2828  *  ADDR/DATA registers to complete and returns either success or
2829  *  error depending on whether the command completed or not.
2830 * Return value:
2831 * SUCCESS on success and FAILURE on failure.
2832 */
2833
2834 int wait_for_cmd_complete(nic_t * sp)
2835 {
2836 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2837 int ret = FAILURE, cnt = 0;
2838 u64 val64;
2839
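	/* Poll the strobe bit every 50 ms; give up after roughly 550 ms. */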
2840 while (TRUE) {
2841 val64 = readq(&bar0->rmac_addr_cmd_mem);
2842 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2843 ret = SUCCESS;
2844 break;
2845 }
2846 msleep(50);
2847 if (cnt++ > 10)
2848 break;
2849 }
2850
2851 return ret;
2852 }
2853
2854 /**
2855 * s2io_reset - Resets the card.
2856 * @sp : private member of the device structure.
2857 * Description: Function to Reset the card. This function then also
2858 * restores the previously saved PCI configuration space registers as
2859 * the card reset also resets the configuration space.
2860 * Return value:
2861 * void.
2862 */
2863
2864 void s2io_reset(nic_t * sp)
2865 {
2866 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2867 u64 val64;
2868 u16 subid, pci_cmd;
2869
2870 	/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
2871 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2872
2873 val64 = SW_RESET_ALL;
2874 writeq(val64, &bar0->sw_reset);
2875
2876 /*
2877 * At this stage, if the PCI write is indeed completed, the
2878 * card is reset and so is the PCI Config space of the device.
2879 * So a read cannot be issued at this stage on any of the
2880 * registers to ensure the write into "sw_reset" register
2881 * has gone through.
2882 * Question: Is there any system call that will explicitly force
2883 * all the write commands still pending on the bus to be pushed
2884 * through?
2885 	 * As of now I am just giving a 250ms delay and hoping that the
2886 * PCI write to sw_reset register is done by this time.
2887 */
2888 msleep(250);
2889
2890 /* Restore the PCI state saved during initialization. */
2891 pci_restore_state(sp->pdev);
2892 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2893 pci_cmd);
2894 s2io_init_pci(sp);
2895
2896 msleep(250);
2897
2898 /* Set swapper to enable I/O register access */
2899 s2io_set_swapper(sp);
2900
2901 /* Restore the MSIX table entries from local variables */
2902 restore_xmsi_data(sp);
2903
2904 /* Clear certain PCI/PCI-X fields after reset */
2905 if (sp->device_type == XFRAME_II_DEVICE) {
2906 /* Clear parity err detect bit */
2907 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2908
2909 /* Clearing PCIX Ecc status register */
2910 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2911
2912 /* Clearing PCI_STATUS error reflected here */
2913 writeq(BIT(62), &bar0->txpic_int_reg);
2914 }
2915
2916 /* Reset device statistics maintained by OS */
2917 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2918
2919 /* SXE-002: Configure link and activity LED to turn it off */
2920 subid = sp->pdev->subsystem_device;
2921 if (((subid & 0xFF) >= 0x07) &&
2922 (sp->device_type == XFRAME_I_DEVICE)) {
2923 val64 = readq(&bar0->gpio_control);
2924 val64 |= 0x0000800000000000ULL;
2925 writeq(val64, &bar0->gpio_control);
2926 val64 = 0x0411040400000000ULL;
2927 writeq(val64, (void __iomem *)bar0 + 0x2700);
2928 }
2929
2930 /*
2931 	 * Clear spurious ECC interrupts that would have occurred on
2932 * XFRAME II cards after reset.
2933 */
2934 if (sp->device_type == XFRAME_II_DEVICE) {
2935 val64 = readq(&bar0->pcc_err_reg);
2936 writeq(val64, &bar0->pcc_err_reg);
2937 }
2938
2939 sp->device_enabled_once = FALSE;
2940 }
2941
2942 /**
2943  *  s2io_set_swapper - to set the swapper control on the card
2944 * @sp : private member of the device structure,
2945 * pointer to the s2io_nic structure.
2946 * Description: Function to set the swapper control on the card
2947 * correctly depending on the 'endianness' of the system.
2948 * Return value:
2949 * SUCCESS on success and FAILURE on failure.
2950 */
2951
2952 int s2io_set_swapper(nic_t * sp)
2953 {
2954 struct net_device *dev = sp->dev;
2955 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2956 u64 val64, valt, valr;
2957
2958 /*
2959 * Set proper endian settings and verify the same by reading
2960 * the PIF Feed-back register.
2961 */
2962
2963 val64 = readq(&bar0->pif_rd_swapper_fb);
2964 if (val64 != 0x0123456789ABCDEFULL) {
2965 int i = 0;
2966 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2967 0x8100008181000081ULL, /* FE=1, SE=0 */
2968 0x4200004242000042ULL, /* FE=0, SE=1 */
2969 0}; /* FE=0, SE=0 */
2970
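		/*
		 * Try each candidate swapper setting until the PIF
		 * feed-back register reads back the expected pattern.
		 */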
2971 while(i<4) {
2972 writeq(value[i], &bar0->swapper_ctrl);
2973 val64 = readq(&bar0->pif_rd_swapper_fb);
2974 if (val64 == 0x0123456789ABCDEFULL)
2975 break;
2976 i++;
2977 }
2978 if (i == 4) {
2979 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2980 dev->name);
2981 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2982 (unsigned long long) val64);
2983 return FAILURE;
2984 }
2985 valr = value[i];
2986 } else {
2987 valr = readq(&bar0->swapper_ctrl);
2988 }
2989
2990 valt = 0x0123456789ABCDEFULL;
2991 writeq(valt, &bar0->xmsi_address);
2992 val64 = readq(&bar0->xmsi_address);
2993
2994 if(val64 != valt) {
2995 int i = 0;
2996 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2997 0x0081810000818100ULL, /* FE=1, SE=0 */
2998 0x0042420000424200ULL, /* FE=0, SE=1 */
2999 0}; /* FE=0, SE=0 */
3000
3001 while(i<4) {
3002 writeq((value[i] | valr), &bar0->swapper_ctrl);
3003 writeq(valt, &bar0->xmsi_address);
3004 val64 = readq(&bar0->xmsi_address);
3005 if(val64 == valt)
3006 break;
3007 i++;
3008 }
3009 if(i == 4) {
3010 unsigned long long x = val64;
3011 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3012 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3013 return FAILURE;
3014 }
3015 }
3016 val64 = readq(&bar0->swapper_ctrl);
3017 val64 &= 0xFFFF000000000000ULL;
3018
3019 #ifdef __BIG_ENDIAN
3020 /*
3021 	 * The device is by default set to a big endian format, so a
3022 * big endian driver need not set anything.
3023 */
3024 val64 |= (SWAPPER_CTRL_TXP_FE |
3025 SWAPPER_CTRL_TXP_SE |
3026 SWAPPER_CTRL_TXD_R_FE |
3027 SWAPPER_CTRL_TXD_W_FE |
3028 SWAPPER_CTRL_TXF_R_FE |
3029 SWAPPER_CTRL_RXD_R_FE |
3030 SWAPPER_CTRL_RXD_W_FE |
3031 SWAPPER_CTRL_RXF_W_FE |
3032 SWAPPER_CTRL_XMSI_FE |
3033 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3034 if (sp->intr_type == INTA)
3035 val64 |= SWAPPER_CTRL_XMSI_SE;
3036 writeq(val64, &bar0->swapper_ctrl);
3037 #else
3038 /*
3039 * Initially we enable all bits to make it accessible by the
3040 * driver, then we selectively enable only those bits that
3041 * we want to set.
3042 */
3043 val64 |= (SWAPPER_CTRL_TXP_FE |
3044 SWAPPER_CTRL_TXP_SE |
3045 SWAPPER_CTRL_TXD_R_FE |
3046 SWAPPER_CTRL_TXD_R_SE |
3047 SWAPPER_CTRL_TXD_W_FE |
3048 SWAPPER_CTRL_TXD_W_SE |
3049 SWAPPER_CTRL_TXF_R_FE |
3050 SWAPPER_CTRL_RXD_R_FE |
3051 SWAPPER_CTRL_RXD_R_SE |
3052 SWAPPER_CTRL_RXD_W_FE |
3053 SWAPPER_CTRL_RXD_W_SE |
3054 SWAPPER_CTRL_RXF_W_FE |
3055 SWAPPER_CTRL_XMSI_FE |
3056 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3057 if (sp->intr_type == INTA)
3058 val64 |= SWAPPER_CTRL_XMSI_SE;
3059 writeq(val64, &bar0->swapper_ctrl);
3060 #endif
3061 val64 = readq(&bar0->swapper_ctrl);
3062
3063 /*
3064 * Verifying if endian settings are accurate by reading a
3065 * feedback register.
3066 */
3067 val64 = readq(&bar0->pif_rd_swapper_fb);
3068 if (val64 != 0x0123456789ABCDEFULL) {
3069 /* Endian settings are incorrect, calls for another dekko. */
3070 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3071 dev->name);
3072 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3073 (unsigned long long) val64);
3074 return FAILURE;
3075 }
3076
3077 return SUCCESS;
3078 }
3079
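/*
 * wait_for_msix_trans - polls for completion of an XMSI access.
 * Waits (up to ~5 ms) for BIT(15) of the xmsi_access register to
 * clear. Returns 0 on completion and 1 on timeout.
 */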
3080 int wait_for_msix_trans(nic_t *nic, int i)
3081 {
3082 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3083 u64 val64;
3084 int ret = 0, cnt = 0;
3085
3086 do {
3087 val64 = readq(&bar0->xmsi_access);
3088 if (!(val64 & BIT(15)))
3089 break;
3090 mdelay(1);
3091 cnt++;
3092 } while(cnt < 5);
3093 if (cnt == 5) {
3094 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3095 ret = 1;
3096 }
3097
3098 return ret;
3099 }
3100
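/*
 * restore_xmsi_data - writes the MSI-X address/data pairs cached in
 * nic->msix_info back into the adapter's XMSI table, one entry at a
 * time, after a reset has wiped them.
 */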
3101 void restore_xmsi_data(nic_t *nic)
3102 {
3103 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3104 u64 val64;
3105 int i;
3106
3107 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3108 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3109 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3110 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3111 writeq(val64, &bar0->xmsi_access);
3112 if (wait_for_msix_trans(nic, i)) {
3113 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3114 continue;
3115 }
3116 }
3117 }
3118
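/*
 * store_xmsi_data - reads each XMSI table entry from the adapter
 * and caches the address/data pair in nic->msix_info so that it
 * can be restored after a card reset.
 */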
3119 void store_xmsi_data(nic_t *nic)
3120 {
3121 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3122 u64 val64, addr, data;
3123 int i;
3124
3125 /* Store and display */
3126 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3127 val64 = (BIT(15) | vBIT(i, 26, 6));
3128 writeq(val64, &bar0->xmsi_access);
3129 if (wait_for_msix_trans(nic, i)) {
3130 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3131 continue;
3132 }
3133 addr = readq(&bar0->xmsi_address);
3134 data = readq(&bar0->xmsi_data);
3135 if (addr && data) {
3136 nic->msix_info[i].addr = addr;
3137 nic->msix_info[i].data = data;
3138 }
3139 }
3140 }
3141
3142 int s2io_enable_msi(nic_t *nic)
3143 {
3144 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3145 u16 msi_ctrl, msg_val;
3146 struct config_param *config = &nic->config;
3147 struct net_device *dev = nic->dev;
3148 u64 val64, tx_mat, rx_mat;
3149 int i, err;
3150
3151 val64 = readq(&bar0->pic_control);
3152 val64 &= ~BIT(1);
3153 writeq(val64, &bar0->pic_control);
3154
3155 err = pci_enable_msi(nic->pdev);
3156 if (err) {
3157 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3158 nic->dev->name);
3159 return err;
3160 }
3161
3162 /*
3163 	 * Enable MSI and use MSI-1 instead of the standard MSI-0
3164 * for interrupt handling.
3165 */
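	/*
	 * Note: offsets 0x4c and 0x42 below are assumed to be the message
	 * data and message control words of this device's PCI MSI
	 * capability, i.e. the capability structure is taken to start at
	 * config offset 0x40.
	 */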
3166 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3167 msg_val ^= 0x1;
3168 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3169 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3170
3171 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3172 msi_ctrl |= 0x10;
3173 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3174
3175 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3176 tx_mat = readq(&bar0->tx_mat0_n[0]);
3177 for (i=0; i<config->tx_fifo_num; i++) {
3178 tx_mat |= TX_MAT_SET(i, 1);
3179 }
3180 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3181
3182 rx_mat = readq(&bar0->rx_mat);
3183 for (i=0; i<config->rx_ring_num; i++) {
3184 rx_mat |= RX_MAT_SET(i, 1);
3185 }
3186 writeq(rx_mat, &bar0->rx_mat);
3187
3188 dev->irq = nic->pdev->irq;
3189 return 0;
3190 }
3191
3192 int s2io_enable_msi_x(nic_t *nic)
3193 {
3194 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3195 u64 tx_mat, rx_mat;
3196 u16 msi_control; /* Temp variable */
3197 int ret, i, j, msix_indx = 1;
3198
3199 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3200 GFP_KERNEL);
3201 if (nic->entries == NULL) {
3202 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3203 return -ENOMEM;
3204 }
3205 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3206
3207 nic->s2io_entries =
3208 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3209 GFP_KERNEL);
3210 if (nic->s2io_entries == NULL) {
3211 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3212 kfree(nic->entries);
3213 return -ENOMEM;
3214 }
3215 memset(nic->s2io_entries, 0,
3216 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3217
3218 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3219 nic->entries[i].entry = i;
3220 nic->s2io_entries[i].entry = i;
3221 nic->s2io_entries[i].arg = NULL;
3222 nic->s2io_entries[i].in_use = 0;
3223 }
3224
3225 tx_mat = readq(&bar0->tx_mat0_n[0]);
3226 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3227 tx_mat |= TX_MAT_SET(i, msix_indx);
3228 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3229 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3230 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3231 }
3232 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3233
3234 if (!nic->config.bimodal) {
3235 rx_mat = readq(&bar0->rx_mat);
3236 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3237 rx_mat |= RX_MAT_SET(j, msix_indx);
3238 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3239 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3240 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3241 }
3242 writeq(rx_mat, &bar0->rx_mat);
3243 } else {
3244 tx_mat = readq(&bar0->tx_mat0_n[7]);
3245 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3246 			tx_mat |= TX_MAT_SET(j, msix_indx); /* index with 'j'; 'i' is stale here */
3247 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3248 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3249 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3250 }
3251 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3252 }
3253
3254 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3255 if (ret) {
3256 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3257 kfree(nic->entries);
3258 kfree(nic->s2io_entries);
3259 nic->entries = NULL;
3260 nic->s2io_entries = NULL;
3261 return -ENOMEM;
3262 }
3263
3264 /*
3265 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3266 * in the herc NIC. (Temp change, needs to be removed later)
3267 */
3268 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3269 msi_control |= 0x1; /* Enable MSI */
3270 pci_write_config_word(nic->pdev, 0x42, msi_control);
3271
3272 return 0;
3273 }
3274
3275 /* ********************************************************* *
3276 * Functions defined below concern the OS part of the driver *
3277 * ********************************************************* */
3278
3279 /**
3280 * s2io_open - open entry point of the driver
3281 * @dev : pointer to the device structure.
3282 * Description:
3283 * This function is the open entry point of the driver. It mainly calls a
3284 * function to allocate Rx buffers and inserts them into the buffer
3285 * descriptors and then enables the Rx part of the NIC.
3286 * Return value:
3287 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3288 * file on failure.
3289 */
3290
3291 int s2io_open(struct net_device *dev)
3292 {
3293 nic_t *sp = dev->priv;
3294 int err = 0;
3295 int i;
3296 u16 msi_control; /* Temp variable */
3297
3298 /*
3299 	 * Make sure the link is off by default every time the
3300 	 * NIC is initialized
3301 */
3302 netif_carrier_off(dev);
3303 sp->last_link_state = 0;
3304
3305 /* Initialize H/W and enable interrupts */
3306 if (s2io_card_up(sp)) {
3307 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3308 dev->name);
3309 err = -ENODEV;
3310 goto hw_init_failed;
3311 }
3312
3313 /* Store the values of the MSIX table in the nic_t structure */
3314 store_xmsi_data(sp);
3315
3316 /* After proper initialization of H/W, register ISR */
3317 if (sp->intr_type == MSI) {
3318 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
3319 SA_SHIRQ, sp->name, dev);
3320 if (err) {
3321 			DBG_PRINT(ERR_DBG, "%s: MSI registration "
3322 				  "failed\n", dev->name);
3323 goto isr_registration_failed;
3324 }
3325 }
3326 if (sp->intr_type == MSI_X) {
3327 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
3328 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
3329 sprintf(sp->desc1, "%s:MSI-X-%d-TX",
3330 dev->name, i);
3331 err = request_irq(sp->entries[i].vector,
3332 s2io_msix_fifo_handle, 0, sp->desc1,
3333 sp->s2io_entries[i].arg);
3334 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
3335 sp->msix_info[i].addr);
3336 } else {
3337 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3338 dev->name, i);
3339 err = request_irq(sp->entries[i].vector,
3340 s2io_msix_ring_handle, 0, sp->desc2,
3341 sp->s2io_entries[i].arg);
3342 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
3343 sp->msix_info[i].addr);
3344 }
3345 if (err) {
3346 				DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration "
3347 					  "failed\n", dev->name, i);
3348 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
3349 goto isr_registration_failed;
3350 }
3351 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
3352 }
3353 }
3354 if (sp->intr_type == INTA) {
3355 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3356 sp->name, dev);
3357 if (err) {
3358 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3359 dev->name);
3360 goto isr_registration_failed;
3361 }
3362 }
3363
3364 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3365 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3366 err = -ENODEV;
3367 goto setting_mac_address_failed;
3368 }
3369
3370 netif_start_queue(dev);
3371 return 0;
3372
3373 setting_mac_address_failed:
3374 if (sp->intr_type != MSI_X)
3375 free_irq(sp->pdev->irq, dev);
3376 isr_registration_failed:
3377 del_timer_sync(&sp->alarm_timer);
3378 if (sp->intr_type == MSI_X) {
3379 if (sp->device_type == XFRAME_II_DEVICE) {
3380 for (i=1; (sp->s2io_entries[i].in_use ==
3381 MSIX_REGISTERED_SUCCESS); i++) {
3382 int vector = sp->entries[i].vector;
3383 void *arg = sp->s2io_entries[i].arg;
3384
3385 free_irq(vector, arg);
3386 }
3387 pci_disable_msix(sp->pdev);
3388
3389 /* Temp */
3390 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3391 msi_control &= 0xFFFE; /* Disable MSI */
3392 pci_write_config_word(sp->pdev, 0x42, msi_control);
3393 }
3394 }
3395 else if (sp->intr_type == MSI)
3396 pci_disable_msi(sp->pdev);
3397 s2io_reset(sp);
3398 hw_init_failed:
3399 if (sp->intr_type == MSI_X) {
3400 if (sp->entries)
3401 kfree(sp->entries);
3402 if (sp->s2io_entries)
3403 kfree(sp->s2io_entries);
3404 }
3405 return err;
3406 }
3407
3408 /**
3409 * s2io_close -close entry point of the driver
3410 * @dev : device pointer.
3411 * Description:
3412 * This is the stop entry point of the driver. It needs to undo exactly
3413  *  whatever was done by the open entry point, thus it's usually referred to
3414  *  as the close function. Among other things, this function mainly stops the
3415 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3416 * Return value:
3417 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3418 * file on failure.
3419 */
3420
3421 int s2io_close(struct net_device *dev)
3422 {
3423 nic_t *sp = dev->priv;
3424 int i;
3425 u16 msi_control;
3426
3427 flush_scheduled_work();
3428 netif_stop_queue(dev);
3429 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3430 s2io_card_down(sp);
3431
3432 if (sp->intr_type == MSI_X) {
3433 if (sp->device_type == XFRAME_II_DEVICE) {
3434 for (i=1; (sp->s2io_entries[i].in_use ==
3435 MSIX_REGISTERED_SUCCESS); i++) {
3436 int vector = sp->entries[i].vector;
3437 void *arg = sp->s2io_entries[i].arg;
3438
3439 free_irq(vector, arg);
3440 }
3441 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3442 msi_control &= 0xFFFE; /* Disable MSI */
3443 pci_write_config_word(sp->pdev, 0x42, msi_control);
3444
3445 pci_disable_msix(sp->pdev);
3446 }
3447 }
3448 else {
3449 free_irq(sp->pdev->irq, dev);
3450 if (sp->intr_type == MSI)
3451 pci_disable_msi(sp->pdev);
3452 }
3453 sp->device_close_flag = TRUE; /* Device is shut down. */
3454 return 0;
3455 }
3456
3457 /**
3458  *  s2io_xmit - Tx entry point of the driver
3459 * @skb : the socket buffer containing the Tx data.
3460 * @dev : device pointer.
3461 * Description :
3462 * This function is the Tx entry point of the driver. S2IO NIC supports
3463 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3464  *  NOTE: when the device can't queue the pkt, just the trans_start variable
3465  *  will not be updated.
3466 * Return value:
3467 * 0 on success & 1 on failure.
3468 */
3469
3470 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3471 {
3472 nic_t *sp = dev->priv;
3473 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3474 register u64 val64;
3475 TxD_t *txdp;
3476 TxFIFO_element_t __iomem *tx_fifo;
3477 unsigned long flags;
3478 #ifdef NETIF_F_TSO
3479 int mss;
3480 #endif
3481 u16 vlan_tag = 0;
3482 int vlan_priority = 0;
3483 mac_info_t *mac_control;
3484 struct config_param *config;
3485
3486 mac_control = &sp->mac_control;
3487 config = &sp->config;
3488
3489 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3490 spin_lock_irqsave(&sp->tx_lock, flags);
3491 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3492 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3493 dev->name);
3494 spin_unlock_irqrestore(&sp->tx_lock, flags);
3495 dev_kfree_skb(skb);
3496 return 0;
3497 }
3498
3499 queue = 0;
3500
3501 /* Get Fifo number to Transmit based on vlan priority */
3502 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3503 vlan_tag = vlan_tx_tag_get(skb);
3504 vlan_priority = vlan_tag >> 13;
3505 queue = config->fifo_mapping[vlan_priority];
3506 }
3507
3508 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3509 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3510 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3511 list_virt_addr;
3512
3513 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3514 /* Avoid "put" pointer going beyond "get" pointer */
3515 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3516 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3517 netif_stop_queue(dev);
3518 dev_kfree_skb(skb);
3519 spin_unlock_irqrestore(&sp->tx_lock, flags);
3520 return 0;
3521 }
3522
3523 /* A buffer with no data will be dropped */
3524 if (!skb->len) {
3525 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3526 dev_kfree_skb(skb);
3527 spin_unlock_irqrestore(&sp->tx_lock, flags);
3528 return 0;
3529 }
3530
3531 #ifdef NETIF_F_TSO
3532 mss = skb_shinfo(skb)->tso_size;
3533 if (mss) {
3534 txdp->Control_1 |= TXD_TCP_LSO_EN;
3535 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3536 }
3537 #endif
3538
3539 frg_cnt = skb_shinfo(skb)->nr_frags;
3540 frg_len = skb->len - skb->data_len;
3541
3542 txdp->Buffer_Pointer = pci_map_single
3543 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3544 txdp->Host_Control = (unsigned long) skb;
3545 if (skb->ip_summed == CHECKSUM_HW) {
3546 txdp->Control_2 |=
3547 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3548 TXD_TX_CKO_UDP_EN);
3549 }
3550
3551 txdp->Control_2 |= config->tx_intr_type;
3552
3553 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3554 txdp->Control_2 |= TXD_VLAN_ENABLE;
3555 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3556 }
3557
3558 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3559 TXD_GATHER_CODE_FIRST);
3560 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3561
3562 /* For fragmented SKB. */
3563 for (i = 0; i < frg_cnt; i++) {
3564 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3565 /* A '0' length fragment will be ignored */
3566 if (!frag->size)
3567 continue;
3568 txdp++;
3569 txdp->Buffer_Pointer = (u64) pci_map_page
3570 (sp->pdev, frag->page, frag->page_offset,
3571 frag->size, PCI_DMA_TODEVICE);
3572 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3573 }
3574 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3575
3576 tx_fifo = mac_control->tx_FIFO_start[queue];
3577 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3578 writeq(val64, &tx_fifo->TxDL_Pointer);
3579
3580 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3581 TX_FIFO_LAST_LIST);
3582
3583 #ifdef NETIF_F_TSO
3584 if (mss)
3585 val64 |= TX_FIFO_SPECIAL_FUNC;
3586 #endif
3587 writeq(val64, &tx_fifo->List_Control);
3588
3589 mmiowb();
3590
3591 put_off++;
3592 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3593 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3594
3595 /* Avoid "put" pointer going beyond "get" pointer */
3596 if (((put_off + 1) % queue_len) == get_off) {
3597 DBG_PRINT(TX_DBG,
3598 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3599 put_off, get_off);
3600 netif_stop_queue(dev);
3601 }
3602
3603 dev->trans_start = jiffies;
3604 spin_unlock_irqrestore(&sp->tx_lock, flags);
3605
3606 return 0;
3607 }
3608
3609 static void
3610 s2io_alarm_handle(unsigned long data)
3611 {
3612 nic_t *sp = (nic_t *)data;
3613
3614 alarm_intr_handler(sp);
3615 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3616 }
3617
3618 static irqreturn_t
3619 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
3620 {
3621 struct net_device *dev = (struct net_device *) dev_id;
3622 nic_t *sp = dev->priv;
3623 int i;
3624 int ret;
3625 mac_info_t *mac_control;
3626 struct config_param *config;
3627
3628 atomic_inc(&sp->isr_cnt);
3629 mac_control = &sp->mac_control;
3630 config = &sp->config;
3631 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
3632
3633 /* If Intr is because of Rx Traffic */
3634 for (i = 0; i < config->rx_ring_num; i++)
3635 rx_intr_handler(&mac_control->rings[i]);
3636
3637 /* If Intr is because of Tx Traffic */
3638 for (i = 0; i < config->tx_fifo_num; i++)
3639 tx_intr_handler(&mac_control->fifos[i]);
3640
3641 /*
3642 * If the Rx buffer count is below the panic threshold then
3643 * reallocate the buffers from the interrupt handler itself,
3644 * else schedule a tasklet to reallocate the buffers.
3645 */
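	/*
	 * For example (hypothetical ring size): with 1024 RxDs in a ring, a
	 * PANIC-level refill is attempted inline once fewer than about a
	 * quarter of the buffers remain, per the threshold described in the
	 * s2io_isr() documentation below; otherwise the tasklet refills
	 * them lazily.
	 */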
3646 for (i = 0; i < config->rx_ring_num; i++) {
3647 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3648 int level = rx_buffer_level(sp, rxb_size, i);
3649
3650 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3651 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3652 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3653 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3654 				DBG_PRINT(ERR_DBG, "%s: Out of memory",
3655 dev->name);
3656 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3657 clear_bit(0, (&sp->tasklet_status));
3658 atomic_dec(&sp->isr_cnt);
3659 return IRQ_HANDLED;
3660 }
3661 clear_bit(0, (&sp->tasklet_status));
3662 } else if (level == LOW) {
3663 tasklet_schedule(&sp->task);
3664 }
3665 }
3666
3667 atomic_dec(&sp->isr_cnt);
3668 return IRQ_HANDLED;
3669 }
3670
3671 static irqreturn_t
3672 s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
3673 {
3674 ring_info_t *ring = (ring_info_t *)dev_id;
3675 nic_t *sp = ring->nic;
3676 int rxb_size, level, rng_n;
3677
3678 atomic_inc(&sp->isr_cnt);
3679 rx_intr_handler(ring);
3680
3681 rng_n = ring->ring_no;
3682 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3683 level = rx_buffer_level(sp, rxb_size, rng_n);
3684
3685 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3686 int ret;
3687 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3688 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3689 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3690 DBG_PRINT(ERR_DBG, "Out of memory in %s",
3691 __FUNCTION__);
3692 clear_bit(0, (&sp->tasklet_status));
3693 return IRQ_HANDLED;
3694 }
3695 clear_bit(0, (&sp->tasklet_status));
3696 } else if (level == LOW) {
3697 tasklet_schedule(&sp->task);
3698 }
3699 atomic_dec(&sp->isr_cnt);
3700
3701 return IRQ_HANDLED;
3702 }
3703
3704 static irqreturn_t
3705 s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
3706 {
3707 fifo_info_t *fifo = (fifo_info_t *)dev_id;
3708 nic_t *sp = fifo->nic;
3709
3710 atomic_inc(&sp->isr_cnt);
3711 tx_intr_handler(fifo);
3712 atomic_dec(&sp->isr_cnt);
3713 return IRQ_HANDLED;
3714 }
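/*
 * A minimal registration sketch for the MSI-X handlers above (the real
 * request_irq() calls live in the driver's open path, not shown here;
 * "vector" and "name" are illustrative placeholders):
 *
 *	err = request_irq(vector, s2io_msix_ring_handle, 0, name,
 *			  &mac_control->rings[i]);
 *
 * Each ring (or FIFO) is passed as dev_id, so a handler recovers its
 * own context directly instead of scanning every ring on the card.
 */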
3715
3716 static void s2io_txpic_intr_handle(nic_t *sp)
3717 {
3718 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3719 u64 val64;
3720
3721 val64 = readq(&bar0->pic_int_status);
3722 if (val64 & PIC_INT_GPIO) {
3723 val64 = readq(&bar0->gpio_int_reg);
3724 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3725 (val64 & GPIO_INT_REG_LINK_UP)) {
3726 val64 |= GPIO_INT_REG_LINK_DOWN;
3727 val64 |= GPIO_INT_REG_LINK_UP;
3728 writeq(val64, &bar0->gpio_int_reg);
3729 goto masking;
3730 }
3731
3732 if (((sp->last_link_state == LINK_UP) &&
3733 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3734 ((sp->last_link_state == LINK_DOWN) &&
3735 (val64 & GPIO_INT_REG_LINK_UP))) {
3736 val64 = readq(&bar0->gpio_int_mask);
3737 val64 |= GPIO_INT_MASK_LINK_DOWN;
3738 val64 |= GPIO_INT_MASK_LINK_UP;
3739 writeq(val64, &bar0->gpio_int_mask);
3740 s2io_set_link((unsigned long)sp);
3741 }
3742 masking:
3743 if (sp->last_link_state == LINK_UP) {
3744 			/* enable down interrupt */
3745 val64 = readq(&bar0->gpio_int_mask);
3746 /* unmasks link down intr */
3747 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3748 /* masks link up intr */
3749 val64 |= GPIO_INT_MASK_LINK_UP;
3750 writeq(val64, &bar0->gpio_int_mask);
3751 } else {
3752 			/* enable UP interrupt */
3753 val64 = readq(&bar0->gpio_int_mask);
3754 /* unmasks link up interrupt */
3755 val64 &= ~GPIO_INT_MASK_LINK_UP;
3756 /* masks link down interrupt */
3757 val64 |= GPIO_INT_MASK_LINK_DOWN;
3758 writeq(val64, &bar0->gpio_int_mask);
3759 }
3760 }
3761 }
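/*
 * Mask-register convention used above (a reading aid, not new logic):
 * a bit set in gpio_int_mask disables that interrupt source and a
 * cleared bit enables it. With the link up only LINK_DOWN is left
 * unmasked, and vice versa, so exactly one edge fires per link
 * transition.
 */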
3762
3763 /**
3764  *  s2io_isr - ISR handler of the device.
3765 * @irq: the irq of the device.
3766 * @dev_id: a void pointer to the dev structure of the NIC.
3767  *  @regs: pointer to the registers pushed on the stack.
3768 * Description: This function is the ISR handler of the device. It
3769 * identifies the reason for the interrupt and calls the relevant
3770  *  service routines. As a contingency measure, this ISR allocates the
3771  *  receive buffers if their number falls below the panic value, which is
3772  *  presently set to 25% of the original number of receive buffers allocated.
3773 * Return value:
3774 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
3775 * IRQ_NONE: will be returned if interrupt is not from our device
3776 */
3777 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3778 {
3779 struct net_device *dev = (struct net_device *) dev_id;
3780 nic_t *sp = dev->priv;
3781 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3782 int i;
3783 u64 reason = 0, val64;
3784 mac_info_t *mac_control;
3785 struct config_param *config;
3786
3787 atomic_inc(&sp->isr_cnt);
3788 mac_control = &sp->mac_control;
3789 config = &sp->config;
3790
3791 /*
3792 * Identify the cause for interrupt and call the appropriate
3793 * interrupt handler. Causes for the interrupt could be;
3794 * 1. Rx of packet.
3795 * 2. Tx complete.
3796 * 3. Link down.
3797 * 4. Error in any functional blocks of the NIC.
3798 */
3799 reason = readq(&bar0->general_int_status);
3800
3801 if (!reason) {
3802 /* The interrupt was not raised by Xena. */
3803 atomic_dec(&sp->isr_cnt);
3804 return IRQ_NONE;
3805 }
3806
3807 #ifdef CONFIG_S2IO_NAPI
3808 if (reason & GEN_INTR_RXTRAFFIC) {
3809 if (netif_rx_schedule_prep(dev)) {
3810 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3811 DISABLE_INTRS);
3812 __netif_rx_schedule(dev);
3813 }
3814 }
3815 #else
3816 /* If Intr is because of Rx Traffic */
3817 if (reason & GEN_INTR_RXTRAFFIC) {
3818 /*
3819 * rx_traffic_int reg is an R1 register, writing all 1's
3820 		 * will ensure that the actual interrupt-causing bit gets
3821 * cleared and hence a read can be avoided.
3822 */
3823 val64 = 0xFFFFFFFFFFFFFFFFULL;
3824 writeq(val64, &bar0->rx_traffic_int);
3825 for (i = 0; i < config->rx_ring_num; i++) {
3826 rx_intr_handler(&mac_control->rings[i]);
3827 }
3828 }
3829 #endif
3830
3831 /* If Intr is because of Tx Traffic */
3832 if (reason & GEN_INTR_TXTRAFFIC) {
3833 /*
3834 * tx_traffic_int reg is an R1 register, writing all 1's
3835 		 * will ensure that the actual interrupt-causing bit gets
3836 * cleared and hence a read can be avoided.
3837 */
3838 val64 = 0xFFFFFFFFFFFFFFFFULL;
3839 writeq(val64, &bar0->tx_traffic_int);
3840
3841 for (i = 0; i < config->tx_fifo_num; i++)
3842 tx_intr_handler(&mac_control->fifos[i]);
3843 }
3844
3845 if (reason & GEN_INTR_TXPIC)
3846 s2io_txpic_intr_handle(sp);
3847 /*
3848 * If the Rx buffer count is below the panic threshold then
3849 * reallocate the buffers from the interrupt handler itself,
3850 * else schedule a tasklet to reallocate the buffers.
3851 */
3852 #ifndef CONFIG_S2IO_NAPI
3853 for (i = 0; i < config->rx_ring_num; i++) {
3854 int ret;
3855 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3856 int level = rx_buffer_level(sp, rxb_size, i);
3857
3858 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3859 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3860 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3861 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3862 				DBG_PRINT(ERR_DBG, "%s: Out of memory",
3863 dev->name);
3864 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3865 clear_bit(0, (&sp->tasklet_status));
3866 atomic_dec(&sp->isr_cnt);
3867 return IRQ_HANDLED;
3868 }
3869 clear_bit(0, (&sp->tasklet_status));
3870 } else if (level == LOW) {
3871 tasklet_schedule(&sp->task);
3872 }
3873 }
3874 #endif
3875
3876 atomic_dec(&sp->isr_cnt);
3877 return IRQ_HANDLED;
3878 }
3879
3880 /**
3881  * s2io_updt_stats - Triggers an immediate hardware statistics update.
3882 */
3883 static void s2io_updt_stats(nic_t *sp)
3884 {
3885 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3886 u64 val64;
3887 int cnt = 0;
3888
3889 if (atomic_read(&sp->card_state) == CARD_UP) {
3890 		/* Approx 30us on a 133 MHz bus */
3891 val64 = SET_UPDT_CLICKS(10) |
3892 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3893 writeq(val64, &bar0->stat_cfg);
3894 do {
3895 udelay(100);
3896 val64 = readq(&bar0->stat_cfg);
3897 if (!(val64 & BIT(0)))
3898 break;
3899 cnt++;
3900 if (cnt == 5)
3901 break; /* Updt failed */
3902 } while(1);
3903 }
3904 }
3905
3906 /**
3907 * s2io_get_stats - Updates the device statistics structure.
3908 * @dev : pointer to the device structure.
3909 * Description:
3910 * This function updates the device statistics structure in the s2io_nic
3911 * structure and returns a pointer to the same.
3912 * Return value:
3913 * pointer to the updated net_device_stats structure.
3914 */
3915
3916 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3917 {
3918 nic_t *sp = dev->priv;
3919 mac_info_t *mac_control;
3920 struct config_param *config;
3921
3922
3923 mac_control = &sp->mac_control;
3924 config = &sp->config;
3925
3926 /* Configure Stats for immediate updt */
3927 s2io_updt_stats(sp);
3928
3929 sp->stats.tx_packets =
3930 le32_to_cpu(mac_control->stats_info->tmac_frms);
3931 sp->stats.tx_errors =
3932 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3933 sp->stats.rx_errors =
3934 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3935 sp->stats.multicast =
3936 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3937 sp->stats.rx_length_errors =
3938 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3939
3940 return (&sp->stats);
3941 }
3942
3943 /**
3944 * s2io_set_multicast - entry point for multicast address enable/disable.
3945 * @dev : pointer to the device structure
3946 * Description:
3947 * This function is a driver entry point which gets called by the kernel
3948 * whenever multicast addresses must be enabled/disabled. This also gets
3949  *  called to set/reset promiscuous mode. Depending on the device flags, we
3950  *  determine whether multicast addresses must be enabled or promiscuous
3951  *  mode is to be enabled or disabled, etc.
3952 * Return value:
3953 * void.
3954 */
3955
3956 static void s2io_set_multicast(struct net_device *dev)
3957 {
3958 int i, j, prev_cnt;
3959 struct dev_mc_list *mclist;
3960 nic_t *sp = dev->priv;
3961 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3962 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3963 0xfeffffffffffULL;
3964 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3965 void __iomem *add;
3966
3967 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3968 /* Enable all Multicast addresses */
3969 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3970 &bar0->rmac_addr_data0_mem);
3971 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3972 &bar0->rmac_addr_data1_mem);
3973 val64 = RMAC_ADDR_CMD_MEM_WE |
3974 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3975 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3976 writeq(val64, &bar0->rmac_addr_cmd_mem);
3977 /* Wait till command completes */
3978 wait_for_cmd_complete(sp);
3979
3980 sp->m_cast_flg = 1;
3981 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3982 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3983 /* Disable all Multicast addresses */
3984 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3985 &bar0->rmac_addr_data0_mem);
3986 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3987 &bar0->rmac_addr_data1_mem);
3988 val64 = RMAC_ADDR_CMD_MEM_WE |
3989 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3990 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3991 writeq(val64, &bar0->rmac_addr_cmd_mem);
3992 /* Wait till command completes */
3993 wait_for_cmd_complete(sp);
3994
3995 sp->m_cast_flg = 0;
3996 sp->all_multi_pos = 0;
3997 }
3998
3999 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4000 /* Put the NIC into promiscuous mode */
4001 add = &bar0->mac_cfg;
4002 val64 = readq(&bar0->mac_cfg);
4003 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4004
4005 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4006 writel((u32) val64, add);
4007 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4008 writel((u32) (val64 >> 32), (add + 4));
4009
4010 val64 = readq(&bar0->mac_cfg);
4011 sp->promisc_flg = 1;
4012 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4013 dev->name);
4014 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4015 /* Remove the NIC from promiscuous mode */
4016 add = &bar0->mac_cfg;
4017 val64 = readq(&bar0->mac_cfg);
4018 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4019
4020 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4021 writel((u32) val64, add);
4022 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4023 writel((u32) (val64 >> 32), (add + 4));
4024
4025 val64 = readq(&bar0->mac_cfg);
4026 sp->promisc_flg = 0;
4027 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4028 dev->name);
4029 }
4030
4031 /* Update individual M_CAST address list */
4032 if ((!sp->m_cast_flg) && dev->mc_count) {
4033 if (dev->mc_count >
4034 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4035 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4036 dev->name);
4037 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4038 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4039 return;
4040 }
4041
4042 prev_cnt = sp->mc_addr_count;
4043 sp->mc_addr_count = dev->mc_count;
4044
4045 /* Clear out the previous list of Mc in the H/W. */
4046 for (i = 0; i < prev_cnt; i++) {
4047 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4048 &bar0->rmac_addr_data0_mem);
4049 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4050 &bar0->rmac_addr_data1_mem);
4051 val64 = RMAC_ADDR_CMD_MEM_WE |
4052 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4053 RMAC_ADDR_CMD_MEM_OFFSET
4054 (MAC_MC_ADDR_START_OFFSET + i);
4055 writeq(val64, &bar0->rmac_addr_cmd_mem);
4056
4057 			/* Wait till command completes */
4058 if (wait_for_cmd_complete(sp)) {
4059 DBG_PRINT(ERR_DBG, "%s: Adding ",
4060 dev->name);
4061 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4062 return;
4063 }
4064 }
4065
4066 /* Create the new Rx filter list and update the same in H/W. */
4067 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4068 i++, mclist = mclist->next) {
4069 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4070 ETH_ALEN);
4071 for (j = 0; j < ETH_ALEN; j++) {
4072 mac_addr |= mclist->dmi_addr[j];
4073 mac_addr <<= 8;
4074 }
4075 mac_addr >>= 8;
4076 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4077 &bar0->rmac_addr_data0_mem);
4078 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4079 &bar0->rmac_addr_data1_mem);
4080 val64 = RMAC_ADDR_CMD_MEM_WE |
4081 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4082 RMAC_ADDR_CMD_MEM_OFFSET
4083 (i + MAC_MC_ADDR_START_OFFSET);
4084 writeq(val64, &bar0->rmac_addr_cmd_mem);
4085
4086 			/* Wait till command completes */
4087 if (wait_for_cmd_complete(sp)) {
4088 DBG_PRINT(ERR_DBG, "%s: Adding ",
4089 dev->name);
4090 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4091 return;
4092 }
4093 }
4094 }
4095 }
4096
4097 /**
4098 * s2io_set_mac_addr - Programs the Xframe mac address
4099 * @dev : pointer to the device structure.
4100 * @addr: a uchar pointer to the new mac address which is to be set.
4101 * Description : This procedure will program the Xframe to receive
4102  *  frames with the new MAC address.
4103 * Return value: SUCCESS on success and an appropriate (-)ve integer
4104 * as defined in errno.h file on failure.
4105 */
4106
4107 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4108 {
4109 nic_t *sp = dev->priv;
4110 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4111 register u64 val64, mac_addr = 0;
4112 int i;
4113
4114 /*
4115 * Set the new MAC address as the new unicast filter and reflect this
4116 * change on the device address registered with the OS. It will be
4117 * at offset 0.
4118 */
4119 for (i = 0; i < ETH_ALEN; i++) {
4120 mac_addr <<= 8;
4121 mac_addr |= addr[i];
4122 }
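	/*
	 * Worked example (hypothetical address): 00:0c:f1:12:34:56 packs
	 * big-endian into the low 48 bits, so mac_addr ends up as
	 * 0x000cf1123456ULL.
	 */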
4123
4124 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4125 &bar0->rmac_addr_data0_mem);
4126
4127 val64 =
4128 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4129 RMAC_ADDR_CMD_MEM_OFFSET(0);
4130 writeq(val64, &bar0->rmac_addr_cmd_mem);
4131 /* Wait till command completes */
4132 if (wait_for_cmd_complete(sp)) {
4133 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4134 return FAILURE;
4135 }
4136
4137 return SUCCESS;
4138 }
4139
4140 /**
4141 * s2io_ethtool_sset - Sets different link parameters.
4142  * @sp : private member of the device structure (a pointer to the
4143  * s2io_nic structure).
4143 * @info: pointer to the structure with parameters given by ethtool to set
4144 * link information.
4145 * Description:
4146 * The function sets different link parameters provided by the user onto
4147 * the NIC.
4148 * Return value:
4149 * 0 on success.
4150 */
4151
4152 static int s2io_ethtool_sset(struct net_device *dev,
4153 struct ethtool_cmd *info)
4154 {
4155 nic_t *sp = dev->priv;
4156 if ((info->autoneg == AUTONEG_ENABLE) ||
4157 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4158 return -EINVAL;
4159 else {
4160 s2io_close(sp->dev);
4161 s2io_open(sp->dev);
4162 }
4163
4164 return 0;
4165 }
4166
4167 /**
4168  * s2io_ethtool_gset - Returns link specific information.
4169 * @sp : private member of the device structure, pointer to the
4170 * s2io_nic structure.
4171 * @info : pointer to the structure with parameters given by ethtool
4172 * to return link information.
4173 * Description:
4174  * Returns link specific information like speed, duplex, etc. to ethtool.
4175 * Return value :
4176 * return 0 on success.
4177 */
4178
4179 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4180 {
4181 nic_t *sp = dev->priv;
4182 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4183 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4184 info->port = PORT_FIBRE;
4185 /* info->transceiver?? TODO */
4186
4187 if (netif_carrier_ok(sp->dev)) {
4188 info->speed = 10000;
4189 info->duplex = DUPLEX_FULL;
4190 } else {
4191 info->speed = -1;
4192 info->duplex = -1;
4193 }
4194
4195 info->autoneg = AUTONEG_DISABLE;
4196 return 0;
4197 }
4198
4199 /**
4200 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4201 * @sp : private member of the device structure, which is a pointer to the
4202 * s2io_nic structure.
4203 * @info : pointer to the structure with parameters given by ethtool to
4204 * return driver information.
4205 * Description:
4206  *  Returns driver specific information like name, version, etc. to ethtool.
4207 * Return value:
4208 * void
4209 */
4210
4211 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4212 struct ethtool_drvinfo *info)
4213 {
4214 nic_t *sp = dev->priv;
4215
4216 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4217 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4218 strncpy(info->fw_version, "", sizeof(info->fw_version));
4219 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4220 info->regdump_len = XENA_REG_SPACE;
4221 info->eedump_len = XENA_EEPROM_SPACE;
4222 info->testinfo_len = S2IO_TEST_LEN;
4223 info->n_stats = S2IO_STAT_LEN;
4224 }
4225
4226 /**
4227  *  s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
4228 * @sp: private member of the device structure, which is a pointer to the
4229 * s2io_nic structure.
4230 * @regs : pointer to the structure with parameters given by ethtool for
4231 * dumping the registers.
4232  *  @reg_space: The user buffer into which all the registers are dumped.
4233  *  Description:
4234  *  Dumps the entire register space of the Xframe NIC into the user-given
4235  *  buffer area.
4236  *  Return value:
4237  *  void.
4238 */
4239
4240 static void s2io_ethtool_gregs(struct net_device *dev,
4241 struct ethtool_regs *regs, void *space)
4242 {
4243 int i;
4244 u64 reg;
4245 u8 *reg_space = (u8 *) space;
4246 nic_t *sp = dev->priv;
4247
4248 regs->len = XENA_REG_SPACE;
4249 regs->version = sp->pdev->subsystem_device;
4250
4251 for (i = 0; i < regs->len; i += 8) {
4252 reg = readq(sp->bar0 + i);
4253 memcpy((reg_space + i), &reg, 8);
4254 }
4255 }
4256
4257 /**
4258 * s2io_phy_id - timer function that alternates adapter LED.
4259 * @data : address of the private member of the device structure, which
4260  * is a pointer to the s2io_nic structure, provided as an unsigned long.
4261  * Description: This is actually the timer function that toggles the
4262  * adapter LED bit of the adapter control register on every invocation.
4263  * The timer is set for 1/2 a second, hence the NIC blinks
4264  * once every second.
4265 */
4266 static void s2io_phy_id(unsigned long data)
4267 {
4268 nic_t *sp = (nic_t *) data;
4269 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4270 u64 val64 = 0;
4271 u16 subid;
4272
4273 subid = sp->pdev->subsystem_device;
4274 if ((sp->device_type == XFRAME_II_DEVICE) ||
4275 ((subid & 0xFF) >= 0x07)) {
4276 val64 = readq(&bar0->gpio_control);
4277 val64 ^= GPIO_CTRL_GPIO_0;
4278 writeq(val64, &bar0->gpio_control);
4279 } else {
4280 val64 = readq(&bar0->adapter_control);
4281 val64 ^= ADAPTER_LED_ON;
4282 writeq(val64, &bar0->adapter_control);
4283 }
4284
4285 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4286 }
4287
4288 /**
4289  * s2io_ethtool_idnic - To physically identify the NIC on the system.
4290 * @sp : private member of the device structure, which is a pointer to the
4291 * s2io_nic structure.
4292 * @id : pointer to the structure with identification parameters given by
4293 * ethtool.
4294 * Description: Used to physically identify the NIC on the system.
4295 * The Link LED will blink for a time specified by the user for
4296 * identification.
4297  * NOTE: The Link has to be Up to be able to blink the LED. Hence
4298  * identification is possible only if its link is up.
4299 * Return value:
4300 * int , returns 0 on success
4301 */
4302
4303 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4304 {
4305 u64 val64 = 0, last_gpio_ctrl_val;
4306 nic_t *sp = dev->priv;
4307 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4308 u16 subid;
4309
4310 subid = sp->pdev->subsystem_device;
4311 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4312 if ((sp->device_type == XFRAME_I_DEVICE) &&
4313 ((subid & 0xFF) < 0x07)) {
4314 val64 = readq(&bar0->adapter_control);
4315 if (!(val64 & ADAPTER_CNTL_EN)) {
4316 printk(KERN_ERR
4317 "Adapter Link down, cannot blink LED\n");
4318 return -EFAULT;
4319 }
4320 }
4321 if (sp->id_timer.function == NULL) {
4322 init_timer(&sp->id_timer);
4323 sp->id_timer.function = s2io_phy_id;
4324 sp->id_timer.data = (unsigned long) sp;
4325 }
4326 mod_timer(&sp->id_timer, jiffies);
4327 if (data)
4328 msleep_interruptible(data * HZ);
4329 else
4330 msleep_interruptible(MAX_FLICKER_TIME);
4331 del_timer_sync(&sp->id_timer);
4332
4333 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4334 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4335 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4336 }
4337
4338 return 0;
4339 }
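/*
 * Usage sketch: this handler backs the standard ethtool LED-blink
 * request, e.g. "ethtool -p eth0 5" (interface name hypothetical) asks
 * the driver to blink the identification LED for 5 seconds.
 */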
4340
4341 /**
4342  * s2io_ethtool_getpause_data - Pause frame generation and reception.
4343 * @sp : private member of the device structure, which is a pointer to the
4344 * s2io_nic structure.
4345 * @ep : pointer to the structure with pause parameters given by ethtool.
4346 * Description:
4347 * Returns the Pause frame generation and reception capability of the NIC.
4348 * Return value:
4349 * void
4350 */
4351 static void s2io_ethtool_getpause_data(struct net_device *dev,
4352 struct ethtool_pauseparam *ep)
4353 {
4354 u64 val64;
4355 nic_t *sp = dev->priv;
4356 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4357
4358 val64 = readq(&bar0->rmac_pause_cfg);
4359 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4360 ep->tx_pause = TRUE;
4361 if (val64 & RMAC_PAUSE_RX_ENABLE)
4362 ep->rx_pause = TRUE;
4363 ep->autoneg = FALSE;
4364 }
4365
4366 /**
4367 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4368 * @sp : private member of the device structure, which is a pointer to the
4369 * s2io_nic structure.
4370 * @ep : pointer to the structure with pause parameters given by ethtool.
4371 * Description:
4372 * It can be used to set or reset Pause frame generation or reception
4373 * support of the NIC.
4374 * Return value:
4375 * int, returns 0 on Success
4376 */
4377
4378 static int s2io_ethtool_setpause_data(struct net_device *dev,
4379 struct ethtool_pauseparam *ep)
4380 {
4381 u64 val64;
4382 nic_t *sp = dev->priv;
4383 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4384
4385 val64 = readq(&bar0->rmac_pause_cfg);
4386 if (ep->tx_pause)
4387 val64 |= RMAC_PAUSE_GEN_ENABLE;
4388 else
4389 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4390 if (ep->rx_pause)
4391 val64 |= RMAC_PAUSE_RX_ENABLE;
4392 else
4393 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4394 writeq(val64, &bar0->rmac_pause_cfg);
4395 return 0;
4396 }
4397
4398 /**
4399  *  read_eeprom - reads 4 bytes of data from a user-given offset.
4400 * @sp : private member of the device structure, which is a pointer to the
4401 * s2io_nic structure.
4402  *  @off : offset from which the data must be read
4403  *  @data : It's an output parameter where the data read at the given
4404 * offset is stored.
4405 * Description:
4406 * Will read 4 bytes of data from the user given offset and return the
4407 * read data.
4408  *  NOTE: Allows reading only the part of the EEPROM visible through the
4409 * I2C bus.
4410 * Return value:
4411 * -1 on failure and 0 on success.
4412 */
4413
4414 #define S2IO_DEV_ID 5
4415 static int read_eeprom(nic_t * sp, int off, u64 * data)
4416 {
4417 int ret = -1;
4418 u32 exit_cnt = 0;
4419 u64 val64;
4420 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4421
4422 if (sp->device_type == XFRAME_I_DEVICE) {
4423 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4424 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4425 I2C_CONTROL_CNTL_START;
4426 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4427
4428 while (exit_cnt < 5) {
4429 val64 = readq(&bar0->i2c_control);
4430 if (I2C_CONTROL_CNTL_END(val64)) {
4431 *data = I2C_CONTROL_GET_DATA(val64);
4432 ret = 0;
4433 break;
4434 }
4435 msleep(50);
4436 exit_cnt++;
4437 }
4438 }
4439
4440 if (sp->device_type == XFRAME_II_DEVICE) {
4441 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4442 SPI_CONTROL_BYTECNT(0x3) |
4443 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4444 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4445 val64 |= SPI_CONTROL_REQ;
4446 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4447 while (exit_cnt < 5) {
4448 val64 = readq(&bar0->spi_control);
4449 if (val64 & SPI_CONTROL_NACK) {
4450 ret = 1;
4451 break;
4452 } else if (val64 & SPI_CONTROL_DONE) {
4453 *data = readq(&bar0->spi_data);
4454 *data &= 0xffffff;
4455 ret = 0;
4456 break;
4457 }
4458 msleep(50);
4459 exit_cnt++;
4460 }
4461 }
4462 return ret;
4463 }
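/*
 * A minimal call sketch (offset chosen arbitrarily):
 *
 *	u64 data;
 *
 *	if (read_eeprom(sp, 0x4F0, &data))
 *		DBG_PRINT(ERR_DBG, "EEPROM read failed\n");
 *
 * On success the bytes read at offset 0x4F0 land in data; the EEPROM
 * self-test below uses exactly this pattern.
 */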
4464
4465 /**
4466 * write_eeprom - actually writes the relevant part of the data value.
4467 * @sp : private member of the device structure, which is a pointer to the
4468 * s2io_nic structure.
4469 * @off : offset at which the data must be written
4470 * @data : The data that is to be written
4471 * @cnt : Number of bytes of the data that are actually to be written into
4472 * the Eeprom. (max of 3)
4473 * Description:
4474 * Actually writes the relevant part of the data value into the Eeprom
4475 * through the I2C bus.
4476 * Return value:
4477 * 0 on success, -1 on failure.
4478 */
4479
4480 static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4481 {
4482 int exit_cnt = 0, ret = -1;
4483 u64 val64;
4484 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4485
4486 if (sp->device_type == XFRAME_I_DEVICE) {
4487 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4488 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4489 I2C_CONTROL_CNTL_START;
4490 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4491
4492 while (exit_cnt < 5) {
4493 val64 = readq(&bar0->i2c_control);
4494 if (I2C_CONTROL_CNTL_END(val64)) {
4495 if (!(val64 & I2C_CONTROL_NACK))
4496 ret = 0;
4497 break;
4498 }
4499 msleep(50);
4500 exit_cnt++;
4501 }
4502 }
4503
4504 if (sp->device_type == XFRAME_II_DEVICE) {
4505 int write_cnt = (cnt == 8) ? 0 : cnt;
4506 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4507
4508 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4509 SPI_CONTROL_BYTECNT(write_cnt) |
4510 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4511 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4512 val64 |= SPI_CONTROL_REQ;
4513 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4514 while (exit_cnt < 5) {
4515 val64 = readq(&bar0->spi_control);
4516 if (val64 & SPI_CONTROL_NACK) {
4517 ret = 1;
4518 break;
4519 } else if (val64 & SPI_CONTROL_DONE) {
4520 ret = 0;
4521 break;
4522 }
4523 msleep(50);
4524 exit_cnt++;
4525 }
4526 }
4527 return ret;
4528 }
4529
4530 /**
4531 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4532  *  @sp : private member of the device structure (a pointer to the
4533  *  s2io_nic structure).
4533 * @eeprom : pointer to the user level structure provided by ethtool,
4534 * containing all relevant information.
4535  *  @data_buf : user buffer into which the values read from the EEPROM are stored.
4536  *  Description: Reads the values stored in the EEPROM at the given offset
4537  *  for a given length. Stores these values in the input argument data
4538  *  buffer 'data_buf' and returns them to the caller (ethtool).
4539 * Return value:
4540 * int 0 on success
4541 */
4542
4543 static int s2io_ethtool_geeprom(struct net_device *dev,
4544 struct ethtool_eeprom *eeprom, u8 * data_buf)
4545 {
4546 u32 i, valid;
4547 u64 data;
4548 nic_t *sp = dev->priv;
4549
4550 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4551
4552 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4553 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4554
4555 for (i = 0; i < eeprom->len; i += 4) {
4556 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4557 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4558 return -EFAULT;
4559 }
4560 valid = INV(data);
4561 memcpy((data_buf + i), &valid, 4);
4562 }
4563 return 0;
4564 }
4565
4566 /**
4567 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4568 * @sp : private member of the device structure, which is a pointer to the
4569 * s2io_nic structure.
4570 * @eeprom : pointer to the user level structure provided by ethtool,
4571 * containing all relevant information.
4572  * @data_buf : user-defined value to be written into the EEPROM.
4573 * Description:
4574 * Tries to write the user provided value in the Eeprom, at the offset
4575 * given by the user.
4576 * Return value:
4577 * 0 on success, -EFAULT on failure.
4578 */
4579
4580 static int s2io_ethtool_seeprom(struct net_device *dev,
4581 struct ethtool_eeprom *eeprom,
4582 u8 * data_buf)
4583 {
4584 int len = eeprom->len, cnt = 0;
4585 u64 valid = 0, data;
4586 nic_t *sp = dev->priv;
4587
4588 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4589 DBG_PRINT(ERR_DBG,
4590 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4591 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
4592 eeprom->magic);
4593 return -EFAULT;
4594 }
4595
4596 while (len) {
4597 data = (u32) data_buf[cnt] & 0x000000FF;
4598 if (data) {
4599 valid = (u32) (data << 24);
4600 } else
4601 valid = data;
4602
4603 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4604 DBG_PRINT(ERR_DBG,
4605 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4606 DBG_PRINT(ERR_DBG,
4607 "write into the specified offset\n");
4608 return -EFAULT;
4609 }
4610 cnt++;
4611 len--;
4612 }
4613
4614 return 0;
4615 }
4616
4617 /**
4618 * s2io_register_test - reads and writes into all clock domains.
4619 * @sp : private member of the device structure, which is a pointer to the
4620 * s2io_nic structure.
4621  * @data : variable that returns the result of each of the tests conducted
4622  * by the driver.
4623 * Description:
4624 * Read and write into all clock domains. The NIC has 3 clock domains,
4625 * see that registers in all the three regions are accessible.
4626 * Return value:
4627 * 0 on success.
4628 */
4629
4630 static int s2io_register_test(nic_t * sp, uint64_t * data)
4631 {
4632 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4633 u64 val64 = 0, exp_val;
4634 int fail = 0;
4635
4636 val64 = readq(&bar0->pif_rd_swapper_fb);
4637 if (val64 != 0x123456789abcdefULL) {
4638 fail = 1;
4639 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4640 }
4641
4642 val64 = readq(&bar0->rmac_pause_cfg);
4643 if (val64 != 0xc000ffff00000000ULL) {
4644 fail = 1;
4645 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4646 }
4647
4648 val64 = readq(&bar0->rx_queue_cfg);
4649 if (sp->device_type == XFRAME_II_DEVICE)
4650 exp_val = 0x0404040404040404ULL;
4651 else
4652 exp_val = 0x0808080808080808ULL;
4653 if (val64 != exp_val) {
4654 fail = 1;
4655 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4656 }
4657
4658 val64 = readq(&bar0->xgxs_efifo_cfg);
4659 if (val64 != 0x000000001923141EULL) {
4660 fail = 1;
4661 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4662 }
4663
4664 val64 = 0x5A5A5A5A5A5A5A5AULL;
4665 writeq(val64, &bar0->xmsi_data);
4666 val64 = readq(&bar0->xmsi_data);
4667 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4668 fail = 1;
4669 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4670 }
4671
4672 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4673 writeq(val64, &bar0->xmsi_data);
4674 val64 = readq(&bar0->xmsi_data);
4675 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4676 fail = 1;
4677 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4678 }
4679
4680 *data = fail;
4681 return fail;
4682 }
4683
4684 /**
4685  * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
4686 * @sp : private member of the device structure, which is a pointer to the
4687 * s2io_nic structure.
4688 * @data:variable that returns the result of each of the test conducted by
4689 * the driver.
4690 * Description:
4691  * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
4692 * register.
4693 * Return value:
4694 * 0 on success.
4695 */
4696
4697 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4698 {
4699 int fail = 0;
4700 u64 ret_data, org_4F0, org_7F0;
4701 u8 saved_4F0 = 0, saved_7F0 = 0;
4702 struct net_device *dev = sp->dev;
4703
4704 /* Test Write Error at offset 0 */
4705 /* Note that SPI interface allows write access to all areas
4706 * of EEPROM. Hence doing all negative testing only for Xframe I.
4707 */
4708 if (sp->device_type == XFRAME_I_DEVICE)
4709 if (!write_eeprom(sp, 0, 0, 3))
4710 fail = 1;
4711
4712 /* Save current values at offsets 0x4F0 and 0x7F0 */
4713 if (!read_eeprom(sp, 0x4F0, &org_4F0))
4714 saved_4F0 = 1;
4715 if (!read_eeprom(sp, 0x7F0, &org_7F0))
4716 saved_7F0 = 1;
4717
4718 /* Test Write at offset 4f0 */
4719 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
4720 fail = 1;
4721 if (read_eeprom(sp, 0x4F0, &ret_data))
4722 fail = 1;
4723
4724 if (ret_data != 0x012345) {
4725 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data);
4726 fail = 1;
4727 }
4728
4729 	/* Reset the EEPROM data to FFFF */
4730 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
4731
4732 /* Test Write Request Error at offset 0x7c */
4733 if (sp->device_type == XFRAME_I_DEVICE)
4734 if (!write_eeprom(sp, 0x07C, 0, 3))
4735 fail = 1;
4736
4737 /* Test Write Request at offset 0x7f0 */
4738 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
4739 fail = 1;
4740 if (read_eeprom(sp, 0x7F0, &ret_data))
4741 fail = 1;
4742
4743 if (ret_data != 0x012345) {
4744 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data);
4745 fail = 1;
4746 }
4747
4748 	/* Reset the EEPROM data to FFFF */
4749 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
4750
4751 if (sp->device_type == XFRAME_I_DEVICE) {
4752 /* Test Write Error at offset 0x80 */
4753 if (!write_eeprom(sp, 0x080, 0, 3))
4754 fail = 1;
4755
4756 /* Test Write Error at offset 0xfc */
4757 if (!write_eeprom(sp, 0x0FC, 0, 3))
4758 fail = 1;
4759
4760 /* Test Write Error at offset 0x100 */
4761 if (!write_eeprom(sp, 0x100, 0, 3))
4762 fail = 1;
4763
4764 /* Test Write Error at offset 4ec */
4765 if (!write_eeprom(sp, 0x4EC, 0, 3))
4766 fail = 1;
4767 }
4768
4769 /* Restore values at offsets 0x4F0 and 0x7F0 */
4770 if (saved_4F0)
4771 write_eeprom(sp, 0x4F0, org_4F0, 3);
4772 if (saved_7F0)
4773 write_eeprom(sp, 0x7F0, org_7F0, 3);
4774
4775 *data = fail;
4776 return fail;
4777 }
4778
4779 /**
4780  * s2io_bist_test - invokes the MemBist test of the card.
4781 * @sp : private member of the device structure, which is a pointer to the
4782 * s2io_nic structure.
4783 * @data:variable that returns the result of each of the test conducted by
4784 * the driver.
4785 * Description:
4786 * This invokes the MemBist test of the card. We give around
4787  * 2 secs for the test to complete. If it's still not complete
4788  * within this period, we consider that the test failed.
4789 * Return value:
4790 * 0 on success and -1 on failure.
4791 */
4792
4793 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4794 {
4795 u8 bist = 0;
4796 int cnt = 0, ret = -1;
4797
4798 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4799 bist |= PCI_BIST_START;
4800 	pci_write_config_byte(sp->pdev, PCI_BIST, bist);
4801
4802 while (cnt < 20) {
4803 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4804 if (!(bist & PCI_BIST_START)) {
4805 *data = (bist & PCI_BIST_CODE_MASK);
4806 ret = 0;
4807 break;
4808 }
4809 msleep(100);
4810 cnt++;
4811 }
4812
4813 return ret;
4814 }
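/*
 * Note on the result: *data receives the BIST completion code
 * (bist & PCI_BIST_CODE_MASK); per the PCI specification a completion
 * code of zero means the device passed its built-in self test.
 */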
4815
4816 /**
4817  * s2io_link_test - verifies the link state of the NIC
4818  * @sp : private member of the device structure, which is a pointer to the
4819 * s2io_nic structure.
4820 * @data: variable that returns the result of each of the test conducted by
4821 * the driver.
4822 * Description:
4823 * The function verifies the link state of the NIC and updates the input
4824 * argument 'data' appropriately.
4825 * Return value:
4826 * 0 on success.
4827 */
4828
4829 static int s2io_link_test(nic_t * sp, uint64_t * data)
4830 {
4831 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4832 u64 val64;
4833
4834 val64 = readq(&bar0->adapter_status);
4835 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4836 *data = 1;
4837
4838 return 0;
4839 }
4840
4841 /**
4842 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4843  * @sp : private member of the device structure, which is a pointer to the
4844 * s2io_nic structure.
4845  * @data : variable that returns the result of each of the tests
4846  * conducted by the driver.
4847 * Description:
4848  * This is one of the offline tests that checks the read and write
4849 * access to the RldRam chip on the NIC.
4850 * Return value:
4851 * 0 on success.
4852 */
4853
4854 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4855 {
4856 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4857 u64 val64;
4858 int cnt, iteration = 0, test_fail = 0;
4859
4860 val64 = readq(&bar0->adapter_control);
4861 val64 &= ~ADAPTER_ECC_EN;
4862 writeq(val64, &bar0->adapter_control);
4863
4864 val64 = readq(&bar0->mc_rldram_test_ctrl);
4865 val64 |= MC_RLDRAM_TEST_MODE;
4866 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4867
4868 val64 = readq(&bar0->mc_rldram_mrs);
4869 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4870 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4871
4872 val64 |= MC_RLDRAM_MRS_ENABLE;
4873 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4874
4875 while (iteration < 2) {
4876 val64 = 0x55555555aaaa0000ULL;
4877 if (iteration == 1) {
4878 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4879 }
4880 writeq(val64, &bar0->mc_rldram_test_d0);
4881
4882 val64 = 0xaaaa5a5555550000ULL;
4883 if (iteration == 1) {
4884 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4885 }
4886 writeq(val64, &bar0->mc_rldram_test_d1);
4887
4888 val64 = 0x55aaaaaaaa5a0000ULL;
4889 if (iteration == 1) {
4890 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4891 }
4892 writeq(val64, &bar0->mc_rldram_test_d2);
4893
4894 val64 = (u64) (0x0000003ffffe0100ULL);
4895 writeq(val64, &bar0->mc_rldram_test_add);
4896
4897 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4898 MC_RLDRAM_TEST_GO;
4899 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4900
4901 for (cnt = 0; cnt < 5; cnt++) {
4902 val64 = readq(&bar0->mc_rldram_test_ctrl);
4903 if (val64 & MC_RLDRAM_TEST_DONE)
4904 break;
4905 msleep(200);
4906 }
4907
4908 if (cnt == 5)
4909 break;
4910
4911 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4912 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4913
4914 for (cnt = 0; cnt < 5; cnt++) {
4915 val64 = readq(&bar0->mc_rldram_test_ctrl);
4916 if (val64 & MC_RLDRAM_TEST_DONE)
4917 break;
4918 msleep(500);
4919 }
4920
4921 if (cnt == 5)
4922 break;
4923
4924 val64 = readq(&bar0->mc_rldram_test_ctrl);
4925 if (!(val64 & MC_RLDRAM_TEST_PASS))
4926 test_fail = 1;
4927
4928 iteration++;
4929 }
4930
4931 *data = test_fail;
4932
4933 /* Bring the adapter out of test mode */
4934 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
4935
4936 return test_fail;
4937 }
4938
4939 /**
4940  * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
4941 * @sp : private member of the device structure, which is a pointer to the
4942 * s2io_nic structure.
4943  * @ethtest : pointer to an ethtool command specific structure that will be
4944 * returned to the user.
4945 * @data : variable that returns the result of each of the test
4946 * conducted by the driver.
4947 * Description:
4948  * This function conducts 5 tests (4 offline and 1 online) to determine
4949 * the health of the card.
4950 * Return value:
4951 * void
4952 */
4953
4954 static void s2io_ethtool_test(struct net_device *dev,
4955 struct ethtool_test *ethtest,
4956 uint64_t * data)
4957 {
4958 nic_t *sp = dev->priv;
4959 int orig_state = netif_running(sp->dev);
4960
4961 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4962 /* Offline Tests. */
4963 if (orig_state)
4964 s2io_close(sp->dev);
4965
4966 if (s2io_register_test(sp, &data[0]))
4967 ethtest->flags |= ETH_TEST_FL_FAILED;
4968
4969 s2io_reset(sp);
4970
4971 if (s2io_rldram_test(sp, &data[3]))
4972 ethtest->flags |= ETH_TEST_FL_FAILED;
4973
4974 s2io_reset(sp);
4975
4976 if (s2io_eeprom_test(sp, &data[1]))
4977 ethtest->flags |= ETH_TEST_FL_FAILED;
4978
4979 if (s2io_bist_test(sp, &data[4]))
4980 ethtest->flags |= ETH_TEST_FL_FAILED;
4981
4982 if (orig_state)
4983 s2io_open(sp->dev);
4984
4985 data[2] = 0;
4986 } else {
4987 /* Online Tests. */
4988 if (!orig_state) {
4989 DBG_PRINT(ERR_DBG,
4990 "%s: is not up, cannot run test\n",
4991 dev->name);
4992 data[0] = -1;
4993 data[1] = -1;
4994 data[2] = -1;
4995 data[3] = -1;
4996 data[4] = -1;
4997 }
4998
4999 if (s2io_link_test(sp, &data[2]))
5000 ethtest->flags |= ETH_TEST_FL_FAILED;
5001
5002 data[0] = 0;
5003 data[1] = 0;
5004 data[3] = 0;
5005 data[4] = 0;
5006 }
5007 }
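/*
 * Reading aid for the result array filled in above: data[0] holds the
 * register test result, data[1] the EEPROM test, data[2] the link
 * test, data[3] the RLDRAM test and data[4] the BIST result,
 * presumably in the order of the test-name strings reported to
 * ethtool.
 */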
5008
5009 static void s2io_get_ethtool_stats(struct net_device *dev,
5010 struct ethtool_stats *estats,
5011 u64 * tmp_stats)
5012 {
5013 int i = 0;
5014 nic_t *sp = dev->priv;
5015 StatInfo_t *stat_info = sp->mac_control.stats_info;
5016
5017 s2io_updt_stats(sp);
5018 tmp_stats[i++] =
5019 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5020 le32_to_cpu(stat_info->tmac_frms);
5021 tmp_stats[i++] =
5022 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5023 le32_to_cpu(stat_info->tmac_data_octets);
5024 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5025 tmp_stats[i++] =
5026 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5027 le32_to_cpu(stat_info->tmac_mcst_frms);
5028 tmp_stats[i++] =
5029 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5030 le32_to_cpu(stat_info->tmac_bcst_frms);
5031 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5032 tmp_stats[i++] =
5033 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5034 le32_to_cpu(stat_info->tmac_any_err_frms);
5035 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5036 tmp_stats[i++] =
5037 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5038 le32_to_cpu(stat_info->tmac_vld_ip);
5039 tmp_stats[i++] =
5040 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5041 le32_to_cpu(stat_info->tmac_drop_ip);
5042 tmp_stats[i++] =
5043 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5044 le32_to_cpu(stat_info->tmac_icmp);
5045 tmp_stats[i++] =
5046 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5047 le32_to_cpu(stat_info->tmac_rst_tcp);
5048 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5049 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5050 le32_to_cpu(stat_info->tmac_udp);
5051 tmp_stats[i++] =
5052 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5053 le32_to_cpu(stat_info->rmac_vld_frms);
5054 tmp_stats[i++] =
5055 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5056 le32_to_cpu(stat_info->rmac_data_octets);
5057 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5058 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5059 tmp_stats[i++] =
5060 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5061 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5062 tmp_stats[i++] =
5063 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5064 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5065 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5066 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5067 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5068 tmp_stats[i++] =
5069 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5070 le32_to_cpu(stat_info->rmac_discarded_frms);
5071 tmp_stats[i++] =
5072 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5073 le32_to_cpu(stat_info->rmac_usized_frms);
5074 tmp_stats[i++] =
5075 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5076 le32_to_cpu(stat_info->rmac_osized_frms);
5077 tmp_stats[i++] =
5078 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5079 le32_to_cpu(stat_info->rmac_frag_frms);
5080 tmp_stats[i++] =
5081 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5082 le32_to_cpu(stat_info->rmac_jabber_frms);
5083 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5084 le32_to_cpu(stat_info->rmac_ip);
5085 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5086 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5087 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5088 le32_to_cpu(stat_info->rmac_drop_ip);
5089 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5090 le32_to_cpu(stat_info->rmac_icmp);
5091 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5092 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5093 le32_to_cpu(stat_info->rmac_udp);
5094 tmp_stats[i++] =
5095 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5096 le32_to_cpu(stat_info->rmac_err_drp_udp);
5097 tmp_stats[i++] =
5098 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5099 le32_to_cpu(stat_info->rmac_pause_cnt);
5100 tmp_stats[i++] =
5101 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5102 le32_to_cpu(stat_info->rmac_accepted_ip);
5103 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5104 tmp_stats[i++] = 0;
5105 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5106 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5107 }
5108
5109 int s2io_ethtool_get_regs_len(struct net_device *dev)
5110 {
5111 return (XENA_REG_SPACE);
5112 }
5113
5114
5115 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5116 {
5117 nic_t *sp = dev->priv;
5118
5119 return (sp->rx_csum);
5120 }
5121 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5122 {
5123 nic_t *sp = dev->priv;
5124
5125 if (data)
5126 sp->rx_csum = 1;
5127 else
5128 sp->rx_csum = 0;
5129
5130 return 0;
5131 }
5132 int s2io_get_eeprom_len(struct net_device *dev)
5133 {
5134 return (XENA_EEPROM_SPACE);
5135 }
5136
5137 int s2io_ethtool_self_test_count(struct net_device *dev)
5138 {
5139 return (S2IO_TEST_LEN);
5140 }
5141 void s2io_ethtool_get_strings(struct net_device *dev,
5142 u32 stringset, u8 * data)
5143 {
5144 switch (stringset) {
5145 case ETH_SS_TEST:
5146 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5147 break;
5148 case ETH_SS_STATS:
5149 memcpy(data, &ethtool_stats_keys,
5150 sizeof(ethtool_stats_keys));
5151 }
5152 }
5153 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5154 {
5155 return (S2IO_STAT_LEN);
5156 }
5157
5158 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5159 {
5160 if (data)
5161 dev->features |= NETIF_F_IP_CSUM;
5162 else
5163 dev->features &= ~NETIF_F_IP_CSUM;
5164
5165 return 0;
5166 }
5167
5168
5169 static struct ethtool_ops netdev_ethtool_ops = {
5170 .get_settings = s2io_ethtool_gset,
5171 .set_settings = s2io_ethtool_sset,
5172 .get_drvinfo = s2io_ethtool_gdrvinfo,
5173 .get_regs_len = s2io_ethtool_get_regs_len,
5174 .get_regs = s2io_ethtool_gregs,
5175 .get_link = ethtool_op_get_link,
5176 .get_eeprom_len = s2io_get_eeprom_len,
5177 .get_eeprom = s2io_ethtool_geeprom,
5178 .set_eeprom = s2io_ethtool_seeprom,
5179 .get_pauseparam = s2io_ethtool_getpause_data,
5180 .set_pauseparam = s2io_ethtool_setpause_data,
5181 .get_rx_csum = s2io_ethtool_get_rx_csum,
5182 .set_rx_csum = s2io_ethtool_set_rx_csum,
5183 .get_tx_csum = ethtool_op_get_tx_csum,
5184 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5185 .get_sg = ethtool_op_get_sg,
5186 .set_sg = ethtool_op_set_sg,
5187 #ifdef NETIF_F_TSO
5188 .get_tso = ethtool_op_get_tso,
5189 .set_tso = ethtool_op_set_tso,
5190 #endif
5191 .self_test_count = s2io_ethtool_self_test_count,
5192 .self_test = s2io_ethtool_test,
5193 .get_strings = s2io_ethtool_get_strings,
5194 .phys_id = s2io_ethtool_idnic,
5195 .get_stats_count = s2io_ethtool_get_stats_count,
5196 .get_ethtool_stats = s2io_get_ethtool_stats
5197 };
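/*
 * A sketch of how this ops table gets attached (the actual assignment
 * happens in the probe path, outside this section):
 *
 *	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 *
 * after which ethtool requests on the interface are routed to the
 * handlers above.
 */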
5198
5199 /**
5200 * s2io_ioctl - Entry point for the Ioctl
5201 * @dev : Device pointer.
5202  *  @ifr : An IOCTL specific structure, that can contain a pointer to
5203 * a proprietary structure used to pass information to the driver.
5204 * @cmd : This is used to distinguish between the different commands that
5205 * can be passed to the IOCTL functions.
5206 * Description:
5207  *  Currently no special functionality is supported in IOCTL, hence the
5208  *  function always returns -EOPNOTSUPP.
5209 */
5210
5211 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5212 {
5213 return -EOPNOTSUPP;
5214 }
5215
5216 /**
5217 * s2io_change_mtu - entry point to change MTU size for the device.
5218 * @dev : device pointer.
5219 * @new_mtu : the new MTU size for the device.
5220 * Description: A driver entry point to change MTU size for the device.
5221 * Before changing the MTU the device must be stopped.
5222 * Return value:
5223 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5224 * file on failure.
5225 */
5226
5227 int s2io_change_mtu(struct net_device *dev, int new_mtu)
5228 {
5229 nic_t *sp = dev->priv;
5230
5231 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5232 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5233 dev->name);
5234 return -EPERM;
5235 }
5236
5237 dev->mtu = new_mtu;
5238 if (netif_running(dev)) {
5239 s2io_card_down(sp);
5240 netif_stop_queue(dev);
5241 if (s2io_card_up(sp)) {
5242 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5243 __FUNCTION__);
5244 }
5245 if (netif_queue_stopped(dev))
5246 netif_wake_queue(dev);
5247 } else { /* Device is down */
5248 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5249 u64 val64 = new_mtu;
5250
5251 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5252 }
5253
5254 return 0;
5255 }
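/*
 * Usage sketch: this entry point backs the standard MTU change
 * request, e.g. "ifconfig eth0 mtu 9000" (interface name
 * hypothetical). With the device down only rmac_max_pyld_len is
 * reprogrammed; with it up, the card is brought down and back up
 * around the change.
 */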
5256
5257 /**
5258 * s2io_tasklet - Bottom half of the ISR.
5259  * @dev_addr : address of the net_device structure, passed as an unsigned long.
5260 * Description:
5261 * This is the tasklet or the bottom half of the ISR. This is
5262 * an extension of the ISR which is scheduled by the scheduler to be run
5263 * when the load on the CPU is low. All low priority tasks of the ISR can
5264 * be pushed into the tasklet. For now the tasklet is used only to
5265 * replenish the Rx buffers in the Rx buffer descriptors.
5266 * Return value:
5267 * void.
5268 */
5269
5270 static void s2io_tasklet(unsigned long dev_addr)
5271 {
5272 struct net_device *dev = (struct net_device *) dev_addr;
5273 nic_t *sp = dev->priv;
5274 int i, ret;
5275 mac_info_t *mac_control;
5276 struct config_param *config;
5277
5278 mac_control = &sp->mac_control;
5279 config = &sp->config;
5280
5281 if (!TASKLET_IN_USE) {
5282 for (i = 0; i < config->rx_ring_num; i++) {
5283 ret = fill_rx_buffers(sp, i);
5284 if (ret == -ENOMEM) {
5285 DBG_PRINT(ERR_DBG, "%s: Out of ",
5286 dev->name);
5287 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5288 break;
5289 } else if (ret == -EFILL) {
5290 DBG_PRINT(ERR_DBG,
5291 "%s: Rx Ring %d is full\n",
5292 dev->name, i);
5293 break;
5294 }
5295 }
5296 clear_bit(0, (&sp->tasklet_status));
5297 }
5298 }
5299
5300 /**
5301  * s2io_set_link - Sets the link status
5302  * @data: long pointer to the device private structure
5303 * Description: Sets the link status for the adapter
5304 */
5305
5306 static void s2io_set_link(unsigned long data)
5307 {
5308 nic_t *nic = (nic_t *) data;
5309 struct net_device *dev = nic->dev;
5310 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5311 register u64 val64;
5312 u16 subid;
5313
5314 if (test_and_set_bit(0, &(nic->link_state))) {
5315 /* The card is being reset, no point doing anything */
5316 return;
5317 }
5318
5319 subid = nic->pdev->subsystem_device;
5320 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5321 /*
5322 		 * Allow a small delay for the NIC's self-initiated
5323 * cleanup to complete.
5324 */
5325 msleep(100);
5326 }
5327
5328 val64 = readq(&bar0->adapter_status);
5329 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
5330 if (LINK_IS_UP(val64)) {
5331 val64 = readq(&bar0->adapter_control);
5332 val64 |= ADAPTER_CNTL_EN;
5333 writeq(val64, &bar0->adapter_control);
5334 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5335 subid)) {
5336 val64 = readq(&bar0->gpio_control);
5337 val64 |= GPIO_CTRL_GPIO_0;
5338 writeq(val64, &bar0->gpio_control);
5339 val64 = readq(&bar0->gpio_control);
5340 } else {
5341 val64 |= ADAPTER_LED_ON;
5342 writeq(val64, &bar0->adapter_control);
5343 }
5344 if (s2io_link_fault_indication(nic) ==
5345 MAC_RMAC_ERR_TIMER) {
5346 val64 = readq(&bar0->adapter_status);
5347 if (!LINK_IS_UP(val64)) {
5348 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5349 					DBG_PRINT(ERR_DBG, " Link down ");
5350 					DBG_PRINT(ERR_DBG, "after ");
5351 					DBG_PRINT(ERR_DBG, "enabling ");
5352 					DBG_PRINT(ERR_DBG, "device\n");
5353 }
5354 }
5355 if (nic->device_enabled_once == FALSE) {
5356 nic->device_enabled_once = TRUE;
5357 }
5358 s2io_link(nic, LINK_UP);
5359 } else {
5360 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5361 subid)) {
5362 val64 = readq(&bar0->gpio_control);
5363 val64 &= ~GPIO_CTRL_GPIO_0;
5364 writeq(val64, &bar0->gpio_control);
5365 val64 = readq(&bar0->gpio_control);
5366 }
5367 s2io_link(nic, LINK_DOWN);
5368 }
5369 } else { /* NIC is not Quiescent. */
5370 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5371 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5372 netif_stop_queue(dev);
5373 }
5374 clear_bit(0, &(nic->link_state));
5375 }
5376
5377 static void s2io_card_down(nic_t * sp)
5378 {
5379 int cnt = 0;
5380 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5381 unsigned long flags;
5382 register u64 val64 = 0;
5383
5384 del_timer_sync(&sp->alarm_timer);
5385 /* If s2io_set_link task is executing, wait till it completes. */
5386 while (test_and_set_bit(0, &(sp->link_state))) {
5387 msleep(50);
5388 }
5389 atomic_set(&sp->card_state, CARD_DOWN);
5390
5391 /* disable Tx and Rx traffic on the NIC */
5392 stop_nic(sp);
5393
5394 /* Kill tasklet. */
5395 tasklet_kill(&sp->task);
5396
5397 /* Check if the device is Quiescent and then Reset the NIC */
5398 do {
5399 val64 = readq(&bar0->adapter_status);
5400 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
5401 break;
5402 }
5403
5404 msleep(50);
5405 cnt++;
5406 if (cnt == 10) {
5407 DBG_PRINT(ERR_DBG,
5408 "s2io_close:Device not Quiescent ");
5409 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
5410 (unsigned long long) val64);
5411 break;
5412 }
5413 } while (1);
5414 s2io_reset(sp);
5415
5416 /* Waiting till all Interrupt handlers are complete */
5417 cnt = 0;
5418 do {
5419 msleep(10);
5420 if (!atomic_read(&sp->isr_cnt))
5421 break;
5422 cnt++;
5423 } while(cnt < 5);
5424
5425 spin_lock_irqsave(&sp->tx_lock, flags);
5426 /* Free all Tx buffers */
5427 free_tx_buffers(sp);
5428 spin_unlock_irqrestore(&sp->tx_lock, flags);
5429
5430 /* Free all Rx buffers */
5431 spin_lock_irqsave(&sp->rx_lock, flags);
5432 free_rx_buffers(sp);
5433 spin_unlock_irqrestore(&sp->rx_lock, flags);
5434
5435 clear_bit(0, &(sp->link_state));
5436 }
5437
static int s2io_card_up(nic_t *sp)
{
	int i, ret = 0;
	mac_info_t *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		return -ENODEV;
	}

	if (sp->intr_type == MSI)
		ret = s2io_enable_msi(sp);
	else if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	}

	/* Initialize the Rx buffers on each of the configured Rx rings. */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}

	/* Set the receive mode */
	s2io_set_multicast(dev);

	/* Enable the tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/* Enable Rx traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		tasklet_kill(&sp->task);
		s2io_reset(sp);
		free_irq(dev->irq, dev);
		free_rx_buffers(sp);
		return -ENODEV;
	}

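	/* Arm the alarm timer to fire in half a second (HZ/2 ticks). */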
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	atomic_set(&sp->card_state, CARD_UP);
	return 0;
}

/**
 * s2io_restart_nic - Resets the NIC.
 * @data : pointer to the net device structure, passed as an unsigned long.
 * Description:
 * This function is scheduled as deferred work by the s2io_tx_watchdog
 * function to reset the NIC. The idea is to reduce the run time of the
 * watchdog routine, which runs holding a spin lock.
 */

static void s2io_restart_nic(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	nic_t *sp = dev->priv;

	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
			  dev->name);
	}
	netif_wake_queue(dev);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
		  dev->name);
}

/**
 * s2io_tx_watchdog - Watchdog for transmit side.
 * @dev : Pointer to net device structure
 * Description:
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 * If the Interface is jammed in such a situation, the hardware is
 * reset and brought up again (by scheduling s2io_restart_nic) to
 * overcome any problem that might have been caused in the hardware.
 * Return value:
 * void
 */

static void s2io_tx_watchdog(struct net_device *dev)
{
	nic_t *sp = dev->priv;

	if (netif_carrier_ok(dev)) {
		schedule_work(&sp->rst_timer_task);
	}
}

/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data : per-ring structure containing the Rx ring information.
 * @rxdp : pointer to the Rx descriptor carrying the received frame.
 * Description:
 * This function is called by the Rx interrupt service routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so marks it on the
 * SKB, increments the Rx packet count and passes the SKB to the upper
 * layer. If the checksum is wrong, it increments the Rx packet error
 * count, frees the SKB and returns.
 * Return value:
 * SUCCESS if the SKB was passed up, 0 if it was dropped due to a
 * receive error.
 */
static int rx_osm_handler(ring_info_t *ring_data, RxD_t *rxdp)
{
	nic_t *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) sp->dev;
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;

	skb->dev = dev;
	if (rxdp->Control_1 & RXD_T_CODE) {
		unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
		DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
			  dev->name, err);
		dev_kfree_skb(skb);
		sp->stats.rx_crc_errors++;
		atomic_dec(&sp->rx_bufs_left[ring_no]);
		rxdp->Host_Control = 0;
		return 0;
	}

	/* Updating statistics */
	rxdp->Host_Control = 0;
	sp->rx_pkt_count++;
	sp->stats.rx_packets++;
	if (sp->rxd_mode == RXD_MODE_1) {
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		sp->stats.rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode >= RXD_MODE_3A) {
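		/*
		 * 2/3-buffer modes: buffer 0 holds the header and is
		 * copied into the skb headroom; the payload is accounted
		 * for below (buffer 2 directly, or via the frag_list in
		 * 3-buffer mode).
		 */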
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		buffAdd_t *ba = &ring_data->ba[get_block][get_off];
		sp->stats.rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);

		if (sp->rxd_mode == RXD_MODE_3A) {
			int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);

			skb_put(skb, buf1_len);
			skb->len += buf2_len;
			skb->data_len += buf2_len;
			skb->truesize += buf2_len;
			skb_put(skb_shinfo(skb)->frag_list, buf2_len);
			sp->stats.rx_bytes += buf1_len;

		} else
			skb_put(skb, buf2_len);
	}

	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_S2IO_NAPI
	if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
		/* Queueing the vlan frame to the upper layer */
		vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					 RXD_GET_VLAN_TAG(rxdp->Control_2));
	} else {
		netif_receive_skb(skb);
	}
#else
	if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
		/* Queueing the vlan frame to the upper layer */
		vlan_hwaccel_rx(skb, sp->vlgrp,
				RXD_GET_VLAN_TAG(rxdp->Control_2));
	} else {
		netif_rx(skb);
	}
#endif
	dev->last_rx = jiffies;
	atomic_dec(&sp->rx_bufs_left[ring_no]);
	return SUCCESS;
}

/**
 * s2io_link - stops/starts the Tx queue.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @link : indicates whether the link is UP or DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
 * interrupt handler whenever a link change interrupt comes up.
 * Return value:
 * void.
 */

void s2io_link(nic_t *sp, int link)
{
	struct net_device *dev = (struct net_device *) sp->dev;

	if (link != sp->last_link_state) {
		if (link == LINK_DOWN) {
			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		} else {
			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
			netif_carrier_on(dev);
		}
	}
	sp->last_link_state = link;
}

/**
 * get_xena_rev_id - to identify revision ID of xena.
 * @pdev : PCI device structure
 * Description:
 * Function to identify the Revision ID of xena.
 * Return value:
 * returns the revision ID of the device.
 */

int get_xena_rev_id(struct pci_dev *pdev)
{
	u8 id = 0;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
	return id;
}

/**
 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description:
 * This function initializes a few of the PCI and PCI-X configuration
 * registers with recommended values.
 * Return value:
 * void
 */

static void s2io_init_pci(nic_t *sp)
{
	u16 pci_cmd = 0, pcix_cmd = 0;

	/* Enable Data Parity Error Recovery in the PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (pcix_cmd | 1));
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));

	/* Set the PErr Response bit in the PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);

	/* Forcibly disable the relaxed ordering capability of the card. */
	pcix_cmd &= 0xfffd;
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      pcix_cmd);
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
}

MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(tx_fifo_num, int, 0);
module_param(rx_ring_num, int, 0);
module_param(rx_ring_mode, int, 0);
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
module_param(use_continuous_tx_intrs, int, 1);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
module_param(bimodal, bool, 0);
module_param(l3l4hdr_size, int, 0);
#ifndef CONFIG_S2IO_NAPI
module_param(indicate_max_pkts, int, 0);
#endif
module_param(rxsync_frequency, int, 0);
module_param(intr_type, int, 0);

/**
 * s2io_init_nic - Initialization of the adapter.
 * @pdev : structure containing the PCI related information of the device.
 * @pre : list of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization, including memory and device structure
 * and initialization of the device private variables, is done. Also the
 * swapper control register is initialized to enable reads and writes
 * into the I/O registers of the device.
 * Return value:
 * returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	nic_t *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	XENA_dev_config_t __iomem *bar0 = NULL;
	u16 subid;
	mac_info_t *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;

#ifdef CONFIG_S2IO_NAPI
	if (dev_intr_type != INTA) {
		DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X "
			  "is enabled. Defaulting to INTA\n");
		dev_intr_type = INTA;
	} else
		DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
#endif

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for "
				  "consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	if ((dev_intr_type == MSI_X) &&
	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
			  "Defaulting to INTA\n");
		dev_intr_type = INTA;
	}
	if (dev_intr_type != MSI_X) {
		if (pci_request_regions(pdev, s2io_driver_name)) {
			DBG_PRINT(ERR_DBG, "Request Regions failed\n");
			pci_disable_device(pdev);
			return -ENODEV;
		}
	} else {
		if (!(request_mem_region(pci_resource_start(pdev, 0),
			 pci_resource_len(pdev, 0), s2io_driver_name))) {
			DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
			pci_disable_device(pdev);
			return -ENODEV;
		}
		if (!(request_mem_region(pci_resource_start(pdev, 2),
			 pci_resource_len(pdev, 2), s2io_driver_name))) {
			DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
			release_mem_region(pci_resource_start(pdev, 0),
					   pci_resource_len(pdev, 0));
			pci_disable_device(pdev);
			return -ENODEV;
		}
	}

	dev = alloc_etherdev(sizeof(nic_t));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to the s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(nic_t));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
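	/* Select the 1, 2 or 3 buffer Rx descriptor mode requested at load time. */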
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;
	if (rx_ring_mode == 3)
		sp->rxd_mode = RXD_MODE_3A;

	sp->intr_type = dev_intr_type;

	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Tx side parameters. */
	if (tx_fifo_len[0] == 0)
		tx_fifo_len[0] = DEFAULT_FIFO_LEN;	/* Default value. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* Mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

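	/*
	 * Default to utilization based Tx interrupts; fall back to
	 * per-list interrupts below if any FIFO is too short for that
	 * scheme.
	 */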
	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	config->max_txds = MAX_SKB_FRAGS + 1;

	/* Rx side parameters. */
	if (rx_ring_sz[0] == 0)
		rx_ring_sz[0] = SMALL_BLK_CNT;	/* Default value. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/* Initialize the number of ISRs currently running */
	atomic_set(&sp->isr_cnt, 0);

	/* Initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  __FUNCTION__);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
			   pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;
	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;

	/*
	 * will use eth_mac_addr() for dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
#if defined(CONFIG_S2IO_NAPI)
	dev->poll = s2io_poll;
	dev->weight = 32;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef NETIF_F_TSO
	dev->features |= NETIF_F_TSO;
#endif

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task,
		  (void (*)(void *)) s2io_restart_nic, dev);
	INIT_WORK(&sp->set_link_task,
		  (void (*)(void *)) s2io_set_link, sp);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works in the slot it's placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(sp);

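	/*
	 * The station address comes back in rmac_addr_data0_mem with the
	 * most significant byte first; unpack the two 32-bit halves into
	 * def_mac_addr byte by byte.
	 */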
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);

	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/* Set the factory defined MAC address initially */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	atomic_set(&(sp->card_state), 0);
	sp->tasklet_status = 0;
	sp->link_state = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);
#ifndef CONFIG_S2IO_NAPI
	spin_lock_init(&sp->put_lock);
#endif
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}

6138
6139 if (sp->device_type & XFRAME_II_DEVICE) {
6140 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
6141 dev->name);
6142 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6143 get_xena_rev_id(sp->pdev),
6144 s2io_driver_version);
6145 switch(sp->intr_type) {
6146 case INTA:
6147 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6148 break;
6149 case MSI:
6150 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6151 break;
6152 case MSI_X:
6153 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6154 break;
6155 }
6156
6157 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6158 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6159 sp->def_mac_addr[0].mac_addr[0],
6160 sp->def_mac_addr[0].mac_addr[1],
6161 sp->def_mac_addr[0].mac_addr[2],
6162 sp->def_mac_addr[0].mac_addr[3],
6163 sp->def_mac_addr[0].mac_addr[4],
6164 sp->def_mac_addr[0].mac_addr[5]);
6165 mode = s2io_print_pci_mode(sp);
6166 if (mode < 0) {
6167 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
6168 ret = -EBADSLT;
6169 goto set_swap_failed;
6170 }
6171 } else {
6172 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
6173 dev->name);
6174 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6175 get_xena_rev_id(sp->pdev),
6176 s2io_driver_version);
6177 switch(sp->intr_type) {
6178 case INTA:
6179 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6180 break;
6181 case MSI:
6182 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6183 break;
6184 case MSI_X:
6185 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6186 break;
6187 }
6188 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6189 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6190 sp->def_mac_addr[0].mac_addr[0],
6191 sp->def_mac_addr[0].mac_addr[1],
6192 sp->def_mac_addr[0].mac_addr[2],
6193 sp->def_mac_addr[0].mac_addr[3],
6194 sp->def_mac_addr[0].mac_addr[4],
6195 sp->def_mac_addr[0].mac_addr[5]);
6196 }
6197 if (sp->rxd_mode == RXD_MODE_3B)
6198 DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
6199 "enabled\n",dev->name);
6200 if (sp->rxd_mode == RXD_MODE_3A)
6201 DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
6202 "enabled\n",dev->name);
6203
6204 /* Initialize device name */
6205 strcpy(sp->name, dev->name);
6206 if (sp->device_type & XFRAME_II_DEVICE)
6207 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
6208 else
6209 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
6210
6211 /* Initialize bimodal Interrupts */
6212 sp->config.bimodal = bimodal;
6213 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
6214 sp->config.bimodal = 0;
6215 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
6216 dev->name);
6217 }
6218
6219 /*
6220 * Make Link state as off at this point, when the Link change
6221 * interrupt comes the state will be automatically changed to
6222 * the right state.
6223 */
6224 netif_carrier_off(dev);
6225
6226 return 0;
6227
      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	if (dev_intr_type != MSI_X)
		pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2),
				   pci_resource_len(pdev, 2));
	}
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}

/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	nic_t *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = dev->priv;
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_disable_device(pdev);
	if (sp->intr_type != MSI_X)
		pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2),
				   pci_resource_len(pdev, 2));
	}
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

int __init s2io_starter(void)
{
	return pci_module_init(&s2io_driver);
}

/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);