[NET]: Remove gratuitous use of skb->tail in network drivers.
1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 * in PCI Configuration space.
37 ************************************************************************/
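/*
 * For example, assuming the variables above are exported as module
 * parameters further down in this file, a load-time configuration
 * might look roughly like:
 *     insmod s2io.ko tx_fifo_num=2 rx_ring_num=2
 * The exact parameter names and the syntax for the array parameters
 * depend on how the driver actually registers them.
 */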
38
39 #include <linux/config.h>
40 #include <linux/module.h>
41 #include <linux/types.h>
42 #include <linux/errno.h>
43 #include <linux/ioport.h>
44 #include <linux/pci.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kernel.h>
47 #include <linux/netdevice.h>
48 #include <linux/etherdevice.h>
49 #include <linux/skbuff.h>
50 #include <linux/init.h>
51 #include <linux/delay.h>
52 #include <linux/stddef.h>
53 #include <linux/ioctl.h>
54 #include <linux/timex.h>
55 #include <linux/sched.h>
56 #include <linux/ethtool.h>
57 #include <linux/version.h>
58 #include <linux/workqueue.h>
59
60 #include <asm/io.h>
61 #include <asm/system.h>
62 #include <asm/uaccess.h>
63
64 /* local include */
65 #include "s2io.h"
66 #include "s2io-regs.h"
67
68 /* S2io Driver name & version. */
69 static char s2io_driver_name[] = "s2io";
70 static char s2io_driver_version[] = "Version 1.7.7.1";
71
72 /*
73 * Cards with following subsystem_id have a link state indication
74 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
75 * macro below identifies these cards given the subsystem_id.
76 */
77 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
78 (((subid >= 0x600B) && (subid <= 0x600D)) || \
79 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
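/*
 * For instance, a card with subsystem id 0x640C falls inside the second
 * range above (0x640B - 0x640D), so the macro evaluates to 1, while a
 * subsystem id of 0x6010 matches neither range and yields 0.
 */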
80
81 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
82 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
83 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
84 #define PANIC 1
85 #define LOW 2
86 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
87 {
88 int level = 0;
89 if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
90 level = LOW;
91 if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
92 level = PANIC;
93 }
94 }
95
96 return level;
97 }
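/*
 * Worked example of the thresholds above (numbers purely illustrative):
 * with pkt_cnt[ring] = 1016 and rxb_size = 900 the deficit is 116,
 * which is greater than 16, so the level becomes LOW; if that deficit
 * is also smaller than MAX_RXDS_PER_BLOCK it is promoted to PANIC.
 * A deficit of 16 or less leaves the level at 0.
 */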
98
99 /* Ethtool related variables and Macros. */
100 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
101 "Register test\t(offline)",
102 "Eeprom test\t(offline)",
103 "Link test\t(online)",
104 "RLDRAM test\t(offline)",
105 "BIST Test\t(offline)"
106 };
107
108 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
109 {"tmac_frms"},
110 {"tmac_data_octets"},
111 {"tmac_drop_frms"},
112 {"tmac_mcst_frms"},
113 {"tmac_bcst_frms"},
114 {"tmac_pause_ctrl_frms"},
115 {"tmac_any_err_frms"},
116 {"tmac_vld_ip_octets"},
117 {"tmac_vld_ip"},
118 {"tmac_drop_ip"},
119 {"tmac_icmp"},
120 {"tmac_rst_tcp"},
121 {"tmac_tcp"},
122 {"tmac_udp"},
123 {"rmac_vld_frms"},
124 {"rmac_data_octets"},
125 {"rmac_fcs_err_frms"},
126 {"rmac_drop_frms"},
127 {"rmac_vld_mcst_frms"},
128 {"rmac_vld_bcst_frms"},
129 {"rmac_in_rng_len_err_frms"},
130 {"rmac_long_frms"},
131 {"rmac_pause_ctrl_frms"},
132 {"rmac_discarded_frms"},
133 {"rmac_usized_frms"},
134 {"rmac_osized_frms"},
135 {"rmac_frag_frms"},
136 {"rmac_jabber_frms"},
137 {"rmac_ip"},
138 {"rmac_ip_octets"},
139 {"rmac_hdr_err_ip"},
140 {"rmac_drop_ip"},
141 {"rmac_icmp"},
142 {"rmac_tcp"},
143 {"rmac_udp"},
144 {"rmac_err_drp_udp"},
145 {"rmac_pause_cnt"},
146 {"rmac_accepted_ip"},
147 {"rmac_err_tcp"},
148 };
149
150 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
151 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
152
153 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
154 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
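/*
 * Size check on the macros above: ethtool_stats_keys carries 39 entries
 * and ETH_GSTRING_LEN is 32 bytes, so S2IO_STAT_LEN is 39 and
 * S2IO_STAT_STRINGS_LEN is 39 * 32 = 1248 bytes; the 5 self-test names
 * give S2IO_TEST_LEN = 5 and S2IO_STRINGS_LEN = 160 bytes.
 */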
155
156
157 /*
158 * Constants to be programmed into the Xena's registers, to configure
159 * the XAUI.
160 */
161
162 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
163 #define END_SIGN 0x0
164
165 static u64 default_mdio_cfg[] = {
166 /* Reset PMA PLL */
167 0xC001010000000000ULL, 0xC0010100000000E0ULL,
168 0xC0010100008000E4ULL,
169 /* Remove Reset from PMA PLL */
170 0xC001010000000000ULL, 0xC0010100000000E0ULL,
171 0xC0010100000000E4ULL,
172 END_SIGN
173 };
174
175 static u64 default_dtx_cfg[] = {
176 0x8000051500000000ULL, 0x80000515000000E0ULL,
177 0x80000515D93500E4ULL, 0x8001051500000000ULL,
178 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
179 0x8002051500000000ULL, 0x80020515000000E0ULL,
180 0x80020515F21000E4ULL,
181 /* Set PADLOOPBACKN */
182 0x8002051500000000ULL, 0x80020515000000E0ULL,
183 0x80020515B20000E4ULL, 0x8003051500000000ULL,
184 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
185 0x8004051500000000ULL, 0x80040515000000E0ULL,
186 0x80040515B20000E4ULL, 0x8005051500000000ULL,
187 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
188 SWITCH_SIGN,
189 /* Remove PADLOOPBACKN */
190 0x8002051500000000ULL, 0x80020515000000E0ULL,
191 0x80020515F20000E4ULL, 0x8003051500000000ULL,
192 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
193 0x8004051500000000ULL, 0x80040515000000E0ULL,
194 0x80040515F20000E4ULL, 0x8005051500000000ULL,
195 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
196 END_SIGN
197 };
198
199
200 /*
201 * Constants for Fixing the MacAddress problem seen mostly on
202 * Alpha machines.
203 */
204 static u64 fix_mac[] = {
205 0x0060000000000000ULL, 0x0060600000000000ULL,
206 0x0040600000000000ULL, 0x0000600000000000ULL,
207 0x0020600000000000ULL, 0x0060600000000000ULL,
208 0x0020600000000000ULL, 0x0060600000000000ULL,
209 0x0020600000000000ULL, 0x0060600000000000ULL,
210 0x0020600000000000ULL, 0x0060600000000000ULL,
211 0x0020600000000000ULL, 0x0060600000000000ULL,
212 0x0020600000000000ULL, 0x0060600000000000ULL,
213 0x0020600000000000ULL, 0x0060600000000000ULL,
214 0x0020600000000000ULL, 0x0060600000000000ULL,
215 0x0020600000000000ULL, 0x0060600000000000ULL,
216 0x0020600000000000ULL, 0x0060600000000000ULL,
217 0x0020600000000000ULL, 0x0000600000000000ULL,
218 0x0040600000000000ULL, 0x0060600000000000ULL,
219 END_SIGN
220 };
221
222 /* Module Loadable parameters. */
223 static unsigned int tx_fifo_num = 1;
224 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
225 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
226 static unsigned int rx_ring_num = 1;
227 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
228 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
229 static unsigned int Stats_refresh_time = 4;
230 static unsigned int rmac_pause_time = 65535;
231 static unsigned int mc_pause_threshold_q0q3 = 187;
232 static unsigned int mc_pause_threshold_q4q7 = 187;
233 static unsigned int shared_splits;
234 static unsigned int tmac_util_period = 5;
235 static unsigned int rmac_util_period = 5;
236 #ifndef CONFIG_S2IO_NAPI
237 static unsigned int indicate_max_pkts;
238 #endif
239
240 /*
241 * S2IO device table.
242 * This table lists all the devices that this driver supports.
243 */
244 static struct pci_device_id s2io_tbl[] __devinitdata = {
245 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
246 PCI_ANY_ID, PCI_ANY_ID},
247 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
248 PCI_ANY_ID, PCI_ANY_ID},
249 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
250 PCI_ANY_ID, PCI_ANY_ID},
251 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
252 PCI_ANY_ID, PCI_ANY_ID},
253 {0,}
254 };
255
256 MODULE_DEVICE_TABLE(pci, s2io_tbl);
257
258 static struct pci_driver s2io_driver = {
259 .name = "S2IO",
260 .id_table = s2io_tbl,
261 .probe = s2io_init_nic,
262 .remove = __devexit_p(s2io_rem_nic),
263 };
264
265 /* A simplifier macro used both by init and free shared_mem Fns(). */
266 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
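/*
 * TXD_MEM_PAGE_CNT() is simply a round-up division. For a hypothetical
 * list size of 256 bytes, a 4096 byte page holds 16 TxD lists
 * (lst_per_page = 16), so a FIFO of 100 lists needs
 * TXD_MEM_PAGE_CNT(100, 16) = (100 + 16 - 1) / 16 = 7 pages.
 */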
267
268 /**
269 * init_shared_mem - Allocation and Initialization of Memory
270 * @nic: Device private variable.
271 * Description: The function allocates all the memory areas shared
272 * between the NIC and the driver. This includes Tx descriptors,
273 * Rx descriptors and the statistics block.
274 */
275
276 static int init_shared_mem(struct s2io_nic *nic)
277 {
278 u32 size;
279 void *tmp_v_addr, *tmp_v_addr_next;
280 dma_addr_t tmp_p_addr, tmp_p_addr_next;
281 RxD_block_t *pre_rxd_blk = NULL;
282 int i, j, blk_cnt;
283 int lst_size, lst_per_page;
284 struct net_device *dev = nic->dev;
285 #ifdef CONFIG_2BUFF_MODE
286 unsigned long tmp;
287 buffAdd_t *ba;
288 #endif
289
290 mac_info_t *mac_control;
291 struct config_param *config;
292
293 mac_control = &nic->mac_control;
294 config = &nic->config;
295
296
297 /* Allocation and initialization of TXDLs in FIFOs */
298 size = 0;
299 for (i = 0; i < config->tx_fifo_num; i++) {
300 size += config->tx_cfg[i].fifo_len;
301 }
302 if (size > MAX_AVAILABLE_TXDS) {
303 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
304 dev->name);
305 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
306 DBG_PRINT(ERR_DBG, "that can be used\n");
307 return FAILURE;
308 }
309
310 lst_size = (sizeof(TxD_t) * config->max_txds);
311 lst_per_page = PAGE_SIZE / lst_size;
312
313 for (i = 0; i < config->tx_fifo_num; i++) {
314 int fifo_len = config->tx_cfg[i].fifo_len;
315 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
316 nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
317 if (!nic->list_info[i]) {
318 DBG_PRINT(ERR_DBG,
319 "Malloc failed for list_info\n");
320 return -ENOMEM;
321 }
322 memset(nic->list_info[i], 0, list_holder_size);
323 }
324 for (i = 0; i < config->tx_fifo_num; i++) {
325 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
326 lst_per_page);
327 mac_control->tx_curr_put_info[i].offset = 0;
328 mac_control->tx_curr_put_info[i].fifo_len =
329 config->tx_cfg[i].fifo_len - 1;
330 mac_control->tx_curr_get_info[i].offset = 0;
331 mac_control->tx_curr_get_info[i].fifo_len =
332 config->tx_cfg[i].fifo_len - 1;
333 for (j = 0; j < page_num; j++) {
334 int k = 0;
335 dma_addr_t tmp_p;
336 void *tmp_v;
337 tmp_v = pci_alloc_consistent(nic->pdev,
338 PAGE_SIZE, &tmp_p);
339 if (!tmp_v) {
340 DBG_PRINT(ERR_DBG,
341 "pci_alloc_consistent ");
342 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
343 return -ENOMEM;
344 }
345 while (k < lst_per_page) {
346 int l = (j * lst_per_page) + k;
347 if (l == config->tx_cfg[i].fifo_len)
348 goto end_txd_alloc;
349 nic->list_info[i][l].list_virt_addr =
350 tmp_v + (k * lst_size);
351 nic->list_info[i][l].list_phy_addr =
352 tmp_p + (k * lst_size);
353 k++;
354 }
355 }
356 }
357 end_txd_alloc:
358
359 /* Allocation and initialization of RXDs in Rings */
360 size = 0;
361 for (i = 0; i < config->rx_ring_num; i++) {
362 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
363 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
364 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
365 i);
366 DBG_PRINT(ERR_DBG, "RxDs per Block");
367 return FAILURE;
368 }
369 size += config->rx_cfg[i].num_rxd;
370 nic->block_count[i] =
371 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
372 nic->pkt_cnt[i] =
373 config->rx_cfg[i].num_rxd - nic->block_count[i];
374 }
375
376 for (i = 0; i < config->rx_ring_num; i++) {
377 mac_control->rx_curr_get_info[i].block_index = 0;
378 mac_control->rx_curr_get_info[i].offset = 0;
379 mac_control->rx_curr_get_info[i].ring_len =
380 config->rx_cfg[i].num_rxd - 1;
381 mac_control->rx_curr_put_info[i].block_index = 0;
382 mac_control->rx_curr_put_info[i].offset = 0;
383 mac_control->rx_curr_put_info[i].ring_len =
384 config->rx_cfg[i].num_rxd - 1;
385 blk_cnt =
386 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
387 /* Allocating all the Rx blocks */
388 for (j = 0; j < blk_cnt; j++) {
389 #ifndef CONFIG_2BUFF_MODE
390 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
391 #else
392 size = SIZE_OF_BLOCK;
393 #endif
394 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
395 &tmp_p_addr);
396 if (tmp_v_addr == NULL) {
397 /*
398 * In case of failure, free_shared_mem()
399 * is called, which should free any
400 * memory that was alloced till the
401 * failure happened.
402 */
403 nic->rx_blocks[i][j].block_virt_addr =
404 tmp_v_addr;
405 return -ENOMEM;
406 }
407 memset(tmp_v_addr, 0, size);
408 nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
409 nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
410 }
411 /* Interlinking all Rx Blocks */
412 for (j = 0; j < blk_cnt; j++) {
413 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
414 tmp_v_addr_next =
415 nic->rx_blocks[i][(j + 1) %
416 blk_cnt].block_virt_addr;
417 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
418 tmp_p_addr_next =
419 nic->rx_blocks[i][(j + 1) %
420 blk_cnt].block_dma_addr;
421
422 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
423 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
424 * marker.
425 */
426 #ifndef CONFIG_2BUFF_MODE
427 pre_rxd_blk->reserved_2_pNext_RxD_block =
428 (unsigned long) tmp_v_addr_next;
429 #endif
430 pre_rxd_blk->pNext_RxD_Blk_physical =
431 (u64) tmp_p_addr_next;
432 }
433 }
434
435 #ifdef CONFIG_2BUFF_MODE
436 /*
437 * Allocation of Storages for buffer addresses in 2BUFF mode
438 * and the buffers as well.
439 */
440 for (i = 0; i < config->rx_ring_num; i++) {
441 blk_cnt =
442 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
443 nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
444 GFP_KERNEL);
445 if (!nic->ba[i])
446 return -ENOMEM;
447 for (j = 0; j < blk_cnt; j++) {
448 int k = 0;
449 nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
450 (MAX_RXDS_PER_BLOCK + 1)),
451 GFP_KERNEL);
452 if (!nic->ba[i][j])
453 return -ENOMEM;
454 while (k != MAX_RXDS_PER_BLOCK) {
455 ba = &nic->ba[i][j][k];
456
457 ba->ba_0_org = kmalloc
458 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
459 if (!ba->ba_0_org)
460 return -ENOMEM;
461 tmp = (unsigned long) ba->ba_0_org;
462 tmp += ALIGN_SIZE;
463 tmp &= ~((unsigned long) ALIGN_SIZE);
464 ba->ba_0 = (void *) tmp;
465
466 ba->ba_1_org = kmalloc
467 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
468 if (!ba->ba_1_org)
469 return -ENOMEM;
470 tmp = (unsigned long) ba->ba_1_org;
471 tmp += ALIGN_SIZE;
472 tmp &= ~((unsigned long) ALIGN_SIZE);
473 ba->ba_1 = (void *) tmp;
474 k++;
475 }
476 }
477 }
478 #endif
479
480 /* Allocation and initialization of Statistics block */
481 size = sizeof(StatInfo_t);
482 mac_control->stats_mem = pci_alloc_consistent
483 (nic->pdev, size, &mac_control->stats_mem_phy);
484
485 if (!mac_control->stats_mem) {
486 /*
487 * In case of failure, free_shared_mem() is called, which
488 * should free any memory that was alloced till the
489 * failure happened.
490 */
491 return -ENOMEM;
492 }
493 mac_control->stats_mem_sz = size;
494
495 tmp_v_addr = mac_control->stats_mem;
496 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
497 memset(tmp_v_addr, 0, size);
498
499 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
500 (unsigned long long) tmp_p_addr);
501
502 return SUCCESS;
503 }
504
505 /**
506 * free_shared_mem - Free the allocated Memory
507 * @nic: Device private variable.
508 * Description: This function is to free all memory locations allocated by
509 * the init_shared_mem() function and return it to the kernel.
510 */
511
512 static void free_shared_mem(struct s2io_nic *nic)
513 {
514 int i, j, blk_cnt, size;
515 void *tmp_v_addr;
516 dma_addr_t tmp_p_addr;
517 mac_info_t *mac_control;
518 struct config_param *config;
519 int lst_size, lst_per_page;
520
521
522 if (!nic)
523 return;
524
525 mac_control = &nic->mac_control;
526 config = &nic->config;
527
528 lst_size = (sizeof(TxD_t) * config->max_txds);
529 lst_per_page = PAGE_SIZE / lst_size;
530
531 for (i = 0; i < config->tx_fifo_num; i++) {
532 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
533 lst_per_page);
534 for (j = 0; j < page_num; j++) {
535 int mem_blks = (j * lst_per_page);
536 if (!nic->list_info[i][mem_blks].list_virt_addr)
537 break;
538 pci_free_consistent(nic->pdev, PAGE_SIZE,
539 nic->list_info[i][mem_blks].
540 list_virt_addr,
541 nic->list_info[i][mem_blks].
542 list_phy_addr);
543 }
544 kfree(nic->list_info[i]);
545 }
546
547 #ifndef CONFIG_2BUFF_MODE
548 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
549 #else
550 size = SIZE_OF_BLOCK;
551 #endif
552 for (i = 0; i < config->rx_ring_num; i++) {
553 blk_cnt = nic->block_count[i];
554 for (j = 0; j < blk_cnt; j++) {
555 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
556 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
557 if (tmp_v_addr == NULL)
558 break;
559 pci_free_consistent(nic->pdev, size,
560 tmp_v_addr, tmp_p_addr);
561 }
562 }
563
564 #ifdef CONFIG_2BUFF_MODE
565 /* Freeing buffer storage addresses in 2BUFF mode. */
566 for (i = 0; i < config->rx_ring_num; i++) {
567 blk_cnt =
568 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
569 if (!nic->ba[i])
570 goto end_free;
571 for (j = 0; j < blk_cnt; j++) {
572 int k = 0;
573 if (!nic->ba[i][j]) {
574 kfree(nic->ba[i]);
575 goto end_free;
576 }
577 while (k != MAX_RXDS_PER_BLOCK) {
578 buffAdd_t *ba = &nic->ba[i][j][k];
579 if (!ba->ba_0_org || !ba->ba_1_org) {
580 /* Free innermost allocations first, then the arrays. */
581 if (ba->ba_0_org)
582 kfree(ba->ba_0_org);
583 if (ba->ba_1_org)
584 kfree(ba->ba_1_org);
585 kfree(nic->ba[i][j]);
586 kfree(nic->ba[i]);
587 goto end_free;
588 }
589 kfree(ba->ba_0_org);
590 kfree(ba->ba_1_org);
591 k++;
592 }
593 kfree(nic->ba[i][j]);
594 }
595 kfree(nic->ba[i]);
596 }
597 end_free:
598 #endif
599
600 if (mac_control->stats_mem) {
601 pci_free_consistent(nic->pdev,
602 mac_control->stats_mem_sz,
603 mac_control->stats_mem,
604 mac_control->stats_mem_phy);
605 }
606 }
607
608 /**
609 * init_nic - Initialization of hardware
610 * @nic: device private variable
611 * Description: The function sequentially configures every block
612 * of the H/W from their reset values.
613 * Return Value: SUCCESS on success and
614 * '-1' on failure (endian settings incorrect).
615 */
616
617 static int init_nic(struct s2io_nic *nic)
618 {
619 XENA_dev_config_t __iomem *bar0 = nic->bar0;
620 struct net_device *dev = nic->dev;
621 register u64 val64 = 0;
622 void __iomem *add;
623 u32 time;
624 int i, j;
625 mac_info_t *mac_control;
626 struct config_param *config;
627 int mdio_cnt = 0, dtx_cnt = 0;
628 unsigned long long mem_share;
629
630 mac_control = &nic->mac_control;
631 config = &nic->config;
632
633 /* Initialize swapper control register */
634 if (s2io_set_swapper(nic)) {
635 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
636 return -1;
637 }
638
639 /* Remove XGXS from reset state */
640 val64 = 0;
641 writeq(val64, &bar0->sw_reset);
642 val64 = readq(&bar0->sw_reset);
643 msleep(500);
644
645 /* Enable Receiving broadcasts */
646 add = &bar0->mac_cfg;
647 val64 = readq(&bar0->mac_cfg);
648 val64 |= MAC_RMAC_BCAST_ENABLE;
649 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
650 writel((u32) val64, add);
651 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
652 writel((u32) (val64 >> 32), (add + 4));
653
654 /* Read registers in all blocks */
655 val64 = readq(&bar0->mac_int_mask);
656 val64 = readq(&bar0->mc_int_mask);
657 val64 = readq(&bar0->xgxs_int_mask);
658
659 /* Set MTU */
660 val64 = dev->mtu;
661 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
662
663 /*
664 * Configuring the XAUI Interface of Xena.
665 * ***************************************
666 * To Configure the Xena's XAUI, one has to write a series
667 * of 64 bit values into two registers in a particular
668 * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
669 * which is placed in the arrays of configuration values
670 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
671 * to switch writing from one register to another. We continue
672 * writing these values until we encounter the 'END_SIGN' macro.
673 * For example, after making a series of 21 writes into the
674 * dtx_control register the 'SWITCH_SIGN' appears and hence we
675 * start writing into mdio_control until we encounter END_SIGN.
676 */
677 while (1) {
678 dtx_cfg:
679 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
680 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
681 dtx_cnt++;
682 goto mdio_cfg;
683 }
684 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
685 &bar0->dtx_control, UF);
686 val64 = readq(&bar0->dtx_control);
687 dtx_cnt++;
688 }
689 mdio_cfg:
690 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
691 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
692 mdio_cnt++;
693 goto dtx_cfg;
694 }
695 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
696 &bar0->mdio_control, UF);
697 val64 = readq(&bar0->mdio_control);
698 mdio_cnt++;
699 }
700 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
701 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
702 break;
703 } else {
704 goto dtx_cfg;
705 }
706 }
707
708 /* Tx DMA Initialization */
709 val64 = 0;
710 writeq(val64, &bar0->tx_fifo_partition_0);
711 writeq(val64, &bar0->tx_fifo_partition_1);
712 writeq(val64, &bar0->tx_fifo_partition_2);
713 writeq(val64, &bar0->tx_fifo_partition_3);
714
715
716 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
717 val64 |=
718 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
719 13) | vBIT(config->tx_cfg[i].fifo_priority,
720 ((i * 32) + 5), 3);
721
722 if (i == (config->tx_fifo_num - 1)) {
723 if (i % 2 == 0)
724 i++;
725 }
726
727 switch (i) {
728 case 1:
729 writeq(val64, &bar0->tx_fifo_partition_0);
730 val64 = 0;
731 break;
732 case 3:
733 writeq(val64, &bar0->tx_fifo_partition_1);
734 val64 = 0;
735 break;
736 case 5:
737 writeq(val64, &bar0->tx_fifo_partition_2);
738 val64 = 0;
739 break;
740 case 7:
741 writeq(val64, &bar0->tx_fifo_partition_3);
742 break;
743 }
744 }
745
746 /* Enable Tx FIFO partition 0. */
747 val64 = readq(&bar0->tx_fifo_partition_0);
748 val64 |= BIT(0); /* To enable the FIFO partition. */
749 writeq(val64, &bar0->tx_fifo_partition_0);
750
751 val64 = readq(&bar0->tx_fifo_partition_0);
752 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
753 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
754
755 /*
756 * Initialization of Tx_PA_CONFIG register to ignore packet
757 * integrity checking.
758 */
759 val64 = readq(&bar0->tx_pa_cfg);
760 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
761 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
762 writeq(val64, &bar0->tx_pa_cfg);
763
764 /* Rx DMA initialization. */
765 val64 = 0;
766 for (i = 0; i < config->rx_ring_num; i++) {
767 val64 |=
768 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
769 3);
770 }
771 writeq(val64, &bar0->rx_queue_priority);
772
773 /*
774 * Allocating equal share of memory to all the
775 * configured Rings.
776 */
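/*
 * For example, with rx_ring_num = 3 the switch below gives ring 0 a
 * share of 64/3 + 64%3 = 21 + 1 = 22 units and rings 1 and 2 a share of
 * 21 units each, so all 64 available units are accounted for.
 */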
777 val64 = 0;
778 for (i = 0; i < config->rx_ring_num; i++) {
779 switch (i) {
780 case 0:
781 mem_share = (64 / config->rx_ring_num +
782 64 % config->rx_ring_num);
783 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
784 continue;
785 case 1:
786 mem_share = (64 / config->rx_ring_num);
787 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
788 continue;
789 case 2:
790 mem_share = (64 / config->rx_ring_num);
791 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
792 continue;
793 case 3:
794 mem_share = (64 / config->rx_ring_num);
795 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
796 continue;
797 case 4:
798 mem_share = (64 / config->rx_ring_num);
799 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
800 continue;
801 case 5:
802 mem_share = (64 / config->rx_ring_num);
803 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
804 continue;
805 case 6:
806 mem_share = (64 / config->rx_ring_num);
807 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
808 continue;
809 case 7:
810 mem_share = (64 / config->rx_ring_num);
811 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
812 continue;
813 }
814 }
815 writeq(val64, &bar0->rx_queue_cfg);
816
817 /*
818 * Initializing the Tx round robin registers to 0.
819 * Filling Tx and Rx round robin registers as per the
820 * number of FIFOs and Rings is still TODO.
821 */
822 writeq(0, &bar0->tx_w_round_robin_0);
823 writeq(0, &bar0->tx_w_round_robin_1);
824 writeq(0, &bar0->tx_w_round_robin_2);
825 writeq(0, &bar0->tx_w_round_robin_3);
826 writeq(0, &bar0->tx_w_round_robin_4);
827
828 /*
829 * TODO
830 * Disable Rx steering. Hard coding all packets to be steered
831 * to Queue 0 for now.
832 */
833 val64 = 0x8080808080808080ULL;
834 writeq(val64, &bar0->rts_qos_steering);
835
836 /* UDP Fix */
837 val64 = 0;
838 for (i = 1; i < 8; i++)
839 writeq(val64, &bar0->rts_frm_len_n[i]);
840
841 /* Set rts_frm_len register for fifo 0 */
842 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
843 &bar0->rts_frm_len_n[0]);
844
845 /* Enable statistics */
846 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
847 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
848 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
849 writeq(val64, &bar0->stat_cfg);
850
851 /*
852 * Initializing the sampling rate for the device to calculate the
853 * bandwidth utilization.
854 */
855 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
856 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
857 writeq(val64, &bar0->mac_link_util);
858
859
860 /*
861 * Initializing the Transmit and Receive Traffic Interrupt
862 * Scheme.
863 */
864 /* TTI Initialization. Default Tx timer gets us about
865 * 250 interrupts per sec. Continuous interrupts are enabled
866 * by default.
867 */
868 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
869 TTI_DATA1_MEM_TX_URNG_A(0xA) |
870 TTI_DATA1_MEM_TX_URNG_B(0x10) |
871 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
872 TTI_DATA1_MEM_TX_TIMER_CI_EN;
873 writeq(val64, &bar0->tti_data1_mem);
874
875 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
876 TTI_DATA2_MEM_TX_UFC_B(0x20) |
877 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
878 writeq(val64, &bar0->tti_data2_mem);
879
880 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
881 writeq(val64, &bar0->tti_command_mem);
882
883 /*
884 * Once the operation completes, the Strobe bit of the command
885 * register will be reset. We poll for this particular condition.
886 * We wait for a maximum of 500ms for the operation to complete;
887 * if it's not complete by then we return an error.
888 */
889 time = 0;
890 while (TRUE) {
891 val64 = readq(&bar0->tti_command_mem);
892 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
893 break;
894 }
895 if (time > 10) {
896 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
897 dev->name);
898 return -1;
899 }
900 msleep(50);
901 time++;
902 }
903
904 /* RTI Initialization */
905 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
906 RTI_DATA1_MEM_RX_URNG_A(0xA) |
907 RTI_DATA1_MEM_RX_URNG_B(0x10) |
908 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
909
910 writeq(val64, &bar0->rti_data1_mem);
911
912 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
913 RTI_DATA2_MEM_RX_UFC_B(0x2) |
914 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
915 writeq(val64, &bar0->rti_data2_mem);
916
917 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
918 writeq(val64, &bar0->rti_command_mem);
919
920 /*
921 * Once the operation completes, the Strobe bit of the command
922 * register will be reset. We poll for this particular condition.
923 * We wait for a maximum of 500ms for the operation to complete;
924 * if it's not complete by then we return an error.
925 */
926 time = 0;
927 while (TRUE) {
928 val64 = readq(&bar0->rti_command_mem);
929 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
930 break;
931 }
932 if (time > 10) {
933 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
934 dev->name);
935 return -1;
936 }
937 time++;
938 msleep(50);
939 }
940
941 /*
942 * Initializing proper values as Pause threshold into all
943 * the 8 Queues on Rx side.
944 */
945 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
946 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
947
948 /* Disable RMAC PAD STRIPPING */
949 add = &bar0->mac_cfg;
950 val64 = readq(&bar0->mac_cfg);
951 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
952 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
953 writel((u32) (val64), add);
954 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
955 writel((u32) (val64 >> 32), (add + 4));
956 val64 = readq(&bar0->mac_cfg);
957
958 /*
959 * Set the time value to be inserted in the pause frame
960 * generated by xena.
961 */
962 val64 = readq(&bar0->rmac_pause_cfg);
963 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
964 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
965 writeq(val64, &bar0->rmac_pause_cfg);
966
967 /*
968 * Set the Threshold Limit for generating pause frames.
969 * If the amount of data in any Queue exceeds the ratio of
970 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
971 * a pause frame is generated.
972 */
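/*
 * With the default mc_pause_threshold_q0q3 of 187, for instance, the
 * ratio works out to 187/256, i.e. a queue has to be roughly 73% full
 * before a pause frame is generated.
 */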
973 val64 = 0;
974 for (i = 0; i < 4; i++) {
975 val64 |=
976 (((u64) 0xFF00 | nic->mac_control.
977 mc_pause_threshold_q0q3)
978 << (i * 2 * 8));
979 }
980 writeq(val64, &bar0->mc_pause_thresh_q0q3);
981
982 val64 = 0;
983 for (i = 0; i < 4; i++) {
984 val64 |=
985 (((u64) 0xFF00 | nic->mac_control.
986 mc_pause_threshold_q4q7)
987 << (i * 2 * 8));
988 }
989 writeq(val64, &bar0->mc_pause_thresh_q4q7);
990
991 /*
992 * TxDMA will stop Read request if the number of read split has
993 * exceeded the limit pointed by shared_splits
994 */
995 val64 = readq(&bar0->pic_control);
996 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
997 writeq(val64, &bar0->pic_control);
998
999 return SUCCESS;
1000 }
1001
1002 /**
1003 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1004 * @nic: device private variable,
1005 * @mask: A mask indicating which Intr block must be modified and,
1006 * @flag: A flag indicating whether to enable or disable the Intrs.
1007 * Description: This function will either disable or enable the interrupts
1008 * depending on the flag argument. The mask argument can be used to
1009 * enable/disable any Intr block.
1010 * Return Value: NONE.
1011 */
1012
1013 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1014 {
1015 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1016 register u64 val64 = 0, temp64 = 0;
1017
1018 /* Top level interrupt classification */
1019 /* PIC Interrupts */
1020 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1021 /* Enable PIC Intrs in the general intr mask register */
1022 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1023 if (flag == ENABLE_INTRS) {
1024 temp64 = readq(&bar0->general_int_mask);
1025 temp64 &= ~((u64) val64);
1026 writeq(temp64, &bar0->general_int_mask);
1027 /*
1028 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1029 * interrupts for now.
1030 * TODO
1031 */
1032 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1033 /*
1034 * No MSI Support is available presently, so TTI and
1035 * RTI interrupts are also disabled.
1036 */
1037 } else if (flag == DISABLE_INTRS) {
1038 /*
1039 * Disable PIC Intrs in the general
1040 * intr mask register
1041 */
1042 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1043 temp64 = readq(&bar0->general_int_mask);
1044 val64 |= temp64;
1045 writeq(val64, &bar0->general_int_mask);
1046 }
1047 }
1048
1049 /* DMA Interrupts */
1050 /* Enabling/Disabling Tx DMA interrupts */
1051 if (mask & TX_DMA_INTR) {
1052 /* Enable TxDMA Intrs in the general intr mask register */
1053 val64 = TXDMA_INT_M;
1054 if (flag == ENABLE_INTRS) {
1055 temp64 = readq(&bar0->general_int_mask);
1056 temp64 &= ~((u64) val64);
1057 writeq(temp64, &bar0->general_int_mask);
1058 /*
1059 * Keep all interrupts other than PFC interrupt
1060 * and PCC interrupt disabled in DMA level.
1061 */
1062 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1063 TXDMA_PCC_INT_M);
1064 writeq(val64, &bar0->txdma_int_mask);
1065 /*
1066 * Enable only the MISC error 1 interrupt in PFC block
1067 */
1068 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1069 writeq(val64, &bar0->pfc_err_mask);
1070 /*
1071 * Enable only the FB_ECC error interrupt in PCC block
1072 */
1073 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1074 writeq(val64, &bar0->pcc_err_mask);
1075 } else if (flag == DISABLE_INTRS) {
1076 /*
1077 * Disable TxDMA Intrs in the general intr mask
1078 * register
1079 */
1080 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1081 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1082 temp64 = readq(&bar0->general_int_mask);
1083 val64 |= temp64;
1084 writeq(val64, &bar0->general_int_mask);
1085 }
1086 }
1087
1088 /* Enabling/Disabling Rx DMA interrupts */
1089 if (mask & RX_DMA_INTR) {
1090 /* Enable RxDMA Intrs in the general intr mask register */
1091 val64 = RXDMA_INT_M;
1092 if (flag == ENABLE_INTRS) {
1093 temp64 = readq(&bar0->general_int_mask);
1094 temp64 &= ~((u64) val64);
1095 writeq(temp64, &bar0->general_int_mask);
1096 /*
1097 * All RxDMA block interrupts are disabled for now
1098 * TODO
1099 */
1100 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1101 } else if (flag == DISABLE_INTRS) {
1102 /*
1103 * Disable RxDMA Intrs in the general intr mask
1104 * register
1105 */
1106 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1107 temp64 = readq(&bar0->general_int_mask);
1108 val64 |= temp64;
1109 writeq(val64, &bar0->general_int_mask);
1110 }
1111 }
1112
1113 /* MAC Interrupts */
1114 /* Enabling/Disabling MAC interrupts */
1115 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1116 val64 = TXMAC_INT_M | RXMAC_INT_M;
1117 if (flag == ENABLE_INTRS) {
1118 temp64 = readq(&bar0->general_int_mask);
1119 temp64 &= ~((u64) val64);
1120 writeq(temp64, &bar0->general_int_mask);
1121 /*
1122 * All MAC block error interrupts are disabled for now
1123 * except the link status change interrupt.
1124 * TODO
1125 */
1126 val64 = MAC_INT_STATUS_RMAC_INT;
1127 temp64 = readq(&bar0->mac_int_mask);
1128 temp64 &= ~((u64) val64);
1129 writeq(temp64, &bar0->mac_int_mask);
1130
1131 val64 = readq(&bar0->mac_rmac_err_mask);
1132 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1133 writeq(val64, &bar0->mac_rmac_err_mask);
1134 } else if (flag == DISABLE_INTRS) {
1135 /*
1136 * Disable MAC Intrs in the general intr mask register
1137 */
1138 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1139 writeq(DISABLE_ALL_INTRS,
1140 &bar0->mac_rmac_err_mask);
1141
1142 temp64 = readq(&bar0->general_int_mask);
1143 val64 |= temp64;
1144 writeq(val64, &bar0->general_int_mask);
1145 }
1146 }
1147
1148 /* XGXS Interrupts */
1149 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1150 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1151 if (flag == ENABLE_INTRS) {
1152 temp64 = readq(&bar0->general_int_mask);
1153 temp64 &= ~((u64) val64);
1154 writeq(temp64, &bar0->general_int_mask);
1155 /*
1156 * All XGXS block error interrupts are disabled for now
1157 * TODO
1158 */
1159 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1160 } else if (flag == DISABLE_INTRS) {
1161 /*
1162 * Disable XGXS Intrs in the general intr mask register
1163 */
1164 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1165 temp64 = readq(&bar0->general_int_mask);
1166 val64 |= temp64;
1167 writeq(val64, &bar0->general_int_mask);
1168 }
1169 }
1170
1171 /* Memory Controller(MC) interrupts */
1172 if (mask & MC_INTR) {
1173 val64 = MC_INT_M;
1174 if (flag == ENABLE_INTRS) {
1175 temp64 = readq(&bar0->general_int_mask);
1176 temp64 &= ~((u64) val64);
1177 writeq(temp64, &bar0->general_int_mask);
1178 /*
1179 * All MC block error interrupts are disabled for now
1180 * TODO
1181 */
1182 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1183 } else if (flag == DISABLE_INTRS) {
1184 /*
1185 * Disable MC Intrs in the general intr mask register
1186 */
1187 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1188 temp64 = readq(&bar0->general_int_mask);
1189 val64 |= temp64;
1190 writeq(val64, &bar0->general_int_mask);
1191 }
1192 }
1193
1194
1195 /* Tx traffic interrupts */
1196 if (mask & TX_TRAFFIC_INTR) {
1197 val64 = TXTRAFFIC_INT_M;
1198 if (flag == ENABLE_INTRS) {
1199 temp64 = readq(&bar0->general_int_mask);
1200 temp64 &= ~((u64) val64);
1201 writeq(temp64, &bar0->general_int_mask);
1202 /*
1203 * Enable all the Tx side interrupts.
1204 * Writing 0 enables all 64 TX interrupt levels.
1205 */
1206 writeq(0x0, &bar0->tx_traffic_mask);
1207 } else if (flag == DISABLE_INTRS) {
1208 /*
1209 * Disable Tx Traffic Intrs in the general intr mask
1210 * register.
1211 */
1212 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1213 temp64 = readq(&bar0->general_int_mask);
1214 val64 |= temp64;
1215 writeq(val64, &bar0->general_int_mask);
1216 }
1217 }
1218
1219 /* Rx traffic interrupts */
1220 if (mask & RX_TRAFFIC_INTR) {
1221 val64 = RXTRAFFIC_INT_M;
1222 if (flag == ENABLE_INTRS) {
1223 temp64 = readq(&bar0->general_int_mask);
1224 temp64 &= ~((u64) val64);
1225 writeq(temp64, &bar0->general_int_mask);
1226 /* Writing 0 enables all 8 RX interrupt levels */
1227 writeq(0x0, &bar0->rx_traffic_mask);
1228 } else if (flag == DISABLE_INTRS) {
1229 /*
1230 * Disable Rx Traffic Intrs in the general intr mask
1231 * register.
1232 */
1233 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1234 temp64 = readq(&bar0->general_int_mask);
1235 val64 |= temp64;
1236 writeq(val64, &bar0->general_int_mask);
1237 }
1238 }
1239 }
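/*
 * Typical usage of en_dis_able_nic_intrs() above, as done later in
 * start_nic() and stop_nic(): build a mask of the interrupt blocks of
 * interest and pass ENABLE_INTRS or DISABLE_INTRS, e.g.
 *     en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *                           ENABLE_INTRS);
 */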
1240
1241 /**
1242 * verify_xena_quiescence - Checks whether the H/W is ready
1243 * @val64 : Value read from adapter status register.
1244 * @flag : indicates if the adapter enable bit was ever written once
1245 * before.
1246 * Description: Returns whether the H/W is ready to go or not. Depending
1247 * on whether adapter enable bit was written or not the comparison
1248 * differs and the calling function passes the input argument flag to
1249 * indicate this.
1250 * Return: 1 if Xena is quiescent
1251 * 0 if Xena is not quiescent
1252 */
1253
1254 static int verify_xena_quiescence(u64 val64, int flag)
1255 {
1256 int ret = 0;
1257 u64 tmp64 = ~((u64) val64);
1258
1259 if (!
1260 (tmp64 &
1261 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1262 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1263 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1264 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1265 ADAPTER_STATUS_P_PLL_LOCK))) {
1266 if (flag == FALSE) {
1267 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1268 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1269 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1270
1271 ret = 1;
1272
1273 }
1274 } else {
1275 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1276 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1277 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1278 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1279 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1280
1281 ret = 1;
1282
1283 }
1284 }
1285 }
1286
1287 return ret;
1288 }
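/*
 * Callers hand in the raw adapter status register value, e.g. as
 * start_nic() does further down:
 *     val64 = readq(&bar0->adapter_status);
 *     if (!verify_xena_quiescence(val64, nic->device_enabled_once))
 *             return FAILURE;
 */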
1289
1290 /**
1291 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1292 * @sp: Pointer to device specific structure
1293 * Description :
1294 * New procedure to clear mac address reading problems on Alpha platforms
1295 *
1296 */
1297
1298 static void fix_mac_address(nic_t * sp)
1299 {
1300 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1301 u64 val64;
1302 int i = 0;
1303
1304 while (fix_mac[i] != END_SIGN) {
1305 writeq(fix_mac[i++], &bar0->gpio_control);
1306 val64 = readq(&bar0->gpio_control);
1307 }
1308 }
1309
1310 /**
1311 * start_nic - Turns the device on
1312 * @nic : device private variable.
1313 * Description:
1314 * This function actually turns the device on. Before this function is
1315 * called, all registers are configured from their reset states
1316 * and shared memory is allocated but the NIC is still quiescent. On
1317 * calling this function, the device interrupts are cleared and the NIC is
1318 * literally switched on by writing into the adapter control register.
1319 * Return Value:
1320 * SUCCESS on success and -1 on failure.
1321 */
1322
1323 static int start_nic(struct s2io_nic *nic)
1324 {
1325 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1326 struct net_device *dev = nic->dev;
1327 register u64 val64 = 0;
1328 u16 interruptible, i;
1329 u16 subid;
1330 mac_info_t *mac_control;
1331 struct config_param *config;
1332
1333 mac_control = &nic->mac_control;
1334 config = &nic->config;
1335
1336 /* PRC Initialization and configuration */
1337 for (i = 0; i < config->rx_ring_num; i++) {
1338 writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
1339 &bar0->prc_rxd0_n[i]);
1340
1341 val64 = readq(&bar0->prc_ctrl_n[i]);
1342 #ifndef CONFIG_2BUFF_MODE
1343 val64 |= PRC_CTRL_RC_ENABLED;
1344 #else
1345 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1346 #endif
1347 writeq(val64, &bar0->prc_ctrl_n[i]);
1348 }
1349
1350 #ifdef CONFIG_2BUFF_MODE
1351 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1352 val64 = readq(&bar0->rx_pa_cfg);
1353 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1354 writeq(val64, &bar0->rx_pa_cfg);
1355 #endif
1356
1357 /*
1358 * Enabling MC-RLDRAM. After enabling the device, we timeout
1359 * for around 100ms, which is approximately the time required
1360 * for the device to be ready for operation.
1361 */
1362 val64 = readq(&bar0->mc_rldram_mrs);
1363 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1364 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1365 val64 = readq(&bar0->mc_rldram_mrs);
1366
1367 msleep(100); /* Delay by around 100 ms. */
1368
1369 /* Enabling ECC Protection. */
1370 val64 = readq(&bar0->adapter_control);
1371 val64 &= ~ADAPTER_ECC_EN;
1372 writeq(val64, &bar0->adapter_control);
1373
1374 /*
1375 * Clearing any possible Link state change interrupts that
1376 * could have popped up just before Enabling the card.
1377 */
1378 val64 = readq(&bar0->mac_rmac_err_reg);
1379 if (val64)
1380 writeq(val64, &bar0->mac_rmac_err_reg);
1381
1382 /*
1383 * Verify if the device is ready to be enabled, if so enable
1384 * it.
1385 */
1386 val64 = readq(&bar0->adapter_status);
1387 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
1388 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1389 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1390 (unsigned long long) val64);
1391 return FAILURE;
1392 }
1393
1394 /* Enable select interrupts */
1395 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1396 RX_MAC_INTR;
1397 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1398
1399 /*
1400 * With some switches, link might be already up at this point.
1401 * Because of this weird behavior, when we enable laser,
1402 * we may not get link. We need to handle this. We cannot
1403 * figure out which switch is misbehaving. So we are forced to
1404 * make a global change.
1405 */
1406
1407 /* Enabling Laser. */
1408 val64 = readq(&bar0->adapter_control);
1409 val64 |= ADAPTER_EOI_TX_ON;
1410 writeq(val64, &bar0->adapter_control);
1411
1412 /* SXE-002: Initialize link and activity LED */
1413 subid = nic->pdev->subsystem_device;
1414 if ((subid & 0xFF) >= 0x07) {
1415 val64 = readq(&bar0->gpio_control);
1416 val64 |= 0x0000800000000000ULL;
1417 writeq(val64, &bar0->gpio_control);
1418 val64 = 0x0411040400000000ULL;
1419 writeq(val64, (void __iomem *) bar0 + 0x2700);
1420 }
1421
1422 /*
1423 * Don't see link state interrupts on certain switches, so
1424 * directly scheduling a link state task from here.
1425 */
1426 schedule_work(&nic->set_link_task);
1427
1428 /*
1429 * Here we are performing soft reset on XGXS to
1430 * force link down. Since link is already up, we will get
1431 * link state change interrupt after this reset
1432 */
1433 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1434 val64 = readq(&bar0->dtx_control);
1435 udelay(50);
1436 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1437 val64 = readq(&bar0->dtx_control);
1438 udelay(50);
1439 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1440 val64 = readq(&bar0->dtx_control);
1441 udelay(50);
1442
1443 return SUCCESS;
1444 }
1445
1446 /**
1447 * free_tx_buffers - Free all queued Tx buffers
1448 * @nic : device private variable.
1449 * Description:
1450 * Free all queued Tx buffers.
1451 * Return Value: void
1452 */
1453
1454 static void free_tx_buffers(struct s2io_nic *nic)
1455 {
1456 struct net_device *dev = nic->dev;
1457 struct sk_buff *skb;
1458 TxD_t *txdp;
1459 int i, j;
1460 mac_info_t *mac_control;
1461 struct config_param *config;
1462 int cnt = 0;
1463
1464 mac_control = &nic->mac_control;
1465 config = &nic->config;
1466
1467 for (i = 0; i < config->tx_fifo_num; i++) {
1468 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1469 txdp = (TxD_t *) nic->list_info[i][j].
1470 list_virt_addr;
1471 skb =
1472 (struct sk_buff *) ((unsigned long) txdp->
1473 Host_Control);
1474 if (skb == NULL) {
1475 memset(txdp, 0, sizeof(TxD_t));
1476 continue;
1477 }
1478 dev_kfree_skb(skb);
1479 memset(txdp, 0, sizeof(TxD_t));
1480 cnt++;
1481 }
1482 DBG_PRINT(INTR_DBG,
1483 "%s:forcibly freeing %d skbs on FIFO%d\n",
1484 dev->name, cnt, i);
1485 mac_control->tx_curr_get_info[i].offset = 0;
1486 mac_control->tx_curr_put_info[i].offset = 0;
1487 }
1488 }
1489
1490 /**
1491 * stop_nic - To stop the nic
1492 * @nic : device private variable.
1493 * Description:
1494 * This function does exactly the opposite of what the start_nic()
1495 * function does. This function is called to stop the device.
1496 * Return Value:
1497 * void.
1498 */
1499
1500 static void stop_nic(struct s2io_nic *nic)
1501 {
1502 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1503 register u64 val64 = 0;
1504 u16 interruptible, i;
1505 mac_info_t *mac_control;
1506 struct config_param *config;
1507
1508 mac_control = &nic->mac_control;
1509 config = &nic->config;
1510
1511 /* Disable all interrupts */
1512 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1513 RX_MAC_INTR;
1514 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1515
1516 /* Disable PRCs */
1517 for (i = 0; i < config->rx_ring_num; i++) {
1518 val64 = readq(&bar0->prc_ctrl_n[i]);
1519 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1520 writeq(val64, &bar0->prc_ctrl_n[i]);
1521 }
1522 }
1523
1524 /**
1525 * fill_rx_buffers - Allocates the Rx side skbs
1526 * @nic: device private variable
1527 * @ring_no: ring number
1528 * Description:
1529 * The function allocates Rx side skbs and puts the physical
1530 * address of these buffers into the RxD buffer pointers, so that the NIC
1531 * can DMA the received frame into these locations.
1532 * The NIC supports 3 receive modes, viz
1533 * 1. single buffer,
1534 * 2. three buffer and
1535 * 3. Five buffer modes.
1536 * Each mode defines how many fragments the received frame will be split
1537 * up into by the NIC. The frame is split into L3 header, L4 Header,
1538 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1539 * is split into 3 fragments. As of now only single buffer mode is
1540 * supported.
1541 * Return Value:
1542 * SUCCESS on success or an appropriate -ve value on failure.
1543 */
1544
1545 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1546 {
1547 struct net_device *dev = nic->dev;
1548 struct sk_buff *skb;
1549 RxD_t *rxdp;
1550 int off, off1, size, block_no, block_no1;
1551 int offset, offset1;
1552 u32 alloc_tab = 0;
1553 u32 alloc_cnt = nic->pkt_cnt[ring_no] -
1554 atomic_read(&nic->rx_bufs_left[ring_no]);
1555 mac_info_t *mac_control;
1556 struct config_param *config;
1557 #ifdef CONFIG_2BUFF_MODE
1558 RxD_t *rxdpnext;
1559 int nextblk;
1560 unsigned long tmp;
1561 buffAdd_t *ba;
1562 dma_addr_t rxdpphys;
1563 #endif
1564 #ifndef CONFIG_S2IO_NAPI
1565 unsigned long flags;
1566 #endif
1567
1568 mac_control = &nic->mac_control;
1569 config = &nic->config;
1570
1571 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1572 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1573
1574 while (alloc_tab < alloc_cnt) {
1575 block_no = mac_control->rx_curr_put_info[ring_no].
1576 block_index;
1577 block_no1 = mac_control->rx_curr_get_info[ring_no].
1578 block_index;
1579 off = mac_control->rx_curr_put_info[ring_no].offset;
1580 off1 = mac_control->rx_curr_get_info[ring_no].offset;
1581 #ifndef CONFIG_2BUFF_MODE
1582 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1583 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1584 #else
1585 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1586 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1587 #endif
1588
1589 rxdp = nic->rx_blocks[ring_no][block_no].
1590 block_virt_addr + off;
1591 if ((offset == offset1) && (rxdp->Host_Control)) {
1592 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1593 DBG_PRINT(INTR_DBG, " info equated\n");
1594 goto end;
1595 }
1596 #ifndef CONFIG_2BUFF_MODE
1597 if (rxdp->Control_1 == END_OF_BLOCK) {
1598 mac_control->rx_curr_put_info[ring_no].
1599 block_index++;
1600 mac_control->rx_curr_put_info[ring_no].
1601 block_index %= nic->block_count[ring_no];
1602 block_no = mac_control->rx_curr_put_info
1603 [ring_no].block_index;
1604 off++;
1605 off %= (MAX_RXDS_PER_BLOCK + 1);
1606 mac_control->rx_curr_put_info[ring_no].offset =
1607 off;
1608 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1609 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1610 dev->name, rxdp);
1611 }
1612 #ifndef CONFIG_S2IO_NAPI
1613 spin_lock_irqsave(&nic->put_lock, flags);
1614 nic->put_pos[ring_no] =
1615 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1616 spin_unlock_irqrestore(&nic->put_lock, flags);
1617 #endif
1618 #else
1619 if (rxdp->Host_Control == END_OF_BLOCK) {
1620 mac_control->rx_curr_put_info[ring_no].
1621 block_index++;
1622 mac_control->rx_curr_put_info[ring_no].
1623 block_index %= nic->block_count[ring_no];
1624 block_no = mac_control->rx_curr_put_info
1625 [ring_no].block_index;
1626 off = 0;
1627 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1628 dev->name, block_no,
1629 (unsigned long long) rxdp->Control_1);
1630 mac_control->rx_curr_put_info[ring_no].offset =
1631 off;
1632 rxdp = nic->rx_blocks[ring_no][block_no].
1633 block_virt_addr;
1634 }
1635 #ifndef CONFIG_S2IO_NAPI
1636 spin_lock_irqsave(&nic->put_lock, flags);
1637 nic->put_pos[ring_no] = (block_no *
1638 (MAX_RXDS_PER_BLOCK + 1)) + off;
1639 spin_unlock_irqrestore(&nic->put_lock, flags);
1640 #endif
1641 #endif
1642
1643 #ifndef CONFIG_2BUFF_MODE
1644 if (rxdp->Control_1 & RXD_OWN_XENA)
1645 #else
1646 if (rxdp->Control_2 & BIT(0))
1647 #endif
1648 {
1649 mac_control->rx_curr_put_info[ring_no].
1650 offset = off;
1651 goto end;
1652 }
1653 #ifdef CONFIG_2BUFF_MODE
1654 /*
1655 * RxDs Spanning cache lines will be replenished only
1656 * if the succeeding RxD is also owned by Host. It
1657 * will always be the ((8*i)+3) and ((8*i)+6)
1658 * descriptors for the 48 byte descriptor. The offending
1659 * descriptor is of course the 3rd descriptor.
1660 */
1661 rxdpphys = nic->rx_blocks[ring_no][block_no].
1662 block_dma_addr + (off * sizeof(RxD_t));
1663 if (((u64) (rxdpphys)) % 128 > 80) {
1664 rxdpnext = nic->rx_blocks[ring_no][block_no].
1665 block_virt_addr + (off + 1);
1666 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1667 nextblk = (block_no + 1) %
1668 (nic->block_count[ring_no]);
1669 rxdpnext = nic->rx_blocks[ring_no]
1670 [nextblk].block_virt_addr;
1671 }
1672 if (rxdpnext->Control_2 & BIT(0))
1673 goto end;
1674 }
1675 #endif
1676
1677 #ifndef CONFIG_2BUFF_MODE
1678 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1679 #else
1680 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1681 #endif
1682 if (!skb) {
1683 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1684 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1685 return -ENOMEM;
1686 }
1687 #ifndef CONFIG_2BUFF_MODE
1688 skb_reserve(skb, NET_IP_ALIGN);
1689 memset(rxdp, 0, sizeof(RxD_t));
1690 rxdp->Buffer0_ptr = pci_map_single
1691 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1692 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1693 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1694 rxdp->Host_Control = (unsigned long) (skb);
1695 rxdp->Control_1 |= RXD_OWN_XENA;
1696 off++;
1697 off %= (MAX_RXDS_PER_BLOCK + 1);
1698 mac_control->rx_curr_put_info[ring_no].offset = off;
1699 #else
1700 ba = &nic->ba[ring_no][block_no][off];
1701 skb_reserve(skb, BUF0_LEN);
1702 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1703 if (tmp)
1704 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1705
1706 memset(rxdp, 0, sizeof(RxD_t));
1707 rxdp->Buffer2_ptr = pci_map_single
1708 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1709 PCI_DMA_FROMDEVICE);
1710 rxdp->Buffer0_ptr =
1711 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1712 PCI_DMA_FROMDEVICE);
1713 rxdp->Buffer1_ptr =
1714 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1715 PCI_DMA_FROMDEVICE);
1716
1717 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1718 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1719 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1720 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1721 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1722 rxdp->Control_1 |= RXD_OWN_XENA;
1723 off++;
1724 mac_control->rx_curr_put_info[ring_no].offset = off;
1725 #endif
1726 atomic_inc(&nic->rx_bufs_left[ring_no]);
1727 alloc_tab++;
1728 }
1729
1730 end:
1731 return SUCCESS;
1732 }
1733
1734 /**
1735 * free_rx_buffers - Frees all Rx buffers
1736 * @sp: device private variable.
1737 * Description:
1738 * This function will free all Rx buffers allocated by host.
1739 * Return Value:
1740 * NONE.
1741 */
1742
1743 static void free_rx_buffers(struct s2io_nic *sp)
1744 {
1745 struct net_device *dev = sp->dev;
1746 int i, j, blk = 0, off, buf_cnt = 0;
1747 RxD_t *rxdp;
1748 struct sk_buff *skb;
1749 mac_info_t *mac_control;
1750 struct config_param *config;
1751 #ifdef CONFIG_2BUFF_MODE
1752 buffAdd_t *ba;
1753 #endif
1754
1755 mac_control = &sp->mac_control;
1756 config = &sp->config;
1757
1758 for (i = 0; i < config->rx_ring_num; i++) {
1759 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1760 off = j % (MAX_RXDS_PER_BLOCK + 1);
1761 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
1762
1763 #ifndef CONFIG_2BUFF_MODE
1764 if (rxdp->Control_1 == END_OF_BLOCK) {
1765 rxdp =
1766 (RxD_t *) ((unsigned long) rxdp->
1767 Control_2);
1768 j++;
1769 blk++;
1770 }
1771 #else
1772 if (rxdp->Host_Control == END_OF_BLOCK) {
1773 blk++;
1774 continue;
1775 }
1776 #endif
1777
1778 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
1779 memset(rxdp, 0, sizeof(RxD_t));
1780 continue;
1781 }
1782
1783 skb =
1784 (struct sk_buff *) ((unsigned long) rxdp->
1785 Host_Control);
1786 if (skb) {
1787 #ifndef CONFIG_2BUFF_MODE
1788 pci_unmap_single(sp->pdev, (dma_addr_t)
1789 rxdp->Buffer0_ptr,
1790 dev->mtu +
1791 HEADER_ETHERNET_II_802_3_SIZE
1792 + HEADER_802_2_SIZE +
1793 HEADER_SNAP_SIZE,
1794 PCI_DMA_FROMDEVICE);
1795 #else
1796 ba = &sp->ba[i][blk][off];
1797 pci_unmap_single(sp->pdev, (dma_addr_t)
1798 rxdp->Buffer0_ptr,
1799 BUF0_LEN,
1800 PCI_DMA_FROMDEVICE);
1801 pci_unmap_single(sp->pdev, (dma_addr_t)
1802 rxdp->Buffer1_ptr,
1803 BUF1_LEN,
1804 PCI_DMA_FROMDEVICE);
1805 pci_unmap_single(sp->pdev, (dma_addr_t)
1806 rxdp->Buffer2_ptr,
1807 dev->mtu + BUF0_LEN + 4,
1808 PCI_DMA_FROMDEVICE);
1809 #endif
1810 dev_kfree_skb(skb);
1811 atomic_dec(&sp->rx_bufs_left[i]);
1812 buf_cnt++;
1813 }
1814 memset(rxdp, 0, sizeof(RxD_t));
1815 }
1816 mac_control->rx_curr_put_info[i].block_index = 0;
1817 mac_control->rx_curr_get_info[i].block_index = 0;
1818 mac_control->rx_curr_put_info[i].offset = 0;
1819 mac_control->rx_curr_get_info[i].offset = 0;
1820 atomic_set(&sp->rx_bufs_left[i], 0);
1821 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1822 dev->name, buf_cnt, i);
1823 }
1824 }
1825
1826 /**
1827 * s2io_poll - Rx interrupt handler for NAPI support
1828 * @dev : pointer to the device structure.
1829 * @budget : The number of packets that were budgeted to be processed
1830 * during one pass through the 'Poll' function.
1831 * Description:
1832 * Comes into picture only if NAPI support has been incorporated. It does
1833 * the same thing that rx_intr_handler does, but not in a interrupt context
1834 * also It will process only a given number of packets.
1835 * Return value:
1836  * 0 on success and 1 if there are no Rx packets to be processed.
1837 */
1838
1839 #ifdef CONFIG_S2IO_NAPI
1840 static int s2io_poll(struct net_device *dev, int *budget)
1841 {
1842 nic_t *nic = dev->priv;
1843 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1844 int pkts_to_process = *budget, pkt_cnt = 0;
1845 register u64 val64 = 0;
1846 rx_curr_get_info_t get_info, put_info;
1847 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1848 #ifndef CONFIG_2BUFF_MODE
1849 u16 val16, cksum;
1850 #endif
1851 struct sk_buff *skb;
1852 RxD_t *rxdp;
1853 mac_info_t *mac_control;
1854 struct config_param *config;
1855 #ifdef CONFIG_2BUFF_MODE
1856 buffAdd_t *ba;
1857 #endif
1858
1859 mac_control = &nic->mac_control;
1860 config = &nic->config;
1861
1862 if (pkts_to_process > dev->quota)
1863 pkts_to_process = dev->quota;
1864
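        /*
         * rx_traffic_int is an R1 register; reading it and writing the
         * same value back acknowledges the pending Rx interrupt before
         * the rings are scanned, just as in rx_intr_handler().
         */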
1865 val64 = readq(&bar0->rx_traffic_int);
1866 writeq(val64, &bar0->rx_traffic_int);
1867
1868 for (i = 0; i < config->rx_ring_num; i++) {
1869 get_info = mac_control->rx_curr_get_info[i];
1870 get_block = get_info.block_index;
1871 put_info = mac_control->rx_curr_put_info[i];
1872 put_block = put_info.block_index;
1873 ring_bufs = config->rx_cfg[i].num_rxd;
1874 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1875 get_info.offset;
1876 #ifndef CONFIG_2BUFF_MODE
1877 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1878 get_info.offset;
1879 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1880 put_info.offset;
1881 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1882 (((get_offset + 1) % ring_bufs) != put_offset)) {
1883 if (--pkts_to_process < 0) {
1884 goto no_rx;
1885 }
1886 if (rxdp->Control_1 == END_OF_BLOCK) {
1887 rxdp =
1888 (RxD_t *) ((unsigned long) rxdp->
1889 Control_2);
1890 get_info.offset++;
1891 get_info.offset %=
1892 (MAX_RXDS_PER_BLOCK + 1);
1893 get_block++;
1894 get_block %= nic->block_count[i];
1895 mac_control->rx_curr_get_info[i].
1896 offset = get_info.offset;
1897 mac_control->rx_curr_get_info[i].
1898 block_index = get_block;
1899 continue;
1900 }
1901 get_offset =
1902 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1903 get_info.offset;
1904 skb =
1905 (struct sk_buff *) ((unsigned long) rxdp->
1906 Host_Control);
1907 if (skb == NULL) {
1908 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1909 dev->name);
1910 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1911 goto no_rx;
1912 }
1913 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1914 val16 = (u16) (val64 >> 48);
1915 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1916 pci_unmap_single(nic->pdev, (dma_addr_t)
1917 rxdp->Buffer0_ptr,
1918 dev->mtu +
1919 HEADER_ETHERNET_II_802_3_SIZE +
1920 HEADER_802_2_SIZE +
1921 HEADER_SNAP_SIZE,
1922 PCI_DMA_FROMDEVICE);
1923 rx_osm_handler(nic, val16, rxdp, i);
1924 pkt_cnt++;
1925 get_info.offset++;
1926 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1927 rxdp =
1928 nic->rx_blocks[i][get_block].block_virt_addr +
1929 get_info.offset;
1930 mac_control->rx_curr_get_info[i].offset =
1931 get_info.offset;
1932 }
1933 #else
1934 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1935 get_info.offset;
1936 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1937 put_info.offset;
1938 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1939 !(rxdp->Control_2 & BIT(0))) &&
1940 (((get_offset + 1) % ring_bufs) != put_offset)) {
1941 if (--pkts_to_process < 0) {
1942 goto no_rx;
1943 }
1944 skb = (struct sk_buff *) ((unsigned long)
1945 rxdp->Host_Control);
1946 if (skb == NULL) {
1947 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1948 dev->name);
1949 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1950 goto no_rx;
1951 }
1952
1953 pci_unmap_single(nic->pdev, (dma_addr_t)
1954 rxdp->Buffer0_ptr,
1955 BUF0_LEN, PCI_DMA_FROMDEVICE);
1956 pci_unmap_single(nic->pdev, (dma_addr_t)
1957 rxdp->Buffer1_ptr,
1958 BUF1_LEN, PCI_DMA_FROMDEVICE);
1959 pci_unmap_single(nic->pdev, (dma_addr_t)
1960 rxdp->Buffer2_ptr,
1961 dev->mtu + BUF0_LEN + 4,
1962 PCI_DMA_FROMDEVICE);
1963 ba = &nic->ba[i][get_block][get_info.offset];
1964
1965 rx_osm_handler(nic, rxdp, i, ba);
1966
1967 get_info.offset++;
1968 mac_control->rx_curr_get_info[i].offset =
1969 get_info.offset;
1970 rxdp =
1971 nic->rx_blocks[i][get_block].block_virt_addr +
1972 get_info.offset;
1973
1974 if (get_info.offset &&
1975 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
1976 get_info.offset = 0;
1977 mac_control->rx_curr_get_info[i].
1978 offset = get_info.offset;
1979 get_block++;
1980 get_block %= nic->block_count[i];
1981 mac_control->rx_curr_get_info[i].
1982 block_index = get_block;
1983 rxdp =
1984 nic->rx_blocks[i][get_block].
1985 block_virt_addr;
1986 }
1987 get_offset =
1988 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1989 get_info.offset;
1990 pkt_cnt++;
1991 }
1992 #endif
1993 }
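        /*
         * Account for at least one packet so that the quota/budget
         * bookkeeping below always makes forward progress, even if
         * nothing was processed on this pass.
         */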
1994 if (!pkt_cnt)
1995 pkt_cnt = 1;
1996
1997 dev->quota -= pkt_cnt;
1998 *budget -= pkt_cnt;
1999 netif_rx_complete(dev);
2000
2001 for (i = 0; i < config->rx_ring_num; i++) {
2002 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2003 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2004 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2005 break;
2006 }
2007 }
2008 /* Re enable the Rx interrupts. */
2009 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2010 return 0;
2011
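/*
 * Budget exhausted before the rings were drained: update the accounting,
 * replenish the Rx buffers and return 1 so that the poll routine gets
 * called again, with Rx interrupts still disabled.
 */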
2012 no_rx:
2013 dev->quota -= pkt_cnt;
2014 *budget -= pkt_cnt;
2015
2016 for (i = 0; i < config->rx_ring_num; i++) {
2017 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2018 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2019 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2020 break;
2021 }
2022 }
2023 return 1;
2024 }
2025 #else
2026 /**
2027 * rx_intr_handler - Rx interrupt handler
2028 * @nic: device private variable.
2029 * Description:
2030 * If the interrupt is because of a received frame or if the
2031  *  receive ring contains fresh, as yet unprocessed frames, this function is
2032  *  called. It picks out the RxD at which the last Rx processing stopped,
2033  *  sends the skb to the OSM's Rx handler and then increments
2034  *  the offset.
2035 * Return Value:
2036 * NONE.
2037 */
2038
2039 static void rx_intr_handler(struct s2io_nic *nic)
2040 {
2041 struct net_device *dev = (struct net_device *) nic->dev;
2042         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2043 rx_curr_get_info_t get_info, put_info;
2044 RxD_t *rxdp;
2045 struct sk_buff *skb;
2046 #ifndef CONFIG_2BUFF_MODE
2047 u16 val16, cksum;
2048 #endif
2049 register u64 val64 = 0;
2050 int get_block, get_offset, put_block, put_offset, ring_bufs;
2051 int i, pkt_cnt = 0;
2052 mac_info_t *mac_control;
2053 struct config_param *config;
2054 #ifdef CONFIG_2BUFF_MODE
2055 buffAdd_t *ba;
2056 #endif
2057
2058 mac_control = &nic->mac_control;
2059 config = &nic->config;
2060
2061 /*
2062 * rx_traffic_int reg is an R1 register, hence we read and write back
2063          * the same value in the register to clear it.
2064 */
2065 val64 = readq(&bar0->rx_traffic_int);
2066 writeq(val64, &bar0->rx_traffic_int);
2067
2068 for (i = 0; i < config->rx_ring_num; i++) {
2069 get_info = mac_control->rx_curr_get_info[i];
2070 get_block = get_info.block_index;
2071 put_info = mac_control->rx_curr_put_info[i];
2072 put_block = put_info.block_index;
2073 ring_bufs = config->rx_cfg[i].num_rxd;
2074 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2075 get_info.offset;
2076 #ifndef CONFIG_2BUFF_MODE
2077 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2078 get_info.offset;
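                /*
                 * put_pos is presumably advanced by the Rx replenish path
                 * under the same put_lock, so snapshot it here to get a
                 * stable producer index for the loop below.
                 */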
2079 spin_lock(&nic->put_lock);
2080 put_offset = nic->put_pos[i];
2081 spin_unlock(&nic->put_lock);
2082 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2083 (((get_offset + 1) % ring_bufs) != put_offset)) {
2084 if (rxdp->Control_1 == END_OF_BLOCK) {
2085 rxdp = (RxD_t *) ((unsigned long)
2086 rxdp->Control_2);
2087 get_info.offset++;
2088 get_info.offset %=
2089 (MAX_RXDS_PER_BLOCK + 1);
2090 get_block++;
2091 get_block %= nic->block_count[i];
2092 mac_control->rx_curr_get_info[i].
2093 offset = get_info.offset;
2094 mac_control->rx_curr_get_info[i].
2095 block_index = get_block;
2096 continue;
2097 }
2098 get_offset =
2099 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2100 get_info.offset;
2101 skb = (struct sk_buff *) ((unsigned long)
2102 rxdp->Host_Control);
2103 if (skb == NULL) {
2104 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2105 dev->name);
2106 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2107 return;
2108 }
2109 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2110 val16 = (u16) (val64 >> 48);
2111 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2112 pci_unmap_single(nic->pdev, (dma_addr_t)
2113 rxdp->Buffer0_ptr,
2114 dev->mtu +
2115 HEADER_ETHERNET_II_802_3_SIZE +
2116 HEADER_802_2_SIZE +
2117 HEADER_SNAP_SIZE,
2118 PCI_DMA_FROMDEVICE);
2119 rx_osm_handler(nic, val16, rxdp, i);
2120 get_info.offset++;
2121 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2122 rxdp =
2123 nic->rx_blocks[i][get_block].block_virt_addr +
2124 get_info.offset;
2125 mac_control->rx_curr_get_info[i].offset =
2126 get_info.offset;
2127 pkt_cnt++;
2128 if ((indicate_max_pkts)
2129 && (pkt_cnt > indicate_max_pkts))
2130 break;
2131 }
2132 #else
2133 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2134 get_info.offset;
2135 spin_lock(&nic->put_lock);
2136 put_offset = nic->put_pos[i];
2137 spin_unlock(&nic->put_lock);
2138 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2139 !(rxdp->Control_2 & BIT(0))) &&
2140 (((get_offset + 1) % ring_bufs) != put_offset)) {
2141 skb = (struct sk_buff *) ((unsigned long)
2142 rxdp->Host_Control);
2143 if (skb == NULL) {
2144 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2145 dev->name);
2146 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2147 return;
2148 }
2149
2150 pci_unmap_single(nic->pdev, (dma_addr_t)
2151 rxdp->Buffer0_ptr,
2152 BUF0_LEN, PCI_DMA_FROMDEVICE);
2153 pci_unmap_single(nic->pdev, (dma_addr_t)
2154 rxdp->Buffer1_ptr,
2155 BUF1_LEN, PCI_DMA_FROMDEVICE);
2156 pci_unmap_single(nic->pdev, (dma_addr_t)
2157 rxdp->Buffer2_ptr,
2158 dev->mtu + BUF0_LEN + 4,
2159 PCI_DMA_FROMDEVICE);
2160 ba = &nic->ba[i][get_block][get_info.offset];
2161
2162 rx_osm_handler(nic, rxdp, i, ba);
2163
2164 get_info.offset++;
2165 mac_control->rx_curr_get_info[i].offset =
2166 get_info.offset;
2167 rxdp =
2168 nic->rx_blocks[i][get_block].block_virt_addr +
2169 get_info.offset;
2170
2171 if (get_info.offset &&
2172 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2173 get_info.offset = 0;
2174 mac_control->rx_curr_get_info[i].
2175 offset = get_info.offset;
2176 get_block++;
2177 get_block %= nic->block_count[i];
2178 mac_control->rx_curr_get_info[i].
2179 block_index = get_block;
2180 rxdp =
2181 nic->rx_blocks[i][get_block].
2182 block_virt_addr;
2183 }
2184 get_offset =
2185 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2186 get_info.offset;
2187 pkt_cnt++;
2188 if ((indicate_max_pkts)
2189 && (pkt_cnt > indicate_max_pkts))
2190 break;
2191 }
2192 #endif
2193 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2194 break;
2195 }
2196 }
2197 #endif
2198 /**
2199 * tx_intr_handler - Transmit interrupt handler
2200 * @nic : device private variable
2201 * Description:
2202 * If an interrupt was raised to indicate DMA complete of the
2203 * Tx packet, this function is called. It identifies the last TxD
2204  * whose buffer was freed and frees all skbs whose data have already been
2205  * DMA'ed into the NIC's internal memory.
2206 * Return Value:
2207 * NONE
2208 */
2209
2210 static void tx_intr_handler(struct s2io_nic *nic)
2211 {
2212 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2213 struct net_device *dev = (struct net_device *) nic->dev;
2214 tx_curr_get_info_t get_info, put_info;
2215 struct sk_buff *skb;
2216 TxD_t *txdlp;
2217 register u64 val64 = 0;
2218 int i;
2219 u16 j, frg_cnt;
2220 mac_info_t *mac_control;
2221 struct config_param *config;
2222
2223 mac_control = &nic->mac_control;
2224 config = &nic->config;
2225
2226 /*
2227 * tx_traffic_int reg is an R1 register, hence we read and write
2228          * back the same value in the register to clear it.
2229 */
2230 val64 = readq(&bar0->tx_traffic_int);
2231 writeq(val64, &bar0->tx_traffic_int);
2232
2233 for (i = 0; i < config->tx_fifo_num; i++) {
2234 get_info = mac_control->tx_curr_get_info[i];
2235 put_info = mac_control->tx_curr_put_info[i];
2236 txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
2237 list_virt_addr;
2238 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2239 (get_info.offset != put_info.offset) &&
2240 (txdlp->Host_Control)) {
2241 /* Check for TxD errors */
2242 if (txdlp->Control_1 & TXD_T_CODE) {
2243 unsigned long long err;
2244 err = txdlp->Control_1 & TXD_T_CODE;
2245 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2246 err);
2247 }
2248
2249 skb = (struct sk_buff *) ((unsigned long)
2250 txdlp->Host_Control);
2251 if (skb == NULL) {
2252 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2253 dev->name);
2254 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2255 return;
2256 }
2257 nic->tx_pkt_count++;
2258
2259 frg_cnt = skb_shinfo(skb)->nr_frags;
2260
2261 /* For unfragmented skb */
2262 pci_unmap_single(nic->pdev, (dma_addr_t)
2263 txdlp->Buffer_Pointer,
2264 skb->len - skb->data_len,
2265 PCI_DMA_TODEVICE);
2266 if (frg_cnt) {
2267 TxD_t *temp = txdlp;
2268 txdlp++;
2269 for (j = 0; j < frg_cnt; j++, txdlp++) {
2270 skb_frag_t *frag =
2271 &skb_shinfo(skb)->frags[j];
2272 pci_unmap_page(nic->pdev,
2273 (dma_addr_t)
2274 txdlp->
2275 Buffer_Pointer,
2276 frag->size,
2277 PCI_DMA_TODEVICE);
2278 }
2279 txdlp = temp;
2280 }
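                        /*
                         * Wipe the whole TxD list (max_txds descriptors)
                         * so stale buffer pointers and ownership bits are
                         * not mistaken for a new frame on the next pass.
                         */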
2281 memset(txdlp, 0,
2282 (sizeof(TxD_t) * config->max_txds));
2283
2284 /* Updating the statistics block */
2285 nic->stats.tx_packets++;
2286 nic->stats.tx_bytes += skb->len;
2287 dev_kfree_skb_irq(skb);
2288
2289 get_info.offset++;
2290 get_info.offset %= get_info.fifo_len + 1;
2291 txdlp = (TxD_t *) nic->list_info[i]
2292 [get_info.offset].list_virt_addr;
2293 mac_control->tx_curr_get_info[i].offset =
2294 get_info.offset;
2295 }
2296 }
2297
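        /*
         * TxDs were just freed, so restart the transmit queue if
         * s2io_xmit() had stopped it for lack of free descriptors.
         */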
2298 spin_lock(&nic->tx_lock);
2299 if (netif_queue_stopped(dev))
2300 netif_wake_queue(dev);
2301 spin_unlock(&nic->tx_lock);
2302 }
2303
2304 /**
2305  * alarm_intr_handler - Alarm Interrupt handler
2306 * @nic: device private variable
2307  * Description: If the interrupt was neither because of an Rx packet nor a Tx
2308  * complete, this function is called. If the interrupt was to indicate
2309  * a loss of link, the OSM link status handler is invoked; for any other
2310  * alarm interrupt, the block that raised the interrupt is displayed
2311  * and a H/W reset is issued.
2312 * Return Value:
2313 * NONE
2314 */
2315
2316 static void alarm_intr_handler(struct s2io_nic *nic)
2317 {
2318 struct net_device *dev = (struct net_device *) nic->dev;
2319 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2320 register u64 val64 = 0, err_reg = 0;
2321
2322 /* Handling link status change error Intr */
2323 err_reg = readq(&bar0->mac_rmac_err_reg);
2324 writeq(err_reg, &bar0->mac_rmac_err_reg);
2325 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2326 schedule_work(&nic->set_link_task);
2327 }
2328
2329 /* In case of a serious error, the device will be Reset. */
2330 val64 = readq(&bar0->serr_source);
2331 if (val64 & SERR_SOURCE_ANY) {
2332 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2333 DBG_PRINT(ERR_DBG, "serious error!!\n");
2334 netif_stop_queue(dev);
2335 schedule_work(&nic->rst_timer_task);
2336 }
2337
2338 /*
2339 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2340 * Error occurs, the adapter will be recycled by disabling the
2341 * adapter enable bit and enabling it again after the device
2342 * becomes Quiescent.
2343 */
2344 val64 = readq(&bar0->pcc_err_reg);
2345 writeq(val64, &bar0->pcc_err_reg);
2346 if (val64 & PCC_FB_ECC_DB_ERR) {
2347 u64 ac = readq(&bar0->adapter_control);
2348 ac &= ~(ADAPTER_CNTL_EN);
2349 writeq(ac, &bar0->adapter_control);
2350 ac = readq(&bar0->adapter_control);
2351 schedule_work(&nic->set_link_task);
2352 }
2353
2354         /* Other types of interrupts are not being handled now, TODO */
2355 }
2356
2357 /**
2358 * wait_for_cmd_complete - waits for a command to complete.
2359 * @sp : private member of the device structure, which is a pointer to the
2360 * s2io_nic structure.
2361  * Description: Function that waits for a command written to the RMAC
2362  * ADDR DATA registers to complete and returns either success or
2363  * error depending on whether the command completed or not.
2364 * Return value:
2365 * SUCCESS on success and FAILURE on failure.
2366 */
2367
2368 static int wait_for_cmd_complete(nic_t * sp)
2369 {
2370 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2371 int ret = FAILURE, cnt = 0;
2372 u64 val64;
2373
2374 while (TRUE) {
2375 val64 = readq(&bar0->rmac_addr_cmd_mem);
2376 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2377 ret = SUCCESS;
2378 break;
2379 }
2380 msleep(50);
2381 if (cnt++ > 10)
2382 break;
2383 }
2384
2385 return ret;
2386 }
2387
2388 /**
2389 * s2io_reset - Resets the card.
2390 * @sp : private member of the device structure.
2391 * Description: Function to Reset the card. This function then also
2392 * restores the previously saved PCI configuration space registers as
2393 * the card reset also resets the configuration space.
2394 * Return value:
2395 * void.
2396 */
2397
2398 static void s2io_reset(nic_t * sp)
2399 {
2400 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2401 u64 val64;
2402 u16 subid;
2403
2404 val64 = SW_RESET_ALL;
2405 writeq(val64, &bar0->sw_reset);
2406
2407 /*
2408 * At this stage, if the PCI write is indeed completed, the
2409 * card is reset and so is the PCI Config space of the device.
2410 * So a read cannot be issued at this stage on any of the
2411 * registers to ensure the write into "sw_reset" register
2412 * has gone through.
2413 * Question: Is there any system call that will explicitly force
2414 * all the write commands still pending on the bus to be pushed
2415 * through?
2416          * As of now I am just giving a 250ms delay and hoping that the
2417 * PCI write to sw_reset register is done by this time.
2418 */
2419 msleep(250);
2420
2421         /* Restore the PCI state saved during initialization. */
2422 pci_restore_state(sp->pdev);
2423 s2io_init_pci(sp);
2424
2425 msleep(250);
2426
2427 /* SXE-002: Configure link and activity LED to turn it off */
2428 subid = sp->pdev->subsystem_device;
2429 if ((subid & 0xFF) >= 0x07) {
2430 val64 = readq(&bar0->gpio_control);
2431 val64 |= 0x0000800000000000ULL;
2432 writeq(val64, &bar0->gpio_control);
2433 val64 = 0x0411040400000000ULL;
2434 writeq(val64, (void __iomem *) bar0 + 0x2700);
2435 }
2436
2437 sp->device_enabled_once = FALSE;
2438 }
2439
2440 /**
2441  * s2io_set_swapper - to set the swapper control on the card
2442 * @sp : private member of the device structure,
2443 * pointer to the s2io_nic structure.
2444 * Description: Function to set the swapper control on the card
2445 * correctly depending on the 'endianness' of the system.
2446 * Return value:
2447 * SUCCESS on success and FAILURE on failure.
2448 */
2449
2450 static int s2io_set_swapper(nic_t * sp)
2451 {
2452 struct net_device *dev = sp->dev;
2453 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2454 u64 val64, valt, valr;
2455
2456 /*
2457 * Set proper endian settings and verify the same by reading
2458 * the PIF Feed-back register.
2459 */
2460
2461 val64 = readq(&bar0->pif_rd_swapper_fb);
2462 if (val64 != 0x0123456789ABCDEFULL) {
2463 int i = 0;
2464 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2465 0x8100008181000081ULL, /* FE=1, SE=0 */
2466 0x4200004242000042ULL, /* FE=0, SE=1 */
2467 0}; /* FE=0, SE=0 */
2468
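                /*
                 * Try each candidate read-swapper setting until the PIF
                 * feedback register returns the expected constant, i.e.
                 * until reads from the card come back correctly ordered.
                 */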
2469 while(i<4) {
2470 writeq(value[i], &bar0->swapper_ctrl);
2471 val64 = readq(&bar0->pif_rd_swapper_fb);
2472 if (val64 == 0x0123456789ABCDEFULL)
2473 break;
2474 i++;
2475 }
2476 if (i == 4) {
2477 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2478 dev->name);
2479 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2480 (unsigned long long) val64);
2481 return FAILURE;
2482 }
2483 valr = value[i];
2484 } else {
2485 valr = readq(&bar0->swapper_ctrl);
2486 }
2487
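        /*
         * Now check the write path: write a known pattern to xmsi_address
         * and read it back; if it does not match, probe the write-swapper
         * settings in the same fashion.
         */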
2488 valt = 0x0123456789ABCDEFULL;
2489 writeq(valt, &bar0->xmsi_address);
2490 val64 = readq(&bar0->xmsi_address);
2491
2492 if(val64 != valt) {
2493 int i = 0;
2494 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2495 0x0081810000818100ULL, /* FE=1, SE=0 */
2496 0x0042420000424200ULL, /* FE=0, SE=1 */
2497 0}; /* FE=0, SE=0 */
2498
2499 while(i<4) {
2500 writeq((value[i] | valr), &bar0->swapper_ctrl);
2501 writeq(valt, &bar0->xmsi_address);
2502 val64 = readq(&bar0->xmsi_address);
2503 if(val64 == valt)
2504 break;
2505 i++;
2506 }
2507 if(i == 4) {
2508 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2509 DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64);
2510 return FAILURE;
2511 }
2512 }
2513 val64 = readq(&bar0->swapper_ctrl);
2514 val64 &= 0xFFFF000000000000ULL;
2515
2516 #ifdef __BIG_ENDIAN
2517 /*
2518          * The device is set to a big endian format by default, so a
2519 * big endian driver need not set anything.
2520 */
2521 val64 |= (SWAPPER_CTRL_TXP_FE |
2522 SWAPPER_CTRL_TXP_SE |
2523 SWAPPER_CTRL_TXD_R_FE |
2524 SWAPPER_CTRL_TXD_W_FE |
2525 SWAPPER_CTRL_TXF_R_FE |
2526 SWAPPER_CTRL_RXD_R_FE |
2527 SWAPPER_CTRL_RXD_W_FE |
2528 SWAPPER_CTRL_RXF_W_FE |
2529 SWAPPER_CTRL_XMSI_FE |
2530 SWAPPER_CTRL_XMSI_SE |
2531 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2532 writeq(val64, &bar0->swapper_ctrl);
2533 #else
2534 /*
2535 * Initially we enable all bits to make it accessible by the
2536 * driver, then we selectively enable only those bits that
2537 * we want to set.
2538 */
2539 val64 |= (SWAPPER_CTRL_TXP_FE |
2540 SWAPPER_CTRL_TXP_SE |
2541 SWAPPER_CTRL_TXD_R_FE |
2542 SWAPPER_CTRL_TXD_R_SE |
2543 SWAPPER_CTRL_TXD_W_FE |
2544 SWAPPER_CTRL_TXD_W_SE |
2545 SWAPPER_CTRL_TXF_R_FE |
2546 SWAPPER_CTRL_RXD_R_FE |
2547 SWAPPER_CTRL_RXD_R_SE |
2548 SWAPPER_CTRL_RXD_W_FE |
2549 SWAPPER_CTRL_RXD_W_SE |
2550 SWAPPER_CTRL_RXF_W_FE |
2551 SWAPPER_CTRL_XMSI_FE |
2552 SWAPPER_CTRL_XMSI_SE |
2553 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2554 writeq(val64, &bar0->swapper_ctrl);
2555 #endif
2556 val64 = readq(&bar0->swapper_ctrl);
2557
2558 /*
2559 * Verifying if endian settings are accurate by reading a
2560 * feedback register.
2561 */
2562 val64 = readq(&bar0->pif_rd_swapper_fb);
2563 if (val64 != 0x0123456789ABCDEFULL) {
2564                 /* Endian settings are incorrect, calls for another look. */
2565 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2566 dev->name);
2567 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2568 (unsigned long long) val64);
2569 return FAILURE;
2570 }
2571
2572 return SUCCESS;
2573 }
2574
2575 /* ********************************************************* *
2576 * Functions defined below concern the OS part of the driver *
2577 * ********************************************************* */
2578
2579 /**
2580 * s2io_open - open entry point of the driver
2581 * @dev : pointer to the device structure.
2582 * Description:
2583 * This function is the open entry point of the driver. It mainly calls a
2584 * function to allocate Rx buffers and inserts them into the buffer
2585 * descriptors and then enables the Rx part of the NIC.
2586 * Return value:
2587 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2588 * file on failure.
2589 */
2590
2591 static int s2io_open(struct net_device *dev)
2592 {
2593 nic_t *sp = dev->priv;
2594 int err = 0;
2595
2596 /*
2597 * Make sure you have link off by default every time
2598          * the NIC is initialized
2599 */
2600 netif_carrier_off(dev);
2601 sp->last_link_state = LINK_DOWN;
2602
2603 /* Initialize H/W and enable interrupts */
2604 if (s2io_card_up(sp)) {
2605 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2606 dev->name);
2607 return -ENODEV;
2608 }
2609
2610 /* After proper initialization of H/W, register ISR */
2611 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
2612 sp->name, dev);
2613 if (err) {
2614 s2io_reset(sp);
2615 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2616 dev->name);
2617 return err;
2618 }
2619
2620 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2621 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2622 s2io_reset(sp);
2623 return -ENODEV;
2624 }
2625
2626 netif_start_queue(dev);
2627 return 0;
2628 }
2629
2630 /**
2631 * s2io_close -close entry point of the driver
2632 * @dev : device pointer.
2633 * Description:
2634 * This is the stop entry point of the driver. It needs to undo exactly
2635  * whatever was done by the open entry point, thus it's usually referred to
2636  * as the close function. Among other things this function mainly stops the
2637 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2638 * Return value:
2639 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2640 * file on failure.
2641 */
2642
2643 static int s2io_close(struct net_device *dev)
2644 {
2645 nic_t *sp = dev->priv;
2646
2647 flush_scheduled_work();
2648 netif_stop_queue(dev);
2649 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2650 s2io_card_down(sp);
2651
2652 free_irq(dev->irq, dev);
2653 sp->device_close_flag = TRUE; /* Device is shut down. */
2654 return 0;
2655 }
2656
2657 /**
2658  * s2io_xmit - Tx entry point of the driver
2659 * @skb : the socket buffer containing the Tx data.
2660 * @dev : device pointer.
2661 * Description :
2662 * This function is the Tx entry point of the driver. S2IO NIC supports
2663 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2664  * NOTE: when the device cannot queue the packet, the trans_start variable
2665  * will not be updated.
2666 * Return value:
2667 * 0 on success & 1 on failure.
2668 */
2669
2670 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2671 {
2672 nic_t *sp = dev->priv;
2673 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2674 register u64 val64;
2675 TxD_t *txdp;
2676 TxFIFO_element_t __iomem *tx_fifo;
2677 unsigned long flags;
2678 #ifdef NETIF_F_TSO
2679 int mss;
2680 #endif
2681 mac_info_t *mac_control;
2682 struct config_param *config;
2683 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2684
2685 mac_control = &sp->mac_control;
2686 config = &sp->config;
2687
2688 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
2689 spin_lock_irqsave(&sp->tx_lock, flags);
2690
2691 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2692 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
2693 dev->name);
2694 spin_unlock_irqrestore(&sp->tx_lock, flags);
2695 return 1;
2696 }
2697
2698 queue = 0;
2699 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2700 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2701 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2702
2703 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
2704 /* Avoid "put" pointer going beyond "get" pointer */
2705 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2706 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2707 netif_stop_queue(dev);
2708 dev_kfree_skb(skb);
2709 spin_unlock_irqrestore(&sp->tx_lock, flags);
2710 return 0;
2711 }
2712 #ifdef NETIF_F_TSO
2713 mss = skb_shinfo(skb)->tso_size;
2714 if (mss) {
2715 txdp->Control_1 |= TXD_TCP_LSO_EN;
2716 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2717 }
2718 #endif
2719
2720 frg_cnt = skb_shinfo(skb)->nr_frags;
2721 frg_len = skb->len - skb->data_len;
2722
2723 txdp->Host_Control = (unsigned long) skb;
2724 txdp->Buffer_Pointer = pci_map_single
2725 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2726 if (skb->ip_summed == CHECKSUM_HW) {
2727 txdp->Control_2 |=
2728 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2729 TXD_TX_CKO_UDP_EN);
2730 }
2731
2732 txdp->Control_2 |= config->tx_intr_type;
2733
2734 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2735 TXD_GATHER_CODE_FIRST);
2736 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2737
2738 /* For fragmented SKB. */
2739 for (i = 0; i < frg_cnt; i++) {
2740 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2741 txdp++;
2742 txdp->Buffer_Pointer = (u64) pci_map_page
2743 (sp->pdev, frag->page, frag->page_offset,
2744 frag->size, PCI_DMA_TODEVICE);
2745 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2746 }
2747 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2748
2749 tx_fifo = mac_control->tx_FIFO_start[queue];
2750 val64 = sp->list_info[queue][put_off].list_phy_addr;
2751 writeq(val64, &tx_fifo->TxDL_Pointer);
2752
2753 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2754 TX_FIFO_LAST_LIST);
2755 #ifdef NETIF_F_TSO
2756 if (mss)
2757 val64 |= TX_FIFO_SPECIAL_FUNC;
2758 #endif
2759 writeq(val64, &tx_fifo->List_Control);
2760
2761 /* Perform a PCI read to flush previous writes */
2762 val64 = readq(&bar0->general_int_status);
2763
2764 put_off++;
2765 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
2766 mac_control->tx_curr_put_info[queue].offset = put_off;
2767
2768 /* Avoid "put" pointer going beyond "get" pointer */
2769 if (((put_off + 1) % queue_len) == get_off) {
2770 DBG_PRINT(TX_DBG,
2771 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2772 put_off, get_off);
2773 netif_stop_queue(dev);
2774 }
2775
2776 dev->trans_start = jiffies;
2777 spin_unlock_irqrestore(&sp->tx_lock, flags);
2778
2779 return 0;
2780 }
2781
2782 /**
2783 * s2io_isr - ISR handler of the device .
2784 * @irq: the irq of the device.
2785 * @dev_id: a void pointer to the dev structure of the NIC.
2786 * @pt_regs: pointer to the registers pushed on the stack.
2787 * Description: This function is the ISR handler of the device. It
2788 * identifies the reason for the interrupt and calls the relevant
2789  * service routines. As a contingency measure, this ISR allocates the
2790  * receive buffers, if their number is below the panic value which is
2791  * presently set to 25% of the original number of receive buffers allocated.
2792 * Return value:
2793 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2794 * IRQ_NONE: will be returned if interrupt is not from our device
2795 */
2796 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2797 {
2798 struct net_device *dev = (struct net_device *) dev_id;
2799 nic_t *sp = dev->priv;
2800 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2801 #ifndef CONFIG_S2IO_NAPI
2802 int i, ret;
2803 #endif
2804 u64 reason = 0;
2805 mac_info_t *mac_control;
2806 struct config_param *config;
2807
2808 mac_control = &sp->mac_control;
2809 config = &sp->config;
2810
2811 /*
2812 * Identify the cause for interrupt and call the appropriate
2813 * interrupt handler. Causes for the interrupt could be;
2814 * 1. Rx of packet.
2815 * 2. Tx complete.
2816 * 3. Link down.
2817 * 4. Error in any functional blocks of the NIC.
2818 */
2819 reason = readq(&bar0->general_int_status);
2820
2821 if (!reason) {
2822 /* The interrupt was not raised by Xena. */
2823 return IRQ_NONE;
2824 }
2825
2826 /* If Intr is because of Tx Traffic */
2827 if (reason & GEN_INTR_TXTRAFFIC) {
2828 tx_intr_handler(sp);
2829 }
2830
2831 /* If Intr is because of an error */
2832 if (reason & (GEN_ERROR_INTR))
2833 alarm_intr_handler(sp);
2834
2835 #ifdef CONFIG_S2IO_NAPI
2836 if (reason & GEN_INTR_RXTRAFFIC) {
2837 if (netif_rx_schedule_prep(dev)) {
2838 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2839 DISABLE_INTRS);
2840 __netif_rx_schedule(dev);
2841 }
2842 }
2843 #else
2844 /* If Intr is because of Rx Traffic */
2845 if (reason & GEN_INTR_RXTRAFFIC) {
2846 rx_intr_handler(sp);
2847 }
2848 #endif
2849
2850 /*
2851 * If the Rx buffer count is below the panic threshold then
2852 * reallocate the buffers from the interrupt handler itself,
2853 * else schedule a tasklet to reallocate the buffers.
2854 */
2855 #ifndef CONFIG_S2IO_NAPI
2856 for (i = 0; i < config->rx_ring_num; i++) {
2857 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2858 int level = rx_buffer_level(sp, rxb_size, i);
2859
2860 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2861 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2862 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2863 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2864 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2865 dev->name);
2866 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2867 clear_bit(0, (&sp->tasklet_status));
2868 return IRQ_HANDLED;
2869 }
2870 clear_bit(0, (&sp->tasklet_status));
2871 } else if (level == LOW) {
2872 tasklet_schedule(&sp->task);
2873 }
2874 }
2875 #endif
2876
2877 return IRQ_HANDLED;
2878 }
2879
2880 /**
2881 * s2io_get_stats - Updates the device statistics structure.
2882 * @dev : pointer to the device structure.
2883 * Description:
2884 * This function updates the device statistics structure in the s2io_nic
2885 * structure and returns a pointer to the same.
2886 * Return value:
2887 * pointer to the updated net_device_stats structure.
2888 */
2889
2890 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
2891 {
2892 nic_t *sp = dev->priv;
2893 mac_info_t *mac_control;
2894 struct config_param *config;
2895
2896 mac_control = &sp->mac_control;
2897 config = &sp->config;
2898
2899 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
2900 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
2901 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
2902 sp->stats.rx_length_errors =
2903 mac_control->stats_info->rmac_long_frms;
2904
2905 return (&sp->stats);
2906 }
2907
2908 /**
2909 * s2io_set_multicast - entry point for multicast address enable/disable.
2910 * @dev : pointer to the device structure
2911 * Description:
2912 * This function is a driver entry point which gets called by the kernel
2913 * whenever multicast addresses must be enabled/disabled. This also gets
2914 * called to set/reset promiscuous mode. Depending on the deivce flag, we
2915  * called to set/reset promiscuous mode. Depending on the device flags, we
2916  * determine if multicast addresses must be enabled or if promiscuous mode
2917 * Return value:
2918 * void.
2919 */
2920
2921 static void s2io_set_multicast(struct net_device *dev)
2922 {
2923 int i, j, prev_cnt;
2924 struct dev_mc_list *mclist;
2925 nic_t *sp = dev->priv;
2926 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2927 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
2928 0xfeffffffffffULL;
2929 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
2930 void __iomem *add;
2931
2932 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
2933 /* Enable all Multicast addresses */
2934 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
2935 &bar0->rmac_addr_data0_mem);
2936 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
2937 &bar0->rmac_addr_data1_mem);
2938 val64 = RMAC_ADDR_CMD_MEM_WE |
2939 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2940 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
2941 writeq(val64, &bar0->rmac_addr_cmd_mem);
2942 /* Wait till command completes */
2943 wait_for_cmd_complete(sp);
2944
2945 sp->m_cast_flg = 1;
2946 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
2947 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
2948 /* Disable all Multicast addresses */
2949 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2950 &bar0->rmac_addr_data0_mem);
2951 val64 = RMAC_ADDR_CMD_MEM_WE |
2952 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2953 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
2954 writeq(val64, &bar0->rmac_addr_cmd_mem);
2955 /* Wait till command completes */
2956 wait_for_cmd_complete(sp);
2957
2958 sp->m_cast_flg = 0;
2959 sp->all_multi_pos = 0;
2960 }
2961
2962 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
2963 /* Put the NIC into promiscuous mode */
2964 add = &bar0->mac_cfg;
2965 val64 = readq(&bar0->mac_cfg);
2966 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
2967
2968 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2969 writel((u32) val64, add);
2970 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2971 writel((u32) (val64 >> 32), (add + 4));
2972
2973 val64 = readq(&bar0->mac_cfg);
2974 sp->promisc_flg = 1;
2975 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
2976 dev->name);
2977 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
2978 /* Remove the NIC from promiscuous mode */
2979 add = &bar0->mac_cfg;
2980 val64 = readq(&bar0->mac_cfg);
2981 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
2982
2983 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2984 writel((u32) val64, add);
2985 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2986 writel((u32) (val64 >> 32), (add + 4));
2987
2988 val64 = readq(&bar0->mac_cfg);
2989 sp->promisc_flg = 0;
2990 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
2991 dev->name);
2992 }
2993
2994 /* Update individual M_CAST address list */
2995 if ((!sp->m_cast_flg) && dev->mc_count) {
2996 if (dev->mc_count >
2997 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
2998 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
2999 dev->name);
3000 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3001 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3002 return;
3003 }
3004
3005 prev_cnt = sp->mc_addr_count;
3006 sp->mc_addr_count = dev->mc_count;
3007
3008 /* Clear out the previous list of Mc in the H/W. */
3009 for (i = 0; i < prev_cnt; i++) {
3010 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3011 &bar0->rmac_addr_data0_mem);
3012 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3013 &bar0->rmac_addr_data1_mem);
3014 val64 = RMAC_ADDR_CMD_MEM_WE |
3015 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3016 RMAC_ADDR_CMD_MEM_OFFSET
3017 (MAC_MC_ADDR_START_OFFSET + i);
3018 writeq(val64, &bar0->rmac_addr_cmd_mem);
3019
3020                         /* Wait for the command to complete */
3021 if (wait_for_cmd_complete(sp)) {
3022 DBG_PRINT(ERR_DBG, "%s: Adding ",
3023 dev->name);
3024 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3025 return;
3026 }
3027 }
3028
3029 /* Create the new Rx filter list and update the same in H/W. */
3030 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3031 i++, mclist = mclist->next) {
3032 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3033 ETH_ALEN);
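                        /*
                         * Pack the 6 byte multicast address into a u64 with
                         * the most significant byte first; the final right
                         * shift below undoes the extra shift from the last
                         * iteration.
                         */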
3034 for (j = 0; j < ETH_ALEN; j++) {
3035 mac_addr |= mclist->dmi_addr[j];
3036 mac_addr <<= 8;
3037 }
3038 mac_addr >>= 8;
3039 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3040 &bar0->rmac_addr_data0_mem);
3041 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3042 &bar0->rmac_addr_data1_mem);
3043
3044 val64 = RMAC_ADDR_CMD_MEM_WE |
3045 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3046 RMAC_ADDR_CMD_MEM_OFFSET
3047 (i + MAC_MC_ADDR_START_OFFSET);
3048 writeq(val64, &bar0->rmac_addr_cmd_mem);
3049
3050                         /* Wait for the command to complete */
3051 if (wait_for_cmd_complete(sp)) {
3052 DBG_PRINT(ERR_DBG, "%s: Adding ",
3053 dev->name);
3054 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3055 return;
3056 }
3057 }
3058 }
3059 }
3060
3061 /**
3062 * s2io_set_mac_addr - Programs the Xframe mac address
3063 * @dev : pointer to the device structure.
3064 * @addr: a uchar pointer to the new mac address which is to be set.
3065 * Description : This procedure will program the Xframe to receive
3066 * frames with new Mac Address
3067 * Return value: SUCCESS on success and an appropriate (-)ve integer
3068 * as defined in errno.h file on failure.
3069 */
3070
3071 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3072 {
3073 nic_t *sp = dev->priv;
3074 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3075 register u64 val64, mac_addr = 0;
3076 int i;
3077
3078 /*
3079 * Set the new MAC address as the new unicast filter and reflect this
3080 * change on the device address registered with the OS. It will be
3081 * at offset 0.
3082 */
3083 for (i = 0; i < ETH_ALEN; i++) {
3084 mac_addr <<= 8;
3085 mac_addr |= addr[i];
3086 }
3087
3088 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3089 &bar0->rmac_addr_data0_mem);
3090
3091 val64 =
3092 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3093 RMAC_ADDR_CMD_MEM_OFFSET(0);
3094 writeq(val64, &bar0->rmac_addr_cmd_mem);
3095 /* Wait till command completes */
3096 if (wait_for_cmd_complete(sp)) {
3097 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3098 return FAILURE;
3099 }
3100
3101 return SUCCESS;
3102 }
3103
3104 /**
3105 * s2io_ethtool_sset - Sets different link parameters.
3106  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3107 * @info: pointer to the structure with parameters given by ethtool to set
3108 * link information.
3109 * Description:
3110 * The function sets different link parameters provided by the user onto
3111 * the NIC.
3112 * Return value:
3113 * 0 on success.
3114 */
3115
3116 static int s2io_ethtool_sset(struct net_device *dev,
3117 struct ethtool_cmd *info)
3118 {
3119 nic_t *sp = dev->priv;
3120 if ((info->autoneg == AUTONEG_ENABLE) ||
3121 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3122 return -EINVAL;
3123 else {
3124 s2io_close(sp->dev);
3125 s2io_open(sp->dev);
3126 }
3127
3128 return 0;
3129 }
3130
3131 /**
3132  * s2io_ethtool_gset - Return link specific information.
3133 * @sp : private member of the device structure, pointer to the
3134 * s2io_nic structure.
3135 * @info : pointer to the structure with parameters given by ethtool
3136 * to return link information.
3137 * Description:
3138 * Returns link specific information like speed, duplex etc.. to ethtool.
3139 * Return value :
3140 * return 0 on success.
3141 */
3142
3143 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3144 {
3145 nic_t *sp = dev->priv;
3146 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3147 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3148 info->port = PORT_FIBRE;
3149 /* info->transceiver?? TODO */
3150
3151 if (netif_carrier_ok(sp->dev)) {
3152 info->speed = 10000;
3153 info->duplex = DUPLEX_FULL;
3154 } else {
3155 info->speed = -1;
3156 info->duplex = -1;
3157 }
3158
3159 info->autoneg = AUTONEG_DISABLE;
3160 return 0;
3161 }
3162
3163 /**
3164 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3165 * @sp : private member of the device structure, which is a pointer to the
3166 * s2io_nic structure.
3167 * @info : pointer to the structure with parameters given by ethtool to
3168 * return driver information.
3169 * Description:
3170  * Returns driver specific information like name, version etc. to ethtool.
3171 * Return value:
3172 * void
3173 */
3174
3175 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3176 struct ethtool_drvinfo *info)
3177 {
3178 nic_t *sp = dev->priv;
3179
3180 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3181 strncpy(info->version, s2io_driver_version,
3182 sizeof(s2io_driver_version));
3183 strncpy(info->fw_version, "", 32);
3184 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3185 info->regdump_len = XENA_REG_SPACE;
3186 info->eedump_len = XENA_EEPROM_SPACE;
3187 info->testinfo_len = S2IO_TEST_LEN;
3188 info->n_stats = S2IO_STAT_LEN;
3189 }
3190
3191 /**
3192  * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
3193 * @sp: private member of the device structure, which is a pointer to the
3194 * s2io_nic structure.
3195 * @regs : pointer to the structure with parameters given by ethtool for
3196 * dumping the registers.
3197  * @reg_space: The input argument into which all the registers are dumped.
3198 * Description:
3199 * Dumps the entire register space of xFrame NIC into the user given
3200 * buffer area.
3201 * Return value :
3202 * void .
3203 */
3204
3205 static void s2io_ethtool_gregs(struct net_device *dev,
3206 struct ethtool_regs *regs, void *space)
3207 {
3208 int i;
3209 u64 reg;
3210 u8 *reg_space = (u8 *) space;
3211 nic_t *sp = dev->priv;
3212
3213 regs->len = XENA_REG_SPACE;
3214 regs->version = sp->pdev->subsystem_device;
3215
3216 for (i = 0; i < regs->len; i += 8) {
3217 reg = readq(sp->bar0 + i);
3218 memcpy((reg_space + i), &reg, 8);
3219 }
3220 }
3221
3222 /**
3223 * s2io_phy_id - timer function that alternates adapter LED.
3224 * @data : address of the private member of the device structure, which
3225 * is a pointer to the s2io_nic structure, provided as an u32.
3226  * Description: This is actually the timer function that toggles the
3227  * adapter LED bit of the adapter control register on every
3228  * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3229 * once every second.
3230 */
3231 static void s2io_phy_id(unsigned long data)
3232 {
3233 nic_t *sp = (nic_t *) data;
3234 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3235 u64 val64 = 0;
3236 u16 subid;
3237
3238 subid = sp->pdev->subsystem_device;
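        /*
         * Boards whose subsystem ID low byte is >= 0x07 drive the LED
         * through GPIO_CTRL_GPIO_0; others toggle ADAPTER_LED_ON in the
         * adapter control register instead.
         */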
3239 if ((subid & 0xFF) >= 0x07) {
3240 val64 = readq(&bar0->gpio_control);
3241 val64 ^= GPIO_CTRL_GPIO_0;
3242 writeq(val64, &bar0->gpio_control);
3243 } else {
3244 val64 = readq(&bar0->adapter_control);
3245 val64 ^= ADAPTER_LED_ON;
3246 writeq(val64, &bar0->adapter_control);
3247 }
3248
3249 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3250 }
3251
3252 /**
3253 * s2io_ethtool_idnic - To physically identify the nic on the system.
3254 * @sp : private member of the device structure, which is a pointer to the
3255 * s2io_nic structure.
3256 * @id : pointer to the structure with identification parameters given by
3257 * ethtool.
3258 * Description: Used to physically identify the NIC on the system.
3259 * The Link LED will blink for a time specified by the user for
3260 * identification.
3261 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3262  * identification is possible only if its link is up.
3263 * Return value:
3264 * int , returns 0 on success
3265 */
3266
3267 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3268 {
3269 u64 val64 = 0, last_gpio_ctrl_val;
3270 nic_t *sp = dev->priv;
3271 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3272 u16 subid;
3273
3274 subid = sp->pdev->subsystem_device;
3275 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3276 if ((subid & 0xFF) < 0x07) {
3277 val64 = readq(&bar0->adapter_control);
3278 if (!(val64 & ADAPTER_CNTL_EN)) {
3279 printk(KERN_ERR
3280 "Adapter Link down, cannot blink LED\n");
3281 return -EFAULT;
3282 }
3283 }
3284 if (sp->id_timer.function == NULL) {
3285 init_timer(&sp->id_timer);
3286 sp->id_timer.function = s2io_phy_id;
3287 sp->id_timer.data = (unsigned long) sp;
3288 }
3289 mod_timer(&sp->id_timer, jiffies);
3290 if (data)
3291 msleep(data * 1000);
3292 else
3293 msleep(0xFFFFFFFF);
3294 del_timer_sync(&sp->id_timer);
3295
3296 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3297 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3298 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3299 }
3300
3301 return 0;
3302 }
3303
3304 /**
3305  * s2io_ethtool_getpause_data - Pause frame generation and reception.
3306  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3307 * @ep : pointer to the structure with pause parameters given by ethtool.
3308 * Description:
3309 * Returns the Pause frame generation and reception capability of the NIC.
3310 * Return value:
3311 * void
3312 */
3313 static void s2io_ethtool_getpause_data(struct net_device *dev,
3314 struct ethtool_pauseparam *ep)
3315 {
3316 u64 val64;
3317 nic_t *sp = dev->priv;
3318 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3319
3320 val64 = readq(&bar0->rmac_pause_cfg);
3321 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3322 ep->tx_pause = TRUE;
3323 if (val64 & RMAC_PAUSE_RX_ENABLE)
3324 ep->rx_pause = TRUE;
3325 ep->autoneg = FALSE;
3326 }
3327
3328 /**
3329 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3330 * @sp : private member of the device structure, which is a pointer to the
3331 * s2io_nic structure.
3332 * @ep : pointer to the structure with pause parameters given by ethtool.
3333 * Description:
3334 * It can be used to set or reset Pause frame generation or reception
3335 * support of the NIC.
3336 * Return value:
3337 * int, returns 0 on Success
3338 */
3339
3340 static int s2io_ethtool_setpause_data(struct net_device *dev,
3341 struct ethtool_pauseparam *ep)
3342 {
3343 u64 val64;
3344 nic_t *sp = dev->priv;
3345 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3346
3347 val64 = readq(&bar0->rmac_pause_cfg);
3348 if (ep->tx_pause)
3349 val64 |= RMAC_PAUSE_GEN_ENABLE;
3350 else
3351 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3352 if (ep->rx_pause)
3353 val64 |= RMAC_PAUSE_RX_ENABLE;
3354 else
3355 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3356 writeq(val64, &bar0->rmac_pause_cfg);
3357 return 0;
3358 }
3359
3360 /**
3361 * read_eeprom - reads 4 bytes of data from user given offset.
3362 * @sp : private member of the device structure, which is a pointer to the
3363 * s2io_nic structure.
3364  * @off : offset from which the data is to be read
3365  * @data : It's an output parameter where the data read at the given
3366 * offset is stored.
3367 * Description:
3368 * Will read 4 bytes of data from the user given offset and return the
3369 * read data.
3370  * NOTE: Will allow reading only the part of the EEPROM visible through the
3371 * I2C bus.
3372 * Return value:
3373 * -1 on failure and 0 on success.
3374 */
3375
3376 #define S2IO_DEV_ID 5
3377 static int read_eeprom(nic_t * sp, int off, u32 * data)
3378 {
3379 int ret = -1;
3380 u32 exit_cnt = 0;
3381 u64 val64;
3382 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3383
3384 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3385 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3386 I2C_CONTROL_CNTL_START;
3387 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3388
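        /*
         * The I2C read was kicked off above; poll i2c_control up to five
         * times, 50 ms apart, for the CNTL_END bit and pick up the data
         * once the transaction completes.
         */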
3389 while (exit_cnt < 5) {
3390 val64 = readq(&bar0->i2c_control);
3391 if (I2C_CONTROL_CNTL_END(val64)) {
3392 *data = I2C_CONTROL_GET_DATA(val64);
3393 ret = 0;
3394 break;
3395 }
3396 msleep(50);
3397 exit_cnt++;
3398 }
3399
3400 return ret;
3401 }
3402
3403 /**
3404 * write_eeprom - actually writes the relevant part of the data value.
3405 * @sp : private member of the device structure, which is a pointer to the
3406 * s2io_nic structure.
3407 * @off : offset at which the data must be written
3408 * @data : The data that is to be written
3409 * @cnt : Number of bytes of the data that are actually to be written into
3410 * the Eeprom. (max of 3)
3411 * Description:
3412 * Actually writes the relevant part of the data value into the Eeprom
3413 * through the I2C bus.
3414 * Return value:
3415 * 0 on success, -1 on failure.
3416 */
3417
3418 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3419 {
3420 int exit_cnt = 0, ret = -1;
3421 u64 val64;
3422 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3423
3424 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3425 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3426 I2C_CONTROL_CNTL_START;
3427 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3428
3429 while (exit_cnt < 5) {
3430 val64 = readq(&bar0->i2c_control);
3431 if (I2C_CONTROL_CNTL_END(val64)) {
3432 if (!(val64 & I2C_CONTROL_NACK))
3433 ret = 0;
3434 break;
3435 }
3436 msleep(50);
3437 exit_cnt++;
3438 }
3439
3440 return ret;
3441 }
3442
3443 /**
3444 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3445  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3446 * @eeprom : pointer to the user level structure provided by ethtool,
3447 * containing all relevant information.
3448 * @data_buf : user defined value to be written into Eeprom.
3449 * Description: Reads the values stored in the Eeprom at given offset
3450  * for a given length. Stores these values in the input argument data
3451  * buffer 'data_buf' and returns them to the caller (ethtool).
3452 * Return value:
3453 * int 0 on success
3454 */
3455
3456 static int s2io_ethtool_geeprom(struct net_device *dev,
3457 struct ethtool_eeprom *eeprom, u8 * data_buf)
3458 {
3459 u32 data, i, valid;
3460 nic_t *sp = dev->priv;
3461
3462 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3463
3464 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3465 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3466
3467 for (i = 0; i < eeprom->len; i += 4) {
3468 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3469 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3470 return -EFAULT;
3471 }
3472 valid = INV(data);
3473 memcpy((data_buf + i), &valid, 4);
3474 }
3475 return 0;
3476 }
3477
3478 /**
3479 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3480 * @sp : private member of the device structure, which is a pointer to the
3481 * s2io_nic structure.
3482 * @eeprom : pointer to the user level structure provided by ethtool,
3483 * containing all relevant information.
3484  * @data_buf : user defined value to be written into the Eeprom.
3485 * Description:
3486 * Tries to write the user provided value in the Eeprom, at the offset
3487 * given by the user.
3488 * Return value:
3489 * 0 on success, -EFAULT on failure.
3490 */
3491
3492 static int s2io_ethtool_seeprom(struct net_device *dev,
3493 struct ethtool_eeprom *eeprom,
3494 u8 * data_buf)
3495 {
3496 int len = eeprom->len, cnt = 0;
3497 u32 valid = 0, data;
3498 nic_t *sp = dev->priv;
3499
3500 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3501 DBG_PRINT(ERR_DBG,
3502 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3503 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3504 eeprom->magic);
3505 return -EFAULT;
3506 }
3507
3508 while (len) {
3509 data = (u32) data_buf[cnt] & 0x000000FF;
3510 if (data) {
3511 valid = (u32) (data << 24);
3512 } else
3513 valid = data;
3514
3515 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3516 DBG_PRINT(ERR_DBG,
3517 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3518 DBG_PRINT(ERR_DBG,
3519 "write into the specified offset\n");
3520 return -EFAULT;
3521 }
3522 cnt++;
3523 len--;
3524 }
3525
3526 return 0;
3527 }
3528
3529 /**
3530 * s2io_register_test - reads and writes into all clock domains.
3531 * @sp : private member of the device structure, which is a pointer to the
3532 * s2io_nic structure.
3533  * @data : variable that returns the result of each of the tests conducted
3534  * by the driver.
3535 * Description:
3536  * Read and write into all clock domains. The NIC has 3 clock domains;
3537  * verify that registers in all three regions are accessible.
3538 * Return value:
3539 * 0 on success.
3540 */
3541
3542 static int s2io_register_test(nic_t * sp, uint64_t * data)
3543 {
3544 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3545 u64 val64 = 0;
3546 int fail = 0;
3547
3548 val64 = readq(&bar0->pcc_enable);
3549 if (val64 != 0xff00000000000000ULL) {
3550 fail = 1;
3551 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3552 }
3553
3554 val64 = readq(&bar0->rmac_pause_cfg);
3555 if (val64 != 0xc000ffff00000000ULL) {
3556 fail = 1;
3557 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3558 }
3559
3560 val64 = readq(&bar0->rx_queue_cfg);
3561 if (val64 != 0x0808080808080808ULL) {
3562 fail = 1;
3563 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3564 }
3565
3566 val64 = readq(&bar0->xgxs_efifo_cfg);
3567 if (val64 != 0x000000001923141EULL) {
3568 fail = 1;
3569 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3570 }
3571
3572 val64 = 0x5A5A5A5A5A5A5A5AULL;
3573 writeq(val64, &bar0->xmsi_data);
3574 val64 = readq(&bar0->xmsi_data);
3575 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3576 fail = 1;
3577 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3578 }
3579
3580 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3581 writeq(val64, &bar0->xmsi_data);
3582 val64 = readq(&bar0->xmsi_data);
3583 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3584 fail = 1;
3585 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3586 }
3587
3588 *data = fail;
3589 return 0;
3590 }
3591
3592 /**
3593 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3594 * @sp : private member of the device structure, which is a pointer to the
3595 * s2io_nic structure.
3596 * @data:variable that returns the result of each of the test conducted by
3597 * the driver.
3598 * Description:
3599 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3600 * register.
3601 * Return value:
3602 * 0 on success.
3603 */
3604
3605 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3606 {
3607 int fail = 0;
3608 u32 ret_data;
3609
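        /*
         * Offsets 0x000, 0x07C, 0x080, 0x0FC, 0x100 and 0x4EC are
         * expected to be write protected, so a write_eeprom() call that
         * succeeds there counts as a failure; offsets 0x4F0 and 0x7FC
         * must accept a write and read back the same value.
         */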
3610 /* Test Write Error at offset 0 */
3611 if (!write_eeprom(sp, 0, 0, 3))
3612 fail = 1;
3613
3614 /* Test Write at offset 4f0 */
3615 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3616 fail = 1;
3617 if (read_eeprom(sp, 0x4F0, &ret_data))
3618 fail = 1;
3619
3620 if (ret_data != 0x01234567)
3621 fail = 1;
3622
3623         /* Reset the EEPROM data back to 0xFFFF */
3624 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3625
3626 /* Test Write Request Error at offset 0x7c */
3627 if (!write_eeprom(sp, 0x07C, 0, 3))
3628 fail = 1;
3629
3630 /* Test Write Request at offset 0x7fc */
3631 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3632 fail = 1;
3633 if (read_eeprom(sp, 0x7FC, &ret_data))
3634 fail = 1;
3635
3636 if (ret_data != 0x01234567)
3637 fail = 1;
3638
3639         /* Reset the EEPROM data back to 0xFFFF */
3640 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3641
3642 /* Test Write Error at offset 0x80 */
3643 if (!write_eeprom(sp, 0x080, 0, 3))
3644 fail = 1;
3645
3646 /* Test Write Error at offset 0xfc */
3647 if (!write_eeprom(sp, 0x0FC, 0, 3))
3648 fail = 1;
3649
3650 /* Test Write Error at offset 0x100 */
3651 if (!write_eeprom(sp, 0x100, 0, 3))
3652 fail = 1;
3653
3654 /* Test Write Error at offset 4ec */
3655 if (!write_eeprom(sp, 0x4EC, 0, 3))
3656 fail = 1;
3657
3658 *data = fail;
3659 return 0;
3660 }
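/*
 * Note: as used above, write_eeprom() and read_eeprom() return zero on
 * success, so a *successful* write at an offset that should be protected
 * (0x0, 0x7C, 0x80, 0xFC, 0x100, 0x4EC) is counted as a failure, while the
 * writable offsets are exercised with a write, a read-back compare and a
 * restore to 0xFFFFFFFF.  A sketch of the writable-offset check, assuming
 * the same helpers:
 *
 *	if (write_eeprom(sp, 0x4F0, 0x01234567, 3) ||
 *	    read_eeprom(sp, 0x4F0, &ret_data) ||
 *	    (ret_data != 0x01234567))
 *		fail = 1;
 *	write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
 */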
3661
3662 /**
3663 * s2io_bist_test - invokes the MemBist test of the card.
3664 * @sp : private member of the device structure, which is a pointer to the
3665 * s2io_nic structure.
3666 * @data : variable that returns the result of each of the tests conducted by
3667 * the driver.
3668 * Description:
3669 * This invokes the MemBist test of the card. We allow around
3670 * 2 seconds for the test to complete. If it is still not complete
3671 * within this period, we consider the test to have failed.
3672 * Return value:
3673 * 0 on success and -1 on failure.
3674 */
3675
3676 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3677 {
3678 u8 bist = 0;
3679 int cnt = 0, ret = -1;
3680
3681 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3682 bist |= PCI_BIST_START;
3683 	pci_write_config_byte(sp->pdev, PCI_BIST, bist);
3684
3685 while (cnt < 20) {
3686 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3687 if (!(bist & PCI_BIST_START)) {
3688 *data = (bist & PCI_BIST_CODE_MASK);
3689 ret = 0;
3690 break;
3691 }
3692 msleep(100);
3693 cnt++;
3694 }
3695
3696 return ret;
3697 }
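/*
 * Note: PCI BIST is driven entirely through the 8-bit PCI_BIST config
 * register: setting PCI_BIST_START kicks off the built-in self test, the
 * device clears the bit when it finishes, and the completion code is left
 * in the PCI_BIST_CODE_MASK bits (0 means pass).  A minimal sketch of the
 * start-and-poll idiom, with a hypothetical timeout count:
 *
 *	pci_read_config_byte(pdev, PCI_BIST, &bist);
 *	pci_write_config_byte(pdev, PCI_BIST, bist | PCI_BIST_START);
 *	while (timeout--) {
 *		pci_read_config_byte(pdev, PCI_BIST, &bist);
 *		if (!(bist & PCI_BIST_START))
 *			return bist & PCI_BIST_CODE_MASK;
 *		msleep(100);
 *	}
 */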
3698
3699 /**
3700 * s2io_link_test - verifies the link state of the nic
3701 * @sp : private member of the device structure, which is a pointer to the
3702 * s2io_nic structure.
3703 * @data: variable that returns the result of each of the tests conducted by
3704 * the driver.
3705 * Description:
3706 * The function verifies the link state of the NIC and updates the input
3707 * argument 'data' appropriately.
3708 * Return value:
3709 * 0 on success.
3710 */
3711
3712 static int s2io_link_test(nic_t * sp, uint64_t * data)
3713 {
3714 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3715 u64 val64;
3716
3717 val64 = readq(&bar0->adapter_status);
3718 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3719 *data = 1;
3720
3721 return 0;
3722 }
3723
3724 /**
3725 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3726 * @sp : private member of the device structure, which is a pointer to the
3727 * s2io_nic structure.
3728 * @data : variable that returns the result of each of the tests
3729 * conducted by the driver.
3730 * Description:
3731 * This is one of the offline tests that checks the read and write
3732 * access to the RldRam chip on the NIC.
3733 * Return value:
3734 * 0 on success.
3735 */
3736
3737 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3738 {
3739 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3740 u64 val64;
3741 int cnt, iteration = 0, test_pass = 0;
3742
3743 val64 = readq(&bar0->adapter_control);
3744 val64 &= ~ADAPTER_ECC_EN;
3745 writeq(val64, &bar0->adapter_control);
3746
3747 val64 = readq(&bar0->mc_rldram_test_ctrl);
3748 val64 |= MC_RLDRAM_TEST_MODE;
3749 writeq(val64, &bar0->mc_rldram_test_ctrl);
3750
3751 val64 = readq(&bar0->mc_rldram_mrs);
3752 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3753 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3754
3755 val64 |= MC_RLDRAM_MRS_ENABLE;
3756 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3757
3758 while (iteration < 2) {
3759 val64 = 0x55555555aaaa0000ULL;
3760 if (iteration == 1) {
3761 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3762 }
3763 writeq(val64, &bar0->mc_rldram_test_d0);
3764
3765 val64 = 0xaaaa5a5555550000ULL;
3766 if (iteration == 1) {
3767 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3768 }
3769 writeq(val64, &bar0->mc_rldram_test_d1);
3770
3771 val64 = 0x55aaaaaaaa5a0000ULL;
3772 if (iteration == 1) {
3773 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3774 }
3775 writeq(val64, &bar0->mc_rldram_test_d2);
3776
3777 val64 = (u64) (0x0000003fffff0000ULL);
3778 writeq(val64, &bar0->mc_rldram_test_add);
3779
3780
3781 val64 = MC_RLDRAM_TEST_MODE;
3782 writeq(val64, &bar0->mc_rldram_test_ctrl);
3783
3784 val64 |=
3785 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3786 MC_RLDRAM_TEST_GO;
3787 writeq(val64, &bar0->mc_rldram_test_ctrl);
3788
3789 for (cnt = 0; cnt < 5; cnt++) {
3790 val64 = readq(&bar0->mc_rldram_test_ctrl);
3791 if (val64 & MC_RLDRAM_TEST_DONE)
3792 break;
3793 msleep(200);
3794 }
3795
3796 if (cnt == 5)
3797 break;
3798
3799 val64 = MC_RLDRAM_TEST_MODE;
3800 writeq(val64, &bar0->mc_rldram_test_ctrl);
3801
3802 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3803 writeq(val64, &bar0->mc_rldram_test_ctrl);
3804
3805 for (cnt = 0; cnt < 5; cnt++) {
3806 val64 = readq(&bar0->mc_rldram_test_ctrl);
3807 if (val64 & MC_RLDRAM_TEST_DONE)
3808 break;
3809 msleep(500);
3810 }
3811
3812 if (cnt == 5)
3813 break;
3814
3815 val64 = readq(&bar0->mc_rldram_test_ctrl);
3816 if (val64 & MC_RLDRAM_TEST_PASS)
3817 test_pass = 1;
3818
3819 iteration++;
3820 }
3821
3822 if (!test_pass)
3823 *data = 1;
3824 else
3825 *data = 0;
3826
3827 return 0;
3828 }
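/*
 * Note: the loop above makes two passes over the RLDRAM test engine; on
 * the second pass the upper 48 bits of each data word are XORed with
 * all-ones, so both polarities of every tested bit are exercised.  Each
 * write and read phase is polled for MC_RLDRAM_TEST_DONE with a bounded
 * loop, in sketch form:
 *
 *	for (cnt = 0; cnt < 5; cnt++) {
 *		if (readq(&bar0->mc_rldram_test_ctrl) & MC_RLDRAM_TEST_DONE)
 *			break;
 *		msleep(200);
 *	}
 *	if (cnt == 5)
 *		break;		(timeout: the PASS bit is never set)
 */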
3829
3830 /**
3831 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
3832 * @sp : private member of the device structure, which is a pointer to the
3833 * s2io_nic structure.
3834 * @ethtest : pointer to an ethtool command specific structure that will be
3835 * returned to the user.
3836 * @data : variable that returns the result of each of the test
3837 * conducted by the driver.
3838 * Description:
3839 * This function conducts 6 tests (4 offline and 2 online) to determine
3840 * the health of the card.
3841 * Return value:
3842 * void
3843 */
3844
3845 static void s2io_ethtool_test(struct net_device *dev,
3846 struct ethtool_test *ethtest,
3847 uint64_t * data)
3848 {
3849 nic_t *sp = dev->priv;
3850 int orig_state = netif_running(sp->dev);
3851
3852 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3853 /* Offline Tests. */
3854 if (orig_state) {
3855 s2io_close(sp->dev);
3856 s2io_set_swapper(sp);
3857 } else
3858 s2io_set_swapper(sp);
3859
3860 if (s2io_register_test(sp, &data[0]))
3861 ethtest->flags |= ETH_TEST_FL_FAILED;
3862
3863 s2io_reset(sp);
3864 s2io_set_swapper(sp);
3865
3866 if (s2io_rldram_test(sp, &data[3]))
3867 ethtest->flags |= ETH_TEST_FL_FAILED;
3868
3869 s2io_reset(sp);
3870 s2io_set_swapper(sp);
3871
3872 if (s2io_eeprom_test(sp, &data[1]))
3873 ethtest->flags |= ETH_TEST_FL_FAILED;
3874
3875 if (s2io_bist_test(sp, &data[4]))
3876 ethtest->flags |= ETH_TEST_FL_FAILED;
3877
3878 if (orig_state)
3879 s2io_open(sp->dev);
3880
3881 data[2] = 0;
3882 } else {
3883 /* Online Tests. */
3884 if (!orig_state) {
3885 DBG_PRINT(ERR_DBG,
3886 "%s: is not up, cannot run test\n",
3887 dev->name);
3888 data[0] = -1;
3889 data[1] = -1;
3890 data[2] = -1;
3891 data[3] = -1;
3892 data[4] = -1;
3893 }
3894
3895 if (s2io_link_test(sp, &data[2]))
3896 ethtest->flags |= ETH_TEST_FL_FAILED;
3897
3898 data[0] = 0;
3899 data[1] = 0;
3900 data[3] = 0;
3901 data[4] = 0;
3902 }
3903 }
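/*
 * Note: the result slots filled in above presumably match the strings
 * returned for ETH_SS_TEST (s2io_gstrings); as used in this function they
 * map as
 *	data[0] - register test,  data[1] - EEPROM test,
 *	data[2] - link test,      data[3] - RLDRAM test,
 *	data[4] - BIST.
 * Userspace typically reaches this path with something like
 * "ethtool -t ethX offline" or "ethtool -t ethX online"; the exact
 * invocation depends on the ethtool utility version.
 */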
3904
3905 static void s2io_get_ethtool_stats(struct net_device *dev,
3906 struct ethtool_stats *estats,
3907 u64 * tmp_stats)
3908 {
3909 int i = 0;
3910 nic_t *sp = dev->priv;
3911 StatInfo_t *stat_info = sp->mac_control.stats_info;
3912
3913 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
3914 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
3915 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3916 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
3917 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
3918 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3919 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
3920 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3921 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
3922 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
3923 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
3924 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
3925 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3926 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
3927 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
3928 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
3929 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3930 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3931 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
3932 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3933 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3934 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3935 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3936 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
3937 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
3938 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
3939 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
3940 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
3941 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
3942 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3943 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3944 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
3945 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
3946 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3947 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
3948 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
3949 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
3950 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
3951 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
3952 }
3953
3954 static int s2io_ethtool_get_regs_len(struct net_device *dev)
3955 {
3956 return (XENA_REG_SPACE);
3957 }
3958
3959
3960 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3961 {
3962 nic_t *sp = dev->priv;
3963
3964 return (sp->rx_csum);
3965 }
3966
3967 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3968 {
3969 nic_t *sp = dev->priv;
3970
3971 if (data)
3972 sp->rx_csum = 1;
3973 else
3974 sp->rx_csum = 0;
3975
3976 return 0;
3977 }
3978
3979 static int s2io_get_eeprom_len(struct net_device *dev)
3980 {
3981 return (XENA_EEPROM_SPACE);
3982 }
3983
3984 static int s2io_ethtool_self_test_count(struct net_device *dev)
3985 {
3986 return (S2IO_TEST_LEN);
3987 }
3988
3989 static void s2io_ethtool_get_strings(struct net_device *dev,
3990 u32 stringset, u8 * data)
3991 {
3992 switch (stringset) {
3993 case ETH_SS_TEST:
3994 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
3995 break;
3996 case ETH_SS_STATS:
3997 memcpy(data, &ethtool_stats_keys,
3998 sizeof(ethtool_stats_keys));
3999 }
4000 }
4001
4002 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4003 {
4004 return (S2IO_STAT_LEN);
4005 }
4006
4007 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4008 {
4009 if (data)
4010 dev->features |= NETIF_F_IP_CSUM;
4011 else
4012 dev->features &= ~NETIF_F_IP_CSUM;
4013
4014 return 0;
4015 }
4016
4017
4018 static struct ethtool_ops netdev_ethtool_ops = {
4019 .get_settings = s2io_ethtool_gset,
4020 .set_settings = s2io_ethtool_sset,
4021 .get_drvinfo = s2io_ethtool_gdrvinfo,
4022 .get_regs_len = s2io_ethtool_get_regs_len,
4023 .get_regs = s2io_ethtool_gregs,
4024 .get_link = ethtool_op_get_link,
4025 .get_eeprom_len = s2io_get_eeprom_len,
4026 .get_eeprom = s2io_ethtool_geeprom,
4027 .set_eeprom = s2io_ethtool_seeprom,
4028 .get_pauseparam = s2io_ethtool_getpause_data,
4029 .set_pauseparam = s2io_ethtool_setpause_data,
4030 .get_rx_csum = s2io_ethtool_get_rx_csum,
4031 .set_rx_csum = s2io_ethtool_set_rx_csum,
4032 .get_tx_csum = ethtool_op_get_tx_csum,
4033 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4034 .get_sg = ethtool_op_get_sg,
4035 .set_sg = ethtool_op_set_sg,
4036 #ifdef NETIF_F_TSO
4037 .get_tso = ethtool_op_get_tso,
4038 .set_tso = ethtool_op_set_tso,
4039 #endif
4040 .self_test_count = s2io_ethtool_self_test_count,
4041 .self_test = s2io_ethtool_test,
4042 .get_strings = s2io_ethtool_get_strings,
4043 .phys_id = s2io_ethtool_idnic,
4044 .get_stats_count = s2io_ethtool_get_stats_count,
4045 .get_ethtool_stats = s2io_get_ethtool_stats
4046 };
4047
4048 /**
4049 * s2io_ioctl - Entry point for the Ioctl
4050 * @dev : Device pointer.
4051 * @rq : An IOCTL specific structure, that can contain a pointer to
4052 * a proprietary structure used to pass information to the driver.
4053 * @cmd : This is used to distinguish between the different commands that
4054 * can be passed to the IOCTL functions.
4055 * Description:
4056 * This function has support for ethtool, adding multiple MAC addresses on
4057 * the NIC and some DBG commands for the util tool.
4058 * Return value:
4059 * Currently the IOCTL supports no operations, hence by default this
4060 * function returns -EOPNOTSUPP.
4061 */
4062
4063 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4064 {
4065 return -EOPNOTSUPP;
4066 }
4067
4068 /**
4069 * s2io_change_mtu - entry point to change MTU size for the device.
4070 * @dev : device pointer.
4071 * @new_mtu : the new MTU size for the device.
4072 * Description: A driver entry point to change MTU size for the device.
4073 * Before changing the MTU the device must be stopped.
4074 * Return value:
4075 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4076 * file on failure.
4077 */
4078
4079 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4080 {
4081 nic_t *sp = dev->priv;
4082 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4083 register u64 val64;
4084
4085 if (netif_running(dev)) {
4086 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4087 DBG_PRINT(ERR_DBG, "change its MTU \n");
4088 return -EBUSY;
4089 }
4090
4091 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4092 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4093 dev->name);
4094 return -EPERM;
4095 }
4096
4097 /* Set the new MTU into the PYLD register of the NIC */
4098 val64 = new_mtu;
4099 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4100
4101 dev->mtu = new_mtu;
4102
4103 return 0;
4104 }
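/*
 * Note: since the function refuses to run while the interface is up
 * (-EBUSY above), an MTU change has to bracket a down/up cycle, e.g.
 * (illustrative shell usage only, the exact tool is up to the admin):
 *
 *	ip link set ethX down
 *	ip link set ethX mtu 9000
 *	ip link set ethX up
 *
 * 9000 is just an example jumbo value; the accepted range is
 * MIN_MTU..S2IO_JUMBO_SIZE as checked above.
 */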
4105
4106 /**
4107 * s2io_tasklet - Bottom half of the ISR.
4108 * @dev_addr : address of the net_device structure, passed as an unsigned long.
4109 * Description:
4110 * This is the tasklet, i.e. the bottom half of the ISR. It is an
4111 * extension of the ISR that is scheduled to run when the CPU load is
4112 * low. All low priority tasks of the ISR can be pushed into the
4113 * tasklet. For now the tasklet is used only to replenish the Rx
4114 * buffers in the Rx buffer descriptors.
4115 * Return value:
4116 * void.
4117 */
4118
4119 static void s2io_tasklet(unsigned long dev_addr)
4120 {
4121 struct net_device *dev = (struct net_device *) dev_addr;
4122 nic_t *sp = dev->priv;
4123 int i, ret;
4124 mac_info_t *mac_control;
4125 struct config_param *config;
4126
4127 mac_control = &sp->mac_control;
4128 config = &sp->config;
4129
4130 if (!TASKLET_IN_USE) {
4131 for (i = 0; i < config->rx_ring_num; i++) {
4132 ret = fill_rx_buffers(sp, i);
4133 if (ret == -ENOMEM) {
4134 DBG_PRINT(ERR_DBG, "%s: Out of ",
4135 dev->name);
4136 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4137 break;
4138 } else if (ret == -EFILL) {
4139 DBG_PRINT(ERR_DBG,
4140 "%s: Rx Ring %d is full\n",
4141 dev->name, i);
4142 break;
4143 }
4144 }
4145 clear_bit(0, (&sp->tasklet_status));
4146 }
4147 }
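/*
 * Note: TASKLET_IN_USE is a test_and_set_bit() on bit 0 of
 * sp->tasklet_status, so only one instance of the tasklet body refills
 * the rings at a time; clear_bit() releases the guard once the refill
 * loop finishes or breaks out on -ENOMEM/-EFILL.  The guard idiom, in
 * sketch form:
 *
 *	if (!test_and_set_bit(0, &sp->tasklet_status)) {
 *		... replenish Rx buffers ...
 *		clear_bit(0, &sp->tasklet_status);
 *	}
 */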
4148
4149 /**
4150 * s2io_set_link - Set the Link status
4151 * @data: long pointer to the device private structure
4152 * Description: Sets the link status for the adapter
4153 */
4154
4155 static void s2io_set_link(unsigned long data)
4156 {
4157 nic_t *nic = (nic_t *) data;
4158 struct net_device *dev = nic->dev;
4159 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4160 register u64 val64;
4161 u16 subid;
4162
4163 if (test_and_set_bit(0, &(nic->link_state))) {
4164 /* The card is being reset, no point doing anything */
4165 return;
4166 }
4167
4168 subid = nic->pdev->subsystem_device;
4169 /*
4170 * Allow a small delay for the NICs self initiated
4171 * cleanup to complete.
4172 */
4173 msleep(100);
4174
4175 val64 = readq(&bar0->adapter_status);
4176 if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
4177 if (LINK_IS_UP(val64)) {
4178 val64 = readq(&bar0->adapter_control);
4179 val64 |= ADAPTER_CNTL_EN;
4180 writeq(val64, &bar0->adapter_control);
4181 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4182 val64 = readq(&bar0->gpio_control);
4183 val64 |= GPIO_CTRL_GPIO_0;
4184 writeq(val64, &bar0->gpio_control);
4185 val64 = readq(&bar0->gpio_control);
4186 } else {
4187 val64 |= ADAPTER_LED_ON;
4188 writeq(val64, &bar0->adapter_control);
4189 }
4190 val64 = readq(&bar0->adapter_status);
4191 if (!LINK_IS_UP(val64)) {
4192 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4193 DBG_PRINT(ERR_DBG, " Link down");
4194 DBG_PRINT(ERR_DBG, " after ");
4195 DBG_PRINT(ERR_DBG, "enabling ");
4196 DBG_PRINT(ERR_DBG, "device \n");
4197 }
4198 if (nic->device_enabled_once == FALSE) {
4199 nic->device_enabled_once = TRUE;
4200 }
4201 s2io_link(nic, LINK_UP);
4202 } else {
4203 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4204 val64 = readq(&bar0->gpio_control);
4205 val64 &= ~GPIO_CTRL_GPIO_0;
4206 writeq(val64, &bar0->gpio_control);
4207 val64 = readq(&bar0->gpio_control);
4208 }
4209 s2io_link(nic, LINK_DOWN);
4210 }
4211 } else { /* NIC is not Quiescent. */
4212 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4213 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4214 netif_stop_queue(dev);
4215 }
4216 clear_bit(0, &(nic->link_state));
4217 }
4218
4219 static void s2io_card_down(nic_t * sp)
4220 {
4221 int cnt = 0;
4222 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4223 unsigned long flags;
4224 register u64 val64 = 0;
4225
4226 /* If s2io_set_link task is executing, wait till it completes. */
4227 while (test_and_set_bit(0, &(sp->link_state)))
4228 msleep(50);
4229 atomic_set(&sp->card_state, CARD_DOWN);
4230
4231 /* disable Tx and Rx traffic on the NIC */
4232 stop_nic(sp);
4233
4234 /* Kill tasklet. */
4235 tasklet_kill(&sp->task);
4236
4237 /* Check if the device is Quiescent and then Reset the NIC */
4238 do {
4239 val64 = readq(&bar0->adapter_status);
4240 if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
4241 break;
4242 }
4243
4244 msleep(50);
4245 cnt++;
4246 if (cnt == 10) {
4247 DBG_PRINT(ERR_DBG,
4248 "s2io_close:Device not Quiescent ");
4249 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4250 (unsigned long long) val64);
4251 break;
4252 }
4253 } while (1);
4254 spin_lock_irqsave(&sp->tx_lock, flags);
4255 s2io_reset(sp);
4256
4257 /* Free all unused Tx and Rx buffers */
4258 free_tx_buffers(sp);
4259 free_rx_buffers(sp);
4260
4261 spin_unlock_irqrestore(&sp->tx_lock, flags);
4262 clear_bit(0, &(sp->link_state));
4263 }
4264
4265 static int s2io_card_up(nic_t * sp)
4266 {
4267 int i, ret;
4268 mac_info_t *mac_control;
4269 struct config_param *config;
4270 struct net_device *dev = (struct net_device *) sp->dev;
4271
4272 /* Initialize the H/W I/O registers */
4273 if (init_nic(sp) != 0) {
4274 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4275 dev->name);
4276 return -ENODEV;
4277 }
4278
4279 /*
4280 * Initializing the Rx buffers. Buffers are replenished below for
4281 * each of the configured Rx rings.
4282 */
4283 mac_control = &sp->mac_control;
4284 config = &sp->config;
4285
4286 for (i = 0; i < config->rx_ring_num; i++) {
4287 if ((ret = fill_rx_buffers(sp, i))) {
4288 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4289 dev->name);
4290 s2io_reset(sp);
4291 free_rx_buffers(sp);
4292 return -ENOMEM;
4293 }
4294 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4295 atomic_read(&sp->rx_bufs_left[i]));
4296 }
4297
4298 /* Setting its receive mode */
4299 s2io_set_multicast(dev);
4300
4301 /* Enable tasklet for the device */
4302 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4303
4304 /* Enable Rx Traffic and interrupts on the NIC */
4305 if (start_nic(sp)) {
4306 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4307 tasklet_kill(&sp->task);
4308 s2io_reset(sp);
4309 free_irq(dev->irq, dev);
4310 free_rx_buffers(sp);
4311 return -ENODEV;
4312 }
4313
4314 atomic_set(&sp->card_state, CARD_UP);
4315 return 0;
4316 }
4317
4318 /**
4319 * s2io_restart_nic - Resets the NIC.
4320 * @data : long pointer to the device private structure
4321 * Description:
4322 * This function is scheduled to be run by the s2io_tx_watchdog
4323 * function after 0.5 secs to reset the NIC. The idea is to reduce
4324 * the run time of the watch dog routine which is run holding a
4325 * spin lock.
4326 */
4327
4328 static void s2io_restart_nic(unsigned long data)
4329 {
4330 struct net_device *dev = (struct net_device *) data;
4331 nic_t *sp = dev->priv;
4332
4333 s2io_card_down(sp);
4334 if (s2io_card_up(sp)) {
4335 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4336 dev->name);
4337 }
4338 netif_wake_queue(dev);
4339 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4340 dev->name);
4341 }
4342
4343 /**
4344 * s2io_tx_watchdog - Watchdog for transmit side.
4345 * @dev : Pointer to net device structure
4346 * Description:
4347 * This function is triggered if the Tx Queue is stopped
4348 * for a pre-defined amount of time when the Interface is still up.
4349 * If the Interface is jammed in such a situation, the hardware is
4350 * reset (by s2io_close) and restarted again (by s2io_open) to
4351 * overcome any problem that might have been caused in the hardware.
4352 * Return value:
4353 * void
4354 */
4355
4356 static void s2io_tx_watchdog(struct net_device *dev)
4357 {
4358 nic_t *sp = dev->priv;
4359
4360 if (netif_carrier_ok(dev)) {
4361 schedule_work(&sp->rst_timer_task);
4362 }
4363 }
4364
4365 /**
4366 * rx_osm_handler - To perform some OS related operations on SKB.
4367 * @sp: private member of the device structure, pointer to s2io_nic structure.
4368 * @skb : the socket buffer pointer.
4369 * @len : length of the packet
4370 * @cksum : FCS checksum of the frame.
4371 * @ring_no : the ring from which this RxD was extracted.
4372 * Description:
4373 * This function is called by the Rx interrupt service routine to perform
4374 * some OS related operations on the SKB before passing it to the upper
4375 * layers. It mainly checks if the checksum is OK, if so adds it to the
4376 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4377 * to the upper layer. If the checksum is wrong, it increments the Rx
4378 * packet error count, frees the SKB and returns error.
4379 * Return value:
4380 * SUCCESS on success and -1 on failure.
4381 */
4382 #ifndef CONFIG_2BUFF_MODE
4383 static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
4384 #else
4385 static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4386 buffAdd_t * ba)
4387 #endif
4388 {
4389 struct net_device *dev = (struct net_device *) sp->dev;
4390 struct sk_buff *skb =
4391 (struct sk_buff *) ((unsigned long) rxdp->Host_Control);
4392 u16 l3_csum, l4_csum;
4393 #ifdef CONFIG_2BUFF_MODE
4394 int buf0_len, buf2_len;
4395 unsigned char *buff;
4396 #endif
4397
4398 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4399 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
4400 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4401 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4402 /*
4403 * NIC verifies if the Checksum of the received
4404 * frame is Ok or not and accordingly returns
4405 * a flag in the RxD.
4406 */
4407 skb->ip_summed = CHECKSUM_UNNECESSARY;
4408 } else {
4409 /*
4410 * Packet with erroneous checksum, let the
4411 * upper layers deal with it.
4412 */
4413 skb->ip_summed = CHECKSUM_NONE;
4414 }
4415 } else {
4416 skb->ip_summed = CHECKSUM_NONE;
4417 }
4418
4419 if (rxdp->Control_1 & RXD_T_CODE) {
4420 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4421 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4422 dev->name, err);
4423 }
4424 #ifdef CONFIG_2BUFF_MODE
4425 buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4426 buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4427 #endif
4428
4429 skb->dev = dev;
4430 #ifndef CONFIG_2BUFF_MODE
4431 skb_put(skb, len);
4432 skb->protocol = eth_type_trans(skb, dev);
4433 #else
4434 buff = skb_push(skb, buf0_len);
4435 memcpy(buff, ba->ba_0, buf0_len);
4436 skb_put(skb, buf2_len);
4437 skb->protocol = eth_type_trans(skb, dev);
4438 #endif
4439
4440 #ifdef CONFIG_S2IO_NAPI
4441 netif_receive_skb(skb);
4442 #else
4443 netif_rx(skb);
4444 #endif
4445
4446 dev->last_rx = jiffies;
4447 sp->rx_pkt_count++;
4448 sp->stats.rx_packets++;
4449 #ifndef CONFIG_2BUFF_MODE
4450 sp->stats.rx_bytes += len;
4451 #else
4452 sp->stats.rx_bytes += buf0_len + buf2_len;
4453 #endif
4454
4455 atomic_dec(&sp->rx_bufs_left[ring_no]);
4456 rxdp->Host_Control = 0;
4457 return SUCCESS;
4458 }
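/*
 * Note: the checksum handling above is standard Rx checksum offload: when
 * the RxD flags both the L3 and L4 checksums as OK and Rx checksumming is
 * enabled (sp->rx_csum), the skb is marked CHECKSUM_UNNECESSARY so the
 * stack skips software verification; otherwise it is marked CHECKSUM_NONE
 * and the stack verifies the packet itself.  In sketch form (names as
 * used above):
 *
 *	if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK) && sp->rx_csum)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 */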
4459
4460 /**
4461 * s2io_link - stops/starts the Tx queue.
4462 * @sp : private member of the device structure, which is a pointer to the
4463 * s2io_nic structure.
4464 * @link : indicates whether link is UP/DOWN.
4465 * Description:
4466 * This function stops/starts the Tx queue depending on whether the link
4467 * status of the NIC is down or up. This is called by the Alarm
4468 * interrupt handler whenever a link change interrupt comes up.
4469 * Return value:
4470 * void.
4471 */
4472
4473 static void s2io_link(nic_t * sp, int link)
4474 {
4475 struct net_device *dev = (struct net_device *) sp->dev;
4476
4477 if (link != sp->last_link_state) {
4478 if (link == LINK_DOWN) {
4479 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4480 netif_carrier_off(dev);
4481 } else {
4482 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4483 netif_carrier_on(dev);
4484 }
4485 }
4486 sp->last_link_state = link;
4487 }
4488
4489 /**
4490 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
4491 * @sp : private member of the device structure, which is a pointer to the
4492 * s2io_nic structure.
4493 * Description:
4494 * This function initializes a few of the PCI and PCI-X configuration registers
4495 * with recommended values.
4496 * Return value:
4497 * void
4498 */
4499
4500 static void s2io_init_pci(nic_t * sp)
4501 {
4502 u16 pci_cmd = 0;
4503
4504 /* Enable Data Parity Error Recovery in PCI-X command register. */
4505 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4506 &(sp->pcix_cmd));
4507 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4508 (sp->pcix_cmd | 1));
4509 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4510 &(sp->pcix_cmd));
4511
4512 /* Set the PErr Response bit in PCI command register. */
4513 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4514 pci_write_config_word(sp->pdev, PCI_COMMAND,
4515 (pci_cmd | PCI_COMMAND_PARITY));
4516 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4517
4518 /* Set MMRB count to 1024 in PCI-X Command register. */
4519 sp->pcix_cmd &= 0xFFF3;
4520 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4521 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4522 &(sp->pcix_cmd));
4523
4524 /* Setting Maximum outstanding splits based on system type. */
4525 sp->pcix_cmd &= 0xFF8F;
4526
4527 sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
4528 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4529 sp->pcix_cmd);
4530 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4531 &(sp->pcix_cmd));
4532 /* Forcibly disabling relaxed ordering capability of the card. */
4533 sp->pcix_cmd &= 0xfffd;
4534 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4535 sp->pcix_cmd);
4536 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4537 &(sp->pcix_cmd));
4538 }
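/*
 * Note: each field update above follows the usual read-modify-write
 * pattern on PCI/PCI-X config space: read the word, clear the field with
 * a mask, OR in the new value, write it back, then read it again so the
 * cached copy in sp->pcix_cmd reflects what the device actually accepted.
 * Generic sketch (FIELD_MASK/FIELD_VAL are placeholders, not driver
 * symbols):
 *
 *	pci_read_config_word(pdev, reg, &val);
 *	val = (val & ~FIELD_MASK) | FIELD_VAL;
 *	pci_write_config_word(pdev, reg, val);
 *	pci_read_config_word(pdev, reg, &val);
 */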
4539
4540 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4541 MODULE_LICENSE("GPL");
4542 module_param(tx_fifo_num, int, 0);
4543 module_param_array(tx_fifo_len, int, NULL, 0);
4544 module_param(rx_ring_num, int, 0);
4545 module_param_array(rx_ring_sz, int, NULL, 0);
4546 module_param(Stats_refresh_time, int, 0);
4547 module_param(rmac_pause_time, int, 0);
4548 module_param(mc_pause_threshold_q0q3, int, 0);
4549 module_param(mc_pause_threshold_q4q7, int, 0);
4550 module_param(shared_splits, int, 0);
4551 module_param(tmac_util_period, int, 0);
4552 module_param(rmac_util_period, int, 0);
4553 #ifndef CONFIG_S2IO_NAPI
4554 module_param(indicate_max_pkts, int, 0);
4555 #endif
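/*
 * Note: the parameters registered above can be supplied at module load
 * time; the array parameters take comma separated lists.  An illustrative
 * (not prescriptive) invocation:
 *
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=512,512 \
 *		rx_ring_num=2 rx_ring_sz=30,30
 *
 * Values not given on the command line fall back to the driver defaults
 * set up in s2io_init_nic().
 */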
4556 /**
4557 * s2io_init_nic - Initialization of the adapter .
4558 * @pdev : structure containing the PCI related information of the device.
4559 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4560 * Description:
4561 * The function initializes an adapter identified by the pci_dev structure.
4562 * All OS related initialization including memory and device structure and
4563 * initialization of the device private variable is done. Also the swapper
4564 * control register is initialized to enable read and write into the I/O
4565 * registers of the device.
4566 * Return value:
4567 * returns 0 on success and negative on failure.
4568 */
4569
4570 static int __devinit
4571 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4572 {
4573 nic_t *sp;
4574 struct net_device *dev;
4575 char *dev_name = "S2IO 10GE NIC";
4576 int i, j, ret;
4577 int dma_flag = FALSE;
4578 u32 mac_up, mac_down;
4579 u64 val64 = 0, tmp64 = 0;
4580 XENA_dev_config_t __iomem *bar0 = NULL;
4581 u16 subid;
4582 mac_info_t *mac_control;
4583 struct config_param *config;
4584
4585
4586 DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
4587 s2io_driver_version);
4588
4589 if ((ret = pci_enable_device(pdev))) {
4590 DBG_PRINT(ERR_DBG,
4591 "s2io_init_nic: pci_enable_device failed\n");
4592 return ret;
4593 }
4594
4595 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4596 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4597 dma_flag = TRUE;
4598
4599 if (pci_set_consistent_dma_mask
4600 (pdev, DMA_64BIT_MASK)) {
4601 DBG_PRINT(ERR_DBG,
4602 "Unable to obtain 64bit DMA for \
4603 consistent allocations\n");
4604 pci_disable_device(pdev);
4605 return -ENOMEM;
4606 }
4607 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4608 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4609 } else {
4610 pci_disable_device(pdev);
4611 return -ENOMEM;
4612 }
4613
4614 if (pci_request_regions(pdev, s2io_driver_name)) {
4615 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
4616 pci_disable_device(pdev);
4617 return -ENODEV;
4618 }
4619
4620 dev = alloc_etherdev(sizeof(nic_t));
4621 if (dev == NULL) {
4622 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4623 pci_disable_device(pdev);
4624 pci_release_regions(pdev);
4625 return -ENODEV;
4626 }
4627
4628 pci_set_master(pdev);
4629 pci_set_drvdata(pdev, dev);
4630 SET_MODULE_OWNER(dev);
4631 SET_NETDEV_DEV(dev, &pdev->dev);
4632
4633 /* Private member variable initialized to s2io NIC structure */
4634 sp = dev->priv;
4635 memset(sp, 0, sizeof(nic_t));
4636 sp->dev = dev;
4637 sp->pdev = pdev;
4638 sp->vendor_id = pdev->vendor;
4639 sp->device_id = pdev->device;
4640 sp->high_dma_flag = dma_flag;
4641 sp->irq = pdev->irq;
4642 sp->device_enabled_once = FALSE;
4643 strcpy(sp->name, dev_name);
4644
4645 /* Initialize some PCI/PCI-X fields of the NIC. */
4646 s2io_init_pci(sp);
4647
4648 /*
4649 * Setting the device configuration parameters.
4650 * Most of these parameters can be specified by the user during
4651 * module insertion as they are module loadable parameters. If
4652 * these parameters are not specified during load time, they
4653 * are initialized with default values.
4654 */
4655 mac_control = &sp->mac_control;
4656 config = &sp->config;
4657
4658 /* Tx side parameters. */
4659 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4660 config->tx_fifo_num = tx_fifo_num;
4661 for (i = 0; i < MAX_TX_FIFOS; i++) {
4662 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4663 config->tx_cfg[i].fifo_priority = i;
4664 }
4665
4666 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4667 for (i = 0; i < config->tx_fifo_num; i++) {
4668 config->tx_cfg[i].f_no_snoop =
4669 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4670 if (config->tx_cfg[i].fifo_len < 65) {
4671 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4672 break;
4673 }
4674 }
4675 config->max_txds = MAX_SKB_FRAGS;
4676
4677 /* Rx side parameters. */
4678 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4679 config->rx_ring_num = rx_ring_num;
4680 for (i = 0; i < MAX_RX_RINGS; i++) {
4681 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4682 (MAX_RXDS_PER_BLOCK + 1);
4683 config->rx_cfg[i].ring_priority = i;
4684 }
4685
4686 for (i = 0; i < rx_ring_num; i++) {
4687 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4688 config->rx_cfg[i].f_no_snoop =
4689 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4690 }
4691
4692 /* Setting Mac Control parameters */
4693 mac_control->rmac_pause_time = rmac_pause_time;
4694 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4695 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4696
4697
4698 /* Initialize Ring buffer parameters. */
4699 for (i = 0; i < config->rx_ring_num; i++)
4700 atomic_set(&sp->rx_bufs_left[i], 0);
4701
4702 /* initialize the shared memory used by the NIC and the host */
4703 if (init_shared_mem(sp)) {
4704 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4705 dev->name);
4706 ret = -ENOMEM;
4707 goto mem_alloc_failed;
4708 }
4709
4710 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4711 pci_resource_len(pdev, 0));
4712 if (!sp->bar0) {
4713 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4714 dev->name);
4715 ret = -ENOMEM;
4716 goto bar0_remap_failed;
4717 }
4718
4719 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4720 pci_resource_len(pdev, 2));
4721 if (!sp->bar1) {
4722 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4723 dev->name);
4724 ret = -ENOMEM;
4725 goto bar1_remap_failed;
4726 }
4727
4728 dev->irq = pdev->irq;
4729 dev->base_addr = (unsigned long) sp->bar0;
4730
4731 /* Initializing the BAR1 address as the start of the FIFO pointer. */
4732 for (j = 0; j < MAX_TX_FIFOS; j++) {
4733 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4734 (sp->bar1 + (j * 0x00020000));
4735 }
4736
4737 /* Driver entry points */
4738 dev->open = &s2io_open;
4739 dev->stop = &s2io_close;
4740 dev->hard_start_xmit = &s2io_xmit;
4741 dev->get_stats = &s2io_get_stats;
4742 dev->set_multicast_list = &s2io_set_multicast;
4743 dev->do_ioctl = &s2io_ioctl;
4744 dev->change_mtu = &s2io_change_mtu;
4745 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4746 /*
4747 * will use eth_mac_addr() for dev->set_mac_address
4748 * mac address will be set every time dev->open() is called
4749 */
4750 #ifdef CONFIG_S2IO_NAPI
4751 dev->poll = s2io_poll;
4752 dev->weight = 90;
4753 #endif
4754
4755 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4756 if (sp->high_dma_flag == TRUE)
4757 dev->features |= NETIF_F_HIGHDMA;
4758 #ifdef NETIF_F_TSO
4759 dev->features |= NETIF_F_TSO;
4760 #endif
4761
4762 dev->tx_timeout = &s2io_tx_watchdog;
4763 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4764 INIT_WORK(&sp->rst_timer_task,
4765 (void (*)(void *)) s2io_restart_nic, dev);
4766 INIT_WORK(&sp->set_link_task,
4767 (void (*)(void *)) s2io_set_link, sp);
4768
4769 pci_save_state(sp->pdev);
4770
4771 /* Setting swapper control on the NIC, for proper reset operation */
4772 if (s2io_set_swapper(sp)) {
4773 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4774 dev->name);
4775 ret = -EAGAIN;
4776 goto set_swap_failed;
4777 }
4778
4779 /* Fix for all "FFs" MAC address problems observed on Alpha platforms */
4780 fix_mac_address(sp);
4781 s2io_reset(sp);
4782
4783 /*
4784 * Setting swapper control on the NIC, so the MAC address can be read.
4785 */
4786 if (s2io_set_swapper(sp)) {
4787 DBG_PRINT(ERR_DBG,
4788 "%s: S2IO: swapper settings are wrong\n",
4789 dev->name);
4790 ret = -EAGAIN;
4791 goto set_swap_failed;
4792 }
4793
4794 /*
4795 * MAC address initialization.
4796 * For now only one mac address will be read and used.
4797 */
4798 bar0 = sp->bar0;
4799 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4800 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4801 writeq(val64, &bar0->rmac_addr_cmd_mem);
4802 wait_for_cmd_complete(sp);
4803
4804 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4805 mac_down = (u32) tmp64;
4806 mac_up = (u32) (tmp64 >> 32);
4807
4808 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4809
4810 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4811 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4812 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4813 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4814 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4815 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4816
4817 DBG_PRINT(INIT_DBG,
4818 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4819 sp->def_mac_addr[0].mac_addr[0],
4820 sp->def_mac_addr[0].mac_addr[1],
4821 sp->def_mac_addr[0].mac_addr[2],
4822 sp->def_mac_addr[0].mac_addr[3],
4823 sp->def_mac_addr[0].mac_addr[4],
4824 sp->def_mac_addr[0].mac_addr[5]);
4825
4826 /* Set the factory defined MAC address initially */
4827 dev->addr_len = ETH_ALEN;
4828 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4829
4830 /*
4831 * Initialize the tasklet status and link state flags
4832 * and the card state parameter
4833 */
4834 atomic_set(&(sp->card_state), 0);
4835 sp->tasklet_status = 0;
4836 sp->link_state = 0;
4837
4838
4839 /* Initialize spinlocks */
4840 spin_lock_init(&sp->tx_lock);
4841 #ifndef CONFIG_S2IO_NAPI
4842 spin_lock_init(&sp->put_lock);
4843 #endif
4844
4845 /*
4846 * SXE-002: Configure link and activity LED to init state
4847 * on driver load.
4848 */
4849 subid = sp->pdev->subsystem_device;
4850 if ((subid & 0xFF) >= 0x07) {
4851 val64 = readq(&bar0->gpio_control);
4852 val64 |= 0x0000800000000000ULL;
4853 writeq(val64, &bar0->gpio_control);
4854 val64 = 0x0411040400000000ULL;
4855 writeq(val64, (void __iomem *) bar0 + 0x2700);
4856 val64 = readq(&bar0->gpio_control);
4857 }
4858
4859 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
4860
4861 if (register_netdev(dev)) {
4862 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4863 ret = -ENODEV;
4864 goto register_failed;
4865 }
4866
4867 /*
4868 * Make Link state as off at this point, when the Link change
4869 * interrupt comes the state will be automatically changed to
4870 * the right state.
4871 */
4872 netif_carrier_off(dev);
4873 sp->last_link_state = LINK_DOWN;
4874
4875 return 0;
4876
4877 register_failed:
4878 set_swap_failed:
4879 iounmap(sp->bar1);
4880 bar1_remap_failed:
4881 iounmap(sp->bar0);
4882 bar0_remap_failed:
4883 mem_alloc_failed:
4884 free_shared_mem(sp);
4885 pci_disable_device(pdev);
4886 pci_release_regions(pdev);
4887 pci_set_drvdata(pdev, NULL);
4888 free_netdev(dev);
4889
4890 return ret;
4891 }
4892
4893 /**
4894 * s2io_rem_nic - Free the PCI device
4895 * @pdev: structure containing the PCI related information of the device.
4896 * Description: This function is called by the Pci subsystem to release a
4897 * PCI device and free up all resource held up by the device. This could
4898 * be in response to a Hot plug event or when the driver is to be removed
4899 * from memory.
4900 */
4901
4902 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
4903 {
4904 struct net_device *dev =
4905 (struct net_device *) pci_get_drvdata(pdev);
4906 nic_t *sp;
4907
4908 if (dev == NULL) {
4909 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
4910 return;
4911 }
4912
4913 sp = dev->priv;
4914 unregister_netdev(dev);
4915
4916 free_shared_mem(sp);
4917 iounmap(sp->bar0);
4918 iounmap(sp->bar1);
4919 pci_disable_device(pdev);
4920 pci_release_regions(pdev);
4921 pci_set_drvdata(pdev, NULL);
4922
4923 free_netdev(dev);
4924 }
4925
4926 /**
4927 * s2io_starter - Entry point for the driver
4928 * Description: This function is the entry point for the driver. It verifies
4929 * the module loadable parameters and initializes PCI configuration space.
4930 */
4931
4932 int __init s2io_starter(void)
4933 {
4934 return pci_module_init(&s2io_driver);
4935 }
4936
4937 /**
4938 * s2io_closer - Cleanup routine for the driver
4939 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4940 */
4941
4942 static void s2io_closer(void)
4943 {
4944 pci_unregister_driver(&s2io_driver);
4945 DBG_PRINT(INIT_DBG, "cleanup done\n");
4946 }
4947
4948 module_init(s2io_starter);
4949 module_exit(s2io_closer);