1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
  18 *			  questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
  28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
34 * values are 1, 2 and 3.
  35 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
36 * tx_fifo_len: This too is an array of 8. Each element defines the number of
37 * Tx descriptors that can be associated with each corresponding FIFO.
38 ************************************************************************/
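/*
 * Annotation (not part of the original source): these parameters would
 * typically be supplied at module load time, e.g.
 *
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=2
 *
 * assuming they are registered with module_param()/module_param_array()
 * later in this file.
 */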
39
40 #include <linux/config.h>
41 #include <linux/module.h>
42 #include <linux/types.h>
43 #include <linux/errno.h>
44 #include <linux/ioport.h>
45 #include <linux/pci.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/kernel.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/skbuff.h>
51 #include <linux/init.h>
52 #include <linux/delay.h>
53 #include <linux/stddef.h>
54 #include <linux/ioctl.h>
55 #include <linux/timex.h>
56 #include <linux/sched.h>
57 #include <linux/ethtool.h>
58 #include <linux/workqueue.h>
59 #include <linux/if_vlan.h>
60 #include <linux/ip.h>
61 #include <linux/tcp.h>
62 #include <net/tcp.h>
63
64 #include <asm/system.h>
65 #include <asm/uaccess.h>
66 #include <asm/io.h>
67 #include <asm/div64.h>
68
69 /* local include */
70 #include "s2io.h"
71 #include "s2io-regs.h"
72
73 #define DRV_VERSION "2.0.11.2"
74
75 /* S2io Driver name & version. */
76 static char s2io_driver_name[] = "Neterion";
77 static char s2io_driver_version[] = DRV_VERSION;
78
79 static int rxd_size[4] = {32,48,48,64};
80 static int rxd_count[4] = {127,85,85,63};
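/*
 * Annotation (inferred from usage below): both tables are indexed by the
 * Rx descriptor mode (nic->rxd_mode).  Each Rx block is one page (see
 * SIZE_OF_BLOCK below), so e.g. mode 0 packs 127 descriptors of 32 bytes
 * (4064 bytes) into a 4K page, leaving room for the block-chaining header.
 */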
81
82 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
83 {
84 int ret;
85
86 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
87 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
88
89 return ret;
90 }
91
92 /*
  93  * Cards with the following subsystem_ids have a link state indication
  94  * problem: 600B, 600C, 600D, 640B, 640C and 640D.
  95  * The macro below identifies these cards given the subsystem_id.
96 */
97 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
98 (dev_type == XFRAME_I_DEVICE) ? \
99 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
100 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
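/*
 * Example (annotation): an XFRAME_I_DEVICE with subid 0x600C evaluates
 * to 1 (faulty link indicator); any XFRAME_II_DEVICE evaluates to 0.
 */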
101
102 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
103 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
104 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
105 #define PANIC 1
106 #define LOW 2
107 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
108 {
109 int level = 0;
110 mac_info_t *mac_control;
111
112 mac_control = &sp->mac_control;
113 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
114 level = LOW;
115 if (rxb_size <= rxd_count[sp->rxd_mode]) {
116 level = PANIC;
117 }
118 }
119
120 return level;
121 }
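/*
 * Usage sketch (annotation, hypothetical caller): the Rx path can key
 * its buffer replenishment off the return value, along the lines of:
 *
 *	switch (rx_buffer_level(sp, rxb_size, ring)) {
 *	case PANIC:
 *		// ring nearly empty: refill synchronously
 *		break;
 *	case LOW:
 *		// refill lazily, e.g. from a tasklet
 *		break;
 *	}
 */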
122
123 /* Ethtool related variables and Macros. */
124 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
125 "Register test\t(offline)",
126 "Eeprom test\t(offline)",
127 "Link test\t(online)",
128 "RLDRAM test\t(offline)",
129 "BIST Test\t(offline)"
130 };
131
132 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
133 {"tmac_frms"},
134 {"tmac_data_octets"},
135 {"tmac_drop_frms"},
136 {"tmac_mcst_frms"},
137 {"tmac_bcst_frms"},
138 {"tmac_pause_ctrl_frms"},
139 {"tmac_any_err_frms"},
140 {"tmac_vld_ip_octets"},
141 {"tmac_vld_ip"},
142 {"tmac_drop_ip"},
143 {"tmac_icmp"},
144 {"tmac_rst_tcp"},
145 {"tmac_tcp"},
146 {"tmac_udp"},
147 {"rmac_vld_frms"},
148 {"rmac_data_octets"},
149 {"rmac_fcs_err_frms"},
150 {"rmac_drop_frms"},
151 {"rmac_vld_mcst_frms"},
152 {"rmac_vld_bcst_frms"},
153 {"rmac_in_rng_len_err_frms"},
154 {"rmac_long_frms"},
155 {"rmac_pause_ctrl_frms"},
156 {"rmac_discarded_frms"},
157 {"rmac_usized_frms"},
158 {"rmac_osized_frms"},
159 {"rmac_frag_frms"},
160 {"rmac_jabber_frms"},
161 {"rmac_ip"},
162 {"rmac_ip_octets"},
163 {"rmac_hdr_err_ip"},
164 {"rmac_drop_ip"},
165 {"rmac_icmp"},
166 {"rmac_tcp"},
167 {"rmac_udp"},
168 {"rmac_err_drp_udp"},
169 {"rmac_pause_cnt"},
170 {"rmac_accepted_ip"},
171 {"rmac_err_tcp"},
172 {"\n DRIVER STATISTICS"},
173 {"single_bit_ecc_errs"},
174 {"double_bit_ecc_errs"},
175 ("lro_aggregated_pkts"),
176 ("lro_flush_both_count"),
177 ("lro_out_of_sequence_pkts"),
178 ("lro_flush_due_to_max_pkts"),
179 ("lro_avg_aggr_pkts"),
180 };
181
 182 #define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
 183 #define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
 184 
 185 #define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
 186 #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
187
188 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
189 init_timer(&timer); \
190 timer.function = handle; \
191 timer.data = (unsigned long) arg; \
 192 	mod_timer(&timer, (jiffies + exp))
 193 
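/*
 * Usage sketch (annotation, hypothetical names): arms "timer" to fire
 * "exp" jiffies from now, e.g.
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, alarm_intr_handler, dev, HZ / 2);
 *
 * Because the macro is not wrapped in do { } while (0), it must only be
 * used as a standalone statement, never as the body of an unbraced if.
 */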
194 /* Add the vlan */
195 static void s2io_vlan_rx_register(struct net_device *dev,
196 struct vlan_group *grp)
197 {
198 nic_t *nic = dev->priv;
199 unsigned long flags;
200
201 spin_lock_irqsave(&nic->tx_lock, flags);
202 nic->vlgrp = grp;
203 spin_unlock_irqrestore(&nic->tx_lock, flags);
204 }
205
206 /* Unregister the vlan */
207 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
208 {
209 nic_t *nic = dev->priv;
210 unsigned long flags;
211
212 spin_lock_irqsave(&nic->tx_lock, flags);
213 if (nic->vlgrp)
214 nic->vlgrp->vlan_devices[vid] = NULL;
215 spin_unlock_irqrestore(&nic->tx_lock, flags);
216 }
217
218 /*
219 * Constants to be programmed into the Xena's registers, to configure
220 * the XAUI.
221 */
222
223 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
224 #define END_SIGN 0x0
225
226 static const u64 herc_act_dtx_cfg[] = {
227 /* Set address */
228 0x8000051536750000ULL, 0x80000515367500E0ULL,
229 /* Write data */
230 0x8000051536750004ULL, 0x80000515367500E4ULL,
231 /* Set address */
232 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
233 /* Write data */
234 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
235 /* Set address */
236 0x801205150D440000ULL, 0x801205150D4400E0ULL,
237 /* Write data */
238 0x801205150D440004ULL, 0x801205150D4400E4ULL,
239 /* Set address */
240 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
241 /* Write data */
242 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
243 /* Done */
244 END_SIGN
245 };
246
247 static const u64 xena_mdio_cfg[] = {
248 /* Reset PMA PLL */
249 0xC001010000000000ULL, 0xC0010100000000E0ULL,
250 0xC0010100008000E4ULL,
251 /* Remove Reset from PMA PLL */
252 0xC001010000000000ULL, 0xC0010100000000E0ULL,
253 0xC0010100000000E4ULL,
254 END_SIGN
255 };
256
257 static const u64 xena_dtx_cfg[] = {
258 0x8000051500000000ULL, 0x80000515000000E0ULL,
259 0x80000515D93500E4ULL, 0x8001051500000000ULL,
260 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
261 0x8002051500000000ULL, 0x80020515000000E0ULL,
262 0x80020515F21000E4ULL,
263 /* Set PADLOOPBACKN */
264 0x8002051500000000ULL, 0x80020515000000E0ULL,
265 0x80020515B20000E4ULL, 0x8003051500000000ULL,
266 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
267 0x8004051500000000ULL, 0x80040515000000E0ULL,
268 0x80040515B20000E4ULL, 0x8005051500000000ULL,
269 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
270 SWITCH_SIGN,
271 /* Remove PADLOOPBACKN */
272 0x8002051500000000ULL, 0x80020515000000E0ULL,
273 0x80020515F20000E4ULL, 0x8003051500000000ULL,
274 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
275 0x8004051500000000ULL, 0x80040515000000E0ULL,
276 0x80040515F20000E4ULL, 0x8005051500000000ULL,
277 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
278 END_SIGN
279 };
280
281 /*
282 * Constants for Fixing the MacAddress problem seen mostly on
283 * Alpha machines.
284 */
285 static const u64 fix_mac[] = {
286 0x0060000000000000ULL, 0x0060600000000000ULL,
287 0x0040600000000000ULL, 0x0000600000000000ULL,
288 0x0020600000000000ULL, 0x0060600000000000ULL,
289 0x0020600000000000ULL, 0x0060600000000000ULL,
290 0x0020600000000000ULL, 0x0060600000000000ULL,
291 0x0020600000000000ULL, 0x0060600000000000ULL,
292 0x0020600000000000ULL, 0x0060600000000000ULL,
293 0x0020600000000000ULL, 0x0060600000000000ULL,
294 0x0020600000000000ULL, 0x0060600000000000ULL,
295 0x0020600000000000ULL, 0x0060600000000000ULL,
296 0x0020600000000000ULL, 0x0060600000000000ULL,
297 0x0020600000000000ULL, 0x0060600000000000ULL,
298 0x0020600000000000ULL, 0x0000600000000000ULL,
299 0x0040600000000000ULL, 0x0060600000000000ULL,
300 END_SIGN
301 };
302
303 /* Module Loadable parameters. */
304 static unsigned int tx_fifo_num = 1;
305 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
306 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
307 static unsigned int rx_ring_num = 1;
308 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
309 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
310 static unsigned int rts_frm_len[MAX_RX_RINGS] =
311 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
312 static unsigned int rx_ring_mode = 1;
313 static unsigned int use_continuous_tx_intrs = 1;
314 static unsigned int rmac_pause_time = 65535;
315 static unsigned int mc_pause_threshold_q0q3 = 187;
316 static unsigned int mc_pause_threshold_q4q7 = 187;
317 static unsigned int shared_splits;
318 static unsigned int tmac_util_period = 5;
319 static unsigned int rmac_util_period = 5;
320 static unsigned int bimodal = 0;
321 static unsigned int l3l4hdr_size = 128;
322 #ifndef CONFIG_S2IO_NAPI
323 static unsigned int indicate_max_pkts;
324 #endif
325 /* Frequency of Rx desc syncs expressed as power of 2 */
326 static unsigned int rxsync_frequency = 3;
327 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
328 static unsigned int intr_type = 0;
329 /* Large receive offload feature */
330 static unsigned int lro = 0;
331 /* Max pkts to be aggregated by LRO at one time. If not specified,
332 * aggregation happens until we hit max IP pkt size(64K)
333 */
334 static unsigned int lro_max_pkts = 0xFFFF;
335
336 /*
337 * S2IO device table.
338 * This table lists all the devices that this driver supports.
339 */
340 static struct pci_device_id s2io_tbl[] __devinitdata = {
341 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
342 PCI_ANY_ID, PCI_ANY_ID},
343 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
344 PCI_ANY_ID, PCI_ANY_ID},
345 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
346 PCI_ANY_ID, PCI_ANY_ID},
347 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
348 PCI_ANY_ID, PCI_ANY_ID},
349 {0,}
350 };
351
352 MODULE_DEVICE_TABLE(pci, s2io_tbl);
353
354 static struct pci_driver s2io_driver = {
355 .name = "S2IO",
356 .id_table = s2io_tbl,
357 .probe = s2io_init_nic,
358 .remove = __devexit_p(s2io_rem_nic),
359 };
360
 361 /* A simplifier macro used by both the init and free shared_mem functions. */
362 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
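/*
 * Example (annotation): this is a ceiling division, e.g. with
 * fifo_len = 100 TxDLs and lst_per_page = 32 lists per page,
 * TXD_MEM_PAGE_CNT(100, 32) = (100 + 31) / 32 = 4 pages.
 */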
363
364 /**
365 * init_shared_mem - Allocation and Initialization of Memory
366 * @nic: Device private variable.
367 * Description: The function allocates all the memory areas shared
368 * between the NIC and the driver. This includes Tx descriptors,
369 * Rx descriptors and the statistics block.
370 */
371
372 static int init_shared_mem(struct s2io_nic *nic)
373 {
374 u32 size;
375 void *tmp_v_addr, *tmp_v_addr_next;
376 dma_addr_t tmp_p_addr, tmp_p_addr_next;
377 RxD_block_t *pre_rxd_blk = NULL;
378 int i, j, blk_cnt, rx_sz, tx_sz;
379 int lst_size, lst_per_page;
380 struct net_device *dev = nic->dev;
381 unsigned long tmp;
382 buffAdd_t *ba;
383
384 mac_info_t *mac_control;
385 struct config_param *config;
386
387 mac_control = &nic->mac_control;
388 config = &nic->config;
389
390
 391 	/* Allocation and initialization of TXDLs in FIFOs */
392 size = 0;
393 for (i = 0; i < config->tx_fifo_num; i++) {
394 size += config->tx_cfg[i].fifo_len;
395 }
396 if (size > MAX_AVAILABLE_TXDS) {
397 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
398 __FUNCTION__);
399 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
400 return FAILURE;
401 }
402
403 lst_size = (sizeof(TxD_t) * config->max_txds);
404 tx_sz = lst_size * size;
405 lst_per_page = PAGE_SIZE / lst_size;
406
407 for (i = 0; i < config->tx_fifo_num; i++) {
408 int fifo_len = config->tx_cfg[i].fifo_len;
409 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
410 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
411 GFP_KERNEL);
412 if (!mac_control->fifos[i].list_info) {
413 DBG_PRINT(ERR_DBG,
414 "Malloc failed for list_info\n");
415 return -ENOMEM;
416 }
417 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
418 }
419 for (i = 0; i < config->tx_fifo_num; i++) {
420 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
421 lst_per_page);
422 mac_control->fifos[i].tx_curr_put_info.offset = 0;
423 mac_control->fifos[i].tx_curr_put_info.fifo_len =
424 config->tx_cfg[i].fifo_len - 1;
425 mac_control->fifos[i].tx_curr_get_info.offset = 0;
426 mac_control->fifos[i].tx_curr_get_info.fifo_len =
427 config->tx_cfg[i].fifo_len - 1;
428 mac_control->fifos[i].fifo_no = i;
429 mac_control->fifos[i].nic = nic;
430 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
431
432 for (j = 0; j < page_num; j++) {
433 int k = 0;
434 dma_addr_t tmp_p;
435 void *tmp_v;
436 tmp_v = pci_alloc_consistent(nic->pdev,
437 PAGE_SIZE, &tmp_p);
438 if (!tmp_v) {
439 DBG_PRINT(ERR_DBG,
440 "pci_alloc_consistent ");
441 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
442 return -ENOMEM;
443 }
 444 			/* If we got a zero DMA address (can happen on
 445 			 * certain platforms like PPC), reallocate.
 446 			 * Store the virtual address of the page we don't
 447 			 * want, so it can be freed later.
 448 			 */
449 if (!tmp_p) {
450 mac_control->zerodma_virt_addr = tmp_v;
451 DBG_PRINT(INIT_DBG,
452 "%s: Zero DMA address for TxDL. ", dev->name);
453 DBG_PRINT(INIT_DBG,
454 "Virtual address %p\n", tmp_v);
455 tmp_v = pci_alloc_consistent(nic->pdev,
456 PAGE_SIZE, &tmp_p);
457 if (!tmp_v) {
458 DBG_PRINT(ERR_DBG,
459 "pci_alloc_consistent ");
460 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
461 return -ENOMEM;
462 }
463 }
464 while (k < lst_per_page) {
465 int l = (j * lst_per_page) + k;
466 if (l == config->tx_cfg[i].fifo_len)
467 break;
468 mac_control->fifos[i].list_info[l].list_virt_addr =
469 tmp_v + (k * lst_size);
470 mac_control->fifos[i].list_info[l].list_phy_addr =
471 tmp_p + (k * lst_size);
472 k++;
473 }
474 }
475 }
476
477 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
478 if (!nic->ufo_in_band_v)
479 return -ENOMEM;
480
481 /* Allocation and initialization of RXDs in Rings */
482 size = 0;
483 for (i = 0; i < config->rx_ring_num; i++) {
484 if (config->rx_cfg[i].num_rxd %
485 (rxd_count[nic->rxd_mode] + 1)) {
486 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
487 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
488 i);
489 DBG_PRINT(ERR_DBG, "RxDs per Block");
490 return FAILURE;
491 }
492 size += config->rx_cfg[i].num_rxd;
493 mac_control->rings[i].block_count =
494 config->rx_cfg[i].num_rxd /
495 (rxd_count[nic->rxd_mode] + 1 );
496 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
497 mac_control->rings[i].block_count;
498 }
499 if (nic->rxd_mode == RXD_MODE_1)
500 size = (size * (sizeof(RxD1_t)));
501 else
502 size = (size * (sizeof(RxD3_t)));
503 rx_sz = size;
504
505 for (i = 0; i < config->rx_ring_num; i++) {
506 mac_control->rings[i].rx_curr_get_info.block_index = 0;
507 mac_control->rings[i].rx_curr_get_info.offset = 0;
508 mac_control->rings[i].rx_curr_get_info.ring_len =
509 config->rx_cfg[i].num_rxd - 1;
510 mac_control->rings[i].rx_curr_put_info.block_index = 0;
511 mac_control->rings[i].rx_curr_put_info.offset = 0;
512 mac_control->rings[i].rx_curr_put_info.ring_len =
513 config->rx_cfg[i].num_rxd - 1;
514 mac_control->rings[i].nic = nic;
515 mac_control->rings[i].ring_no = i;
516
517 blk_cnt = config->rx_cfg[i].num_rxd /
518 (rxd_count[nic->rxd_mode] + 1);
519 /* Allocating all the Rx blocks */
520 for (j = 0; j < blk_cnt; j++) {
521 rx_block_info_t *rx_blocks;
522 int l;
523
524 rx_blocks = &mac_control->rings[i].rx_blocks[j];
 525 			size = SIZE_OF_BLOCK; /* size is always page size */
526 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
527 &tmp_p_addr);
528 if (tmp_v_addr == NULL) {
529 /*
530 * In case of failure, free_shared_mem()
531 * is called, which should free any
532 * memory that was alloced till the
533 * failure happened.
534 */
535 rx_blocks->block_virt_addr = tmp_v_addr;
536 return -ENOMEM;
537 }
538 memset(tmp_v_addr, 0, size);
539 rx_blocks->block_virt_addr = tmp_v_addr;
540 rx_blocks->block_dma_addr = tmp_p_addr;
 541 			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
 542 						  rxd_count[nic->rxd_mode],
 543 						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
544 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
545 rx_blocks->rxds[l].virt_addr =
546 rx_blocks->block_virt_addr +
547 (rxd_size[nic->rxd_mode] * l);
548 rx_blocks->rxds[l].dma_addr =
549 rx_blocks->block_dma_addr +
550 (rxd_size[nic->rxd_mode] * l);
551 }
552
553 mac_control->rings[i].rx_blocks[j].block_virt_addr =
554 tmp_v_addr;
555 mac_control->rings[i].rx_blocks[j].block_dma_addr =
556 tmp_p_addr;
557 }
558 /* Interlinking all Rx Blocks */
559 for (j = 0; j < blk_cnt; j++) {
560 tmp_v_addr =
561 mac_control->rings[i].rx_blocks[j].block_virt_addr;
562 tmp_v_addr_next =
563 mac_control->rings[i].rx_blocks[(j + 1) %
564 blk_cnt].block_virt_addr;
565 tmp_p_addr =
566 mac_control->rings[i].rx_blocks[j].block_dma_addr;
567 tmp_p_addr_next =
568 mac_control->rings[i].rx_blocks[(j + 1) %
569 blk_cnt].block_dma_addr;
570
571 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
572 pre_rxd_blk->reserved_2_pNext_RxD_block =
573 (unsigned long) tmp_v_addr_next;
574 pre_rxd_blk->pNext_RxD_Blk_physical =
575 (u64) tmp_p_addr_next;
576 }
577 }
578 if (nic->rxd_mode >= RXD_MODE_3A) {
579 /*
580 * Allocation of Storages for buffer addresses in 2BUFF mode
581 * and the buffers as well.
582 */
583 for (i = 0; i < config->rx_ring_num; i++) {
584 blk_cnt = config->rx_cfg[i].num_rxd /
585 (rxd_count[nic->rxd_mode]+ 1);
586 mac_control->rings[i].ba =
587 kmalloc((sizeof(buffAdd_t *) * blk_cnt),
588 GFP_KERNEL);
589 if (!mac_control->rings[i].ba)
590 return -ENOMEM;
591 for (j = 0; j < blk_cnt; j++) {
592 int k = 0;
593 mac_control->rings[i].ba[j] =
594 kmalloc((sizeof(buffAdd_t) *
595 (rxd_count[nic->rxd_mode] + 1)),
596 GFP_KERNEL);
597 if (!mac_control->rings[i].ba[j])
598 return -ENOMEM;
599 while (k != rxd_count[nic->rxd_mode]) {
600 ba = &mac_control->rings[i].ba[j][k];
601
602 ba->ba_0_org = (void *) kmalloc
603 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
604 if (!ba->ba_0_org)
605 return -ENOMEM;
606 tmp = (unsigned long)ba->ba_0_org;
607 tmp += ALIGN_SIZE;
608 tmp &= ~((unsigned long) ALIGN_SIZE);
609 ba->ba_0 = (void *) tmp;
610
611 ba->ba_1_org = (void *) kmalloc
612 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
613 if (!ba->ba_1_org)
614 return -ENOMEM;
615 tmp = (unsigned long) ba->ba_1_org;
616 tmp += ALIGN_SIZE;
617 tmp &= ~((unsigned long) ALIGN_SIZE);
618 ba->ba_1 = (void *) tmp;
619 k++;
620 }
621 }
622 }
623 }
624
625 /* Allocation and initialization of Statistics block */
626 size = sizeof(StatInfo_t);
627 mac_control->stats_mem = pci_alloc_consistent
628 (nic->pdev, size, &mac_control->stats_mem_phy);
629
630 if (!mac_control->stats_mem) {
631 /*
632 * In case of failure, free_shared_mem() is called, which
633 * should free any memory that was alloced till the
634 * failure happened.
635 */
636 return -ENOMEM;
637 }
638 mac_control->stats_mem_sz = size;
639
640 tmp_v_addr = mac_control->stats_mem;
641 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
642 memset(tmp_v_addr, 0, size);
643 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
644 (unsigned long long) tmp_p_addr);
645
646 return SUCCESS;
647 }
648
649 /**
650 * free_shared_mem - Free the allocated Memory
651 * @nic: Device private variable.
 652  * Description: This function frees all the memory allocated by
 653  * the init_shared_mem() function and returns it to the kernel.
654 */
655
656 static void free_shared_mem(struct s2io_nic *nic)
657 {
658 int i, j, blk_cnt, size;
659 void *tmp_v_addr;
660 dma_addr_t tmp_p_addr;
661 mac_info_t *mac_control;
662 struct config_param *config;
663 int lst_size, lst_per_page;
664 struct net_device *dev = nic->dev;
665
666 if (!nic)
667 return;
668
669 mac_control = &nic->mac_control;
670 config = &nic->config;
671
672 lst_size = (sizeof(TxD_t) * config->max_txds);
673 lst_per_page = PAGE_SIZE / lst_size;
674
675 for (i = 0; i < config->tx_fifo_num; i++) {
676 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
677 lst_per_page);
678 for (j = 0; j < page_num; j++) {
679 int mem_blks = (j * lst_per_page);
680 if (!mac_control->fifos[i].list_info)
681 return;
682 if (!mac_control->fifos[i].list_info[mem_blks].
683 list_virt_addr)
684 break;
685 pci_free_consistent(nic->pdev, PAGE_SIZE,
686 mac_control->fifos[i].
687 list_info[mem_blks].
688 list_virt_addr,
689 mac_control->fifos[i].
690 list_info[mem_blks].
691 list_phy_addr);
692 }
693 /* If we got a zero DMA address during allocation,
694 * free the page now
695 */
696 if (mac_control->zerodma_virt_addr) {
697 pci_free_consistent(nic->pdev, PAGE_SIZE,
698 mac_control->zerodma_virt_addr,
699 (dma_addr_t)0);
700 DBG_PRINT(INIT_DBG,
701 "%s: Freeing TxDL with zero DMA addr. ",
702 dev->name);
703 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
704 mac_control->zerodma_virt_addr);
705 }
706 kfree(mac_control->fifos[i].list_info);
707 }
708
709 size = SIZE_OF_BLOCK;
710 for (i = 0; i < config->rx_ring_num; i++) {
711 blk_cnt = mac_control->rings[i].block_count;
712 for (j = 0; j < blk_cnt; j++) {
713 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
714 block_virt_addr;
715 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
716 block_dma_addr;
717 if (tmp_v_addr == NULL)
718 break;
719 pci_free_consistent(nic->pdev, size,
720 tmp_v_addr, tmp_p_addr);
721 kfree(mac_control->rings[i].rx_blocks[j].rxds);
722 }
723 }
724
725 if (nic->rxd_mode >= RXD_MODE_3A) {
726 /* Freeing buffer storage addresses in 2BUFF mode. */
727 for (i = 0; i < config->rx_ring_num; i++) {
728 blk_cnt = config->rx_cfg[i].num_rxd /
729 (rxd_count[nic->rxd_mode] + 1);
730 for (j = 0; j < blk_cnt; j++) {
731 int k = 0;
732 if (!mac_control->rings[i].ba[j])
733 continue;
734 while (k != rxd_count[nic->rxd_mode]) {
735 buffAdd_t *ba =
736 &mac_control->rings[i].ba[j][k];
737 kfree(ba->ba_0_org);
738 kfree(ba->ba_1_org);
739 k++;
740 }
741 kfree(mac_control->rings[i].ba[j]);
742 }
743 kfree(mac_control->rings[i].ba);
744 }
745 }
746
747 if (mac_control->stats_mem) {
748 pci_free_consistent(nic->pdev,
749 mac_control->stats_mem_sz,
750 mac_control->stats_mem,
751 mac_control->stats_mem_phy);
752 }
753 if (nic->ufo_in_band_v)
754 kfree(nic->ufo_in_band_v);
755 }
756
757 /**
 758  * s2io_verify_pci_mode - Determine the PCI/PCI-X bus mode the adapter is in.
759 */
760
761 static int s2io_verify_pci_mode(nic_t *nic)
762 {
763 XENA_dev_config_t __iomem *bar0 = nic->bar0;
764 register u64 val64 = 0;
765 int mode;
766
767 val64 = readq(&bar0->pci_mode);
768 mode = (u8)GET_PCI_MODE(val64);
769
 770 	if (val64 & PCI_MODE_UNKNOWN_MODE)
771 return -1; /* Unknown PCI mode */
772 return mode;
773 }
774
775
776 /**
 777  * s2io_print_pci_mode - Print the PCI/PCI-X bus width and mode in use.
  */
778 */
779 static int s2io_print_pci_mode(nic_t *nic)
780 {
781 XENA_dev_config_t __iomem *bar0 = nic->bar0;
782 register u64 val64 = 0;
783 int mode;
784 struct config_param *config = &nic->config;
785
786 val64 = readq(&bar0->pci_mode);
787 mode = (u8)GET_PCI_MODE(val64);
788
 789 	if (val64 & PCI_MODE_UNKNOWN_MODE)
790 return -1; /* Unknown PCI mode */
791
792 if (val64 & PCI_MODE_32_BITS) {
793 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
794 } else {
795 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
796 }
797
798 switch(mode) {
799 case PCI_MODE_PCI_33:
800 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
801 config->bus_speed = 33;
802 break;
803 case PCI_MODE_PCI_66:
804 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
805 config->bus_speed = 133;
806 break;
807 case PCI_MODE_PCIX_M1_66:
808 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
809 config->bus_speed = 133; /* Herc doubles the clock rate */
810 break;
811 case PCI_MODE_PCIX_M1_100:
812 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
813 config->bus_speed = 200;
814 break;
815 case PCI_MODE_PCIX_M1_133:
816 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
817 config->bus_speed = 266;
818 break;
819 case PCI_MODE_PCIX_M2_66:
820 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
821 config->bus_speed = 133;
822 break;
823 case PCI_MODE_PCIX_M2_100:
824 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
825 config->bus_speed = 200;
826 break;
827 case PCI_MODE_PCIX_M2_133:
828 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
829 config->bus_speed = 266;
830 break;
831 default:
832 return -1; /* Unsupported bus speed */
833 }
834
835 return mode;
836 }
837
838 /**
839 * init_nic - Initialization of hardware
 840  * @nic: device private variable
841 * Description: The function sequentially configures every block
842 * of the H/W from their reset values.
843 * Return Value: SUCCESS on success and
844 * '-1' on failure (endian settings incorrect).
845 */
846
847 static int init_nic(struct s2io_nic *nic)
848 {
849 XENA_dev_config_t __iomem *bar0 = nic->bar0;
850 struct net_device *dev = nic->dev;
851 register u64 val64 = 0;
852 void __iomem *add;
853 u32 time;
854 int i, j;
855 mac_info_t *mac_control;
856 struct config_param *config;
857 int mdio_cnt = 0, dtx_cnt = 0;
858 unsigned long long mem_share;
859 int mem_size;
860
861 mac_control = &nic->mac_control;
862 config = &nic->config;
863
 864 	/* Set the swapper control on the card */
865 if(s2io_set_swapper(nic)) {
866 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
867 return -1;
868 }
869
870 /*
871 * Herc requires EOI to be removed from reset before XGXS, so..
872 */
873 if (nic->device_type & XFRAME_II_DEVICE) {
874 val64 = 0xA500000000ULL;
875 writeq(val64, &bar0->sw_reset);
876 msleep(500);
877 val64 = readq(&bar0->sw_reset);
878 }
879
880 /* Remove XGXS from reset state */
881 val64 = 0;
882 writeq(val64, &bar0->sw_reset);
883 msleep(500);
884 val64 = readq(&bar0->sw_reset);
885
886 /* Enable Receiving broadcasts */
887 add = &bar0->mac_cfg;
888 val64 = readq(&bar0->mac_cfg);
889 val64 |= MAC_RMAC_BCAST_ENABLE;
890 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
891 writel((u32) val64, add);
892 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
893 writel((u32) (val64 >> 32), (add + 4));
894
895 /* Read registers in all blocks */
896 val64 = readq(&bar0->mac_int_mask);
897 val64 = readq(&bar0->mc_int_mask);
898 val64 = readq(&bar0->xgxs_int_mask);
899
900 /* Set MTU */
901 val64 = dev->mtu;
902 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
903
904 /*
905 * Configuring the XAUI Interface of Xena.
906 * ***************************************
907 * To Configure the Xena's XAUI, one has to write a series
908 * of 64 bit values into two registers in a particular
909 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
910 * which will be defined in the array of configuration values
911 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
 912  * to switch writing from one register to another. We continue
 913  * writing these values until we encounter the 'END_SIGN' macro.
 914  * For example, after making a series of 21 writes into
915 * dtx_control register the 'SWITCH_SIGN' appears and hence we
916 * start writing into mdio_control until we encounter END_SIGN.
917 */
918 if (nic->device_type & XFRAME_II_DEVICE) {
919 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
920 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
921 &bar0->dtx_control, UF);
922 if (dtx_cnt & 0x1)
923 msleep(1); /* Necessary!! */
924 dtx_cnt++;
925 }
926 } else {
927 while (1) {
928 dtx_cfg:
929 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
930 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
931 dtx_cnt++;
932 goto mdio_cfg;
933 }
934 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
935 &bar0->dtx_control, UF);
936 val64 = readq(&bar0->dtx_control);
937 dtx_cnt++;
938 }
939 mdio_cfg:
940 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
941 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
942 mdio_cnt++;
943 goto dtx_cfg;
944 }
945 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
946 &bar0->mdio_control, UF);
947 val64 = readq(&bar0->mdio_control);
948 mdio_cnt++;
949 }
950 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
951 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
952 break;
953 } else {
954 goto dtx_cfg;
955 }
956 }
957 }
958
959 /* Tx DMA Initialization */
960 val64 = 0;
961 writeq(val64, &bar0->tx_fifo_partition_0);
962 writeq(val64, &bar0->tx_fifo_partition_1);
963 writeq(val64, &bar0->tx_fifo_partition_2);
964 writeq(val64, &bar0->tx_fifo_partition_3);
965
966
967 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
968 val64 |=
969 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
970 13) | vBIT(config->tx_cfg[i].fifo_priority,
971 ((i * 32) + 5), 3);
972
973 if (i == (config->tx_fifo_num - 1)) {
974 if (i % 2 == 0)
975 i++;
976 }
977
978 switch (i) {
979 case 1:
980 writeq(val64, &bar0->tx_fifo_partition_0);
981 val64 = 0;
982 break;
983 case 3:
984 writeq(val64, &bar0->tx_fifo_partition_1);
985 val64 = 0;
986 break;
987 case 5:
988 writeq(val64, &bar0->tx_fifo_partition_2);
989 val64 = 0;
990 break;
991 case 7:
992 writeq(val64, &bar0->tx_fifo_partition_3);
993 break;
994 }
995 }
996
997 /* Enable Tx FIFO partition 0. */
998 val64 = readq(&bar0->tx_fifo_partition_0);
999 val64 |= BIT(0); /* To enable the FIFO partition. */
1000 writeq(val64, &bar0->tx_fifo_partition_0);
1001
1002 /*
1003 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1004 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1005 */
1006 if ((nic->device_type == XFRAME_I_DEVICE) &&
1007 (get_xena_rev_id(nic->pdev) < 4))
1008 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1009
1010 val64 = readq(&bar0->tx_fifo_partition_0);
1011 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1012 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1013
1014 /*
1015 * Initialization of Tx_PA_CONFIG register to ignore packet
1016 * integrity checking.
1017 */
1018 val64 = readq(&bar0->tx_pa_cfg);
1019 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1020 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1021 writeq(val64, &bar0->tx_pa_cfg);
1022
 1023 	/* Rx DMA initialization. */
1024 val64 = 0;
1025 for (i = 0; i < config->rx_ring_num; i++) {
1026 val64 |=
1027 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1028 3);
1029 }
1030 writeq(val64, &bar0->rx_queue_priority);
1031
1032 /*
1033 * Allocating equal share of memory to all the
1034 * configured Rings.
1035 */
1036 val64 = 0;
1037 if (nic->device_type & XFRAME_II_DEVICE)
1038 mem_size = 32;
1039 else
1040 mem_size = 64;
1041
1042 for (i = 0; i < config->rx_ring_num; i++) {
1043 switch (i) {
1044 case 0:
1045 mem_share = (mem_size / config->rx_ring_num +
1046 mem_size % config->rx_ring_num);
1047 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1048 continue;
1049 case 1:
1050 mem_share = (mem_size / config->rx_ring_num);
1051 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1052 continue;
1053 case 2:
1054 mem_share = (mem_size / config->rx_ring_num);
1055 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1056 continue;
1057 case 3:
1058 mem_share = (mem_size / config->rx_ring_num);
1059 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1060 continue;
1061 case 4:
1062 mem_share = (mem_size / config->rx_ring_num);
1063 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1064 continue;
1065 case 5:
1066 mem_share = (mem_size / config->rx_ring_num);
1067 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1068 continue;
1069 case 6:
1070 mem_share = (mem_size / config->rx_ring_num);
1071 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1072 continue;
1073 case 7:
1074 mem_share = (mem_size / config->rx_ring_num);
1075 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1076 continue;
1077 }
1078 }
1079 writeq(val64, &bar0->rx_queue_cfg);
1080
1081 /*
1082 * Filling Tx round robin registers
1083 * as per the number of FIFOs
1084 */
1085 switch (config->tx_fifo_num) {
1086 case 1:
1087 val64 = 0x0000000000000000ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_0);
1089 writeq(val64, &bar0->tx_w_round_robin_1);
1090 writeq(val64, &bar0->tx_w_round_robin_2);
1091 writeq(val64, &bar0->tx_w_round_robin_3);
1092 writeq(val64, &bar0->tx_w_round_robin_4);
1093 break;
1094 case 2:
1095 val64 = 0x0000010000010000ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_0);
1097 val64 = 0x0100000100000100ULL;
1098 writeq(val64, &bar0->tx_w_round_robin_1);
1099 val64 = 0x0001000001000001ULL;
1100 writeq(val64, &bar0->tx_w_round_robin_2);
1101 val64 = 0x0000010000010000ULL;
1102 writeq(val64, &bar0->tx_w_round_robin_3);
1103 val64 = 0x0100000000000000ULL;
1104 writeq(val64, &bar0->tx_w_round_robin_4);
1105 break;
1106 case 3:
1107 val64 = 0x0001000102000001ULL;
1108 writeq(val64, &bar0->tx_w_round_robin_0);
1109 val64 = 0x0001020000010001ULL;
1110 writeq(val64, &bar0->tx_w_round_robin_1);
1111 val64 = 0x0200000100010200ULL;
1112 writeq(val64, &bar0->tx_w_round_robin_2);
1113 val64 = 0x0001000102000001ULL;
1114 writeq(val64, &bar0->tx_w_round_robin_3);
1115 val64 = 0x0001020000000000ULL;
1116 writeq(val64, &bar0->tx_w_round_robin_4);
1117 break;
1118 case 4:
1119 val64 = 0x0001020300010200ULL;
1120 writeq(val64, &bar0->tx_w_round_robin_0);
1121 val64 = 0x0100000102030001ULL;
1122 writeq(val64, &bar0->tx_w_round_robin_1);
1123 val64 = 0x0200010000010203ULL;
1124 writeq(val64, &bar0->tx_w_round_robin_2);
1125 val64 = 0x0001020001000001ULL;
1126 writeq(val64, &bar0->tx_w_round_robin_3);
1127 val64 = 0x0203000100000000ULL;
1128 writeq(val64, &bar0->tx_w_round_robin_4);
1129 break;
1130 case 5:
1131 val64 = 0x0001000203000102ULL;
1132 writeq(val64, &bar0->tx_w_round_robin_0);
1133 val64 = 0x0001020001030004ULL;
1134 writeq(val64, &bar0->tx_w_round_robin_1);
1135 val64 = 0x0001000203000102ULL;
1136 writeq(val64, &bar0->tx_w_round_robin_2);
1137 val64 = 0x0001020001030004ULL;
1138 writeq(val64, &bar0->tx_w_round_robin_3);
1139 val64 = 0x0001000000000000ULL;
1140 writeq(val64, &bar0->tx_w_round_robin_4);
1141 break;
1142 case 6:
1143 val64 = 0x0001020304000102ULL;
1144 writeq(val64, &bar0->tx_w_round_robin_0);
1145 val64 = 0x0304050001020001ULL;
1146 writeq(val64, &bar0->tx_w_round_robin_1);
1147 val64 = 0x0203000100000102ULL;
1148 writeq(val64, &bar0->tx_w_round_robin_2);
1149 val64 = 0x0304000102030405ULL;
1150 writeq(val64, &bar0->tx_w_round_robin_3);
1151 val64 = 0x0001000200000000ULL;
1152 writeq(val64, &bar0->tx_w_round_robin_4);
1153 break;
1154 case 7:
1155 val64 = 0x0001020001020300ULL;
1156 writeq(val64, &bar0->tx_w_round_robin_0);
1157 val64 = 0x0102030400010203ULL;
1158 writeq(val64, &bar0->tx_w_round_robin_1);
1159 val64 = 0x0405060001020001ULL;
1160 writeq(val64, &bar0->tx_w_round_robin_2);
1161 val64 = 0x0304050000010200ULL;
1162 writeq(val64, &bar0->tx_w_round_robin_3);
1163 val64 = 0x0102030000000000ULL;
1164 writeq(val64, &bar0->tx_w_round_robin_4);
1165 break;
1166 case 8:
1167 val64 = 0x0001020300040105ULL;
1168 writeq(val64, &bar0->tx_w_round_robin_0);
1169 val64 = 0x0200030106000204ULL;
1170 writeq(val64, &bar0->tx_w_round_robin_1);
1171 val64 = 0x0103000502010007ULL;
1172 writeq(val64, &bar0->tx_w_round_robin_2);
1173 val64 = 0x0304010002060500ULL;
1174 writeq(val64, &bar0->tx_w_round_robin_3);
1175 val64 = 0x0103020400000000ULL;
1176 writeq(val64, &bar0->tx_w_round_robin_4);
1177 break;
1178 }
1179
1180 /* Filling the Rx round robin registers as per the
1181 * number of Rings and steering based on QoS.
1182 */
1183 switch (config->rx_ring_num) {
1184 case 1:
1185 val64 = 0x8080808080808080ULL;
1186 writeq(val64, &bar0->rts_qos_steering);
1187 break;
1188 case 2:
1189 val64 = 0x0000010000010000ULL;
1190 writeq(val64, &bar0->rx_w_round_robin_0);
1191 val64 = 0x0100000100000100ULL;
1192 writeq(val64, &bar0->rx_w_round_robin_1);
1193 val64 = 0x0001000001000001ULL;
1194 writeq(val64, &bar0->rx_w_round_robin_2);
1195 val64 = 0x0000010000010000ULL;
1196 writeq(val64, &bar0->rx_w_round_robin_3);
1197 val64 = 0x0100000000000000ULL;
1198 writeq(val64, &bar0->rx_w_round_robin_4);
1199
1200 val64 = 0x8080808040404040ULL;
1201 writeq(val64, &bar0->rts_qos_steering);
1202 break;
1203 case 3:
1204 val64 = 0x0001000102000001ULL;
1205 writeq(val64, &bar0->rx_w_round_robin_0);
1206 val64 = 0x0001020000010001ULL;
1207 writeq(val64, &bar0->rx_w_round_robin_1);
1208 val64 = 0x0200000100010200ULL;
1209 writeq(val64, &bar0->rx_w_round_robin_2);
1210 val64 = 0x0001000102000001ULL;
1211 writeq(val64, &bar0->rx_w_round_robin_3);
1212 val64 = 0x0001020000000000ULL;
1213 writeq(val64, &bar0->rx_w_round_robin_4);
1214
1215 val64 = 0x8080804040402020ULL;
1216 writeq(val64, &bar0->rts_qos_steering);
1217 break;
1218 case 4:
1219 val64 = 0x0001020300010200ULL;
1220 writeq(val64, &bar0->rx_w_round_robin_0);
1221 val64 = 0x0100000102030001ULL;
1222 writeq(val64, &bar0->rx_w_round_robin_1);
1223 val64 = 0x0200010000010203ULL;
1224 writeq(val64, &bar0->rx_w_round_robin_2);
1225 val64 = 0x0001020001000001ULL;
1226 writeq(val64, &bar0->rx_w_round_robin_3);
1227 val64 = 0x0203000100000000ULL;
1228 writeq(val64, &bar0->rx_w_round_robin_4);
1229
1230 val64 = 0x8080404020201010ULL;
1231 writeq(val64, &bar0->rts_qos_steering);
1232 break;
1233 case 5:
1234 val64 = 0x0001000203000102ULL;
1235 writeq(val64, &bar0->rx_w_round_robin_0);
1236 val64 = 0x0001020001030004ULL;
1237 writeq(val64, &bar0->rx_w_round_robin_1);
1238 val64 = 0x0001000203000102ULL;
1239 writeq(val64, &bar0->rx_w_round_robin_2);
1240 val64 = 0x0001020001030004ULL;
1241 writeq(val64, &bar0->rx_w_round_robin_3);
1242 val64 = 0x0001000000000000ULL;
1243 writeq(val64, &bar0->rx_w_round_robin_4);
1244
1245 val64 = 0x8080404020201008ULL;
1246 writeq(val64, &bar0->rts_qos_steering);
1247 break;
1248 case 6:
1249 val64 = 0x0001020304000102ULL;
1250 writeq(val64, &bar0->rx_w_round_robin_0);
1251 val64 = 0x0304050001020001ULL;
1252 writeq(val64, &bar0->rx_w_round_robin_1);
1253 val64 = 0x0203000100000102ULL;
1254 writeq(val64, &bar0->rx_w_round_robin_2);
1255 val64 = 0x0304000102030405ULL;
1256 writeq(val64, &bar0->rx_w_round_robin_3);
1257 val64 = 0x0001000200000000ULL;
1258 writeq(val64, &bar0->rx_w_round_robin_4);
1259
1260 val64 = 0x8080404020100804ULL;
1261 writeq(val64, &bar0->rts_qos_steering);
1262 break;
1263 case 7:
1264 val64 = 0x0001020001020300ULL;
1265 writeq(val64, &bar0->rx_w_round_robin_0);
1266 val64 = 0x0102030400010203ULL;
1267 writeq(val64, &bar0->rx_w_round_robin_1);
1268 val64 = 0x0405060001020001ULL;
1269 writeq(val64, &bar0->rx_w_round_robin_2);
1270 val64 = 0x0304050000010200ULL;
1271 writeq(val64, &bar0->rx_w_round_robin_3);
1272 val64 = 0x0102030000000000ULL;
1273 writeq(val64, &bar0->rx_w_round_robin_4);
1274
1275 val64 = 0x8080402010080402ULL;
1276 writeq(val64, &bar0->rts_qos_steering);
1277 break;
1278 case 8:
1279 val64 = 0x0001020300040105ULL;
1280 writeq(val64, &bar0->rx_w_round_robin_0);
1281 val64 = 0x0200030106000204ULL;
1282 writeq(val64, &bar0->rx_w_round_robin_1);
1283 val64 = 0x0103000502010007ULL;
1284 writeq(val64, &bar0->rx_w_round_robin_2);
1285 val64 = 0x0304010002060500ULL;
1286 writeq(val64, &bar0->rx_w_round_robin_3);
1287 val64 = 0x0103020400000000ULL;
1288 writeq(val64, &bar0->rx_w_round_robin_4);
1289
1290 val64 = 0x8040201008040201ULL;
1291 writeq(val64, &bar0->rts_qos_steering);
1292 break;
1293 }
1294
1295 /* UDP Fix */
1296 val64 = 0;
1297 for (i = 0; i < 8; i++)
1298 writeq(val64, &bar0->rts_frm_len_n[i]);
1299
1300 /* Set the default rts frame length for the rings configured */
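	/*
	 * Annotation (assumption): the extra 22 bytes presumably account
	 * for the 14-byte Ethernet header, a 4-byte VLAN tag and the
	 * 4-byte FCS on top of the IP MTU.
	 */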
1301 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1302 for (i = 0 ; i < config->rx_ring_num ; i++)
1303 writeq(val64, &bar0->rts_frm_len_n[i]);
1304
1305 /* Set the frame length for the configured rings
1306 * desired by the user
1307 */
1308 for (i = 0; i < config->rx_ring_num; i++) {
 1309 		/* If rts_frm_len[i] == 0 then it is assumed that the user
 1310 		 * has not specified frame length steering.
 1311 		 * If the user provides a frame length then program
 1312 		 * the rts_frm_len register for those values, or else
 1313 		 * leave it as it is.
 1314 		 */
1315 if (rts_frm_len[i] != 0) {
1316 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1317 &bar0->rts_frm_len_n[i]);
1318 }
1319 }
1320
1321 /* Program statistics memory */
1322 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1323
1324 if (nic->device_type == XFRAME_II_DEVICE) {
1325 val64 = STAT_BC(0x320);
1326 writeq(val64, &bar0->stat_byte_cnt);
1327 }
1328
1329 /*
1330 * Initializing the sampling rate for the device to calculate the
1331 * bandwidth utilization.
1332 */
1333 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1334 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1335 writeq(val64, &bar0->mac_link_util);
1336
1337
1338 /*
1339 * Initializing the Transmit and Receive Traffic Interrupt
1340 * Scheme.
1341 */
1342 /*
1343 * TTI Initialization. Default Tx timer gets us about
1344 * 250 interrupts per sec. Continuous interrupts are enabled
1345 * by default.
1346 */
1347 if (nic->device_type == XFRAME_II_DEVICE) {
1348 int count = (nic->config.bus_speed * 125)/2;
1349 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1350 } else {
1351
1352 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1353 }
1354 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1355 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1356 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1357 if (use_continuous_tx_intrs)
1358 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1359 writeq(val64, &bar0->tti_data1_mem);
1360
1361 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1362 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1363 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1364 writeq(val64, &bar0->tti_data2_mem);
1365
1366 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1367 writeq(val64, &bar0->tti_command_mem);
1368
1369 /*
1370 * Once the operation completes, the Strobe bit of the command
 1371 	 * register will be reset. We poll for this particular condition.
 1372 	 * We wait for a maximum of 500ms for the operation to complete;
 1373 	 * if it's not complete by then we return an error.
1374 */
1375 time = 0;
1376 while (TRUE) {
1377 val64 = readq(&bar0->tti_command_mem);
1378 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1379 break;
1380 }
1381 if (time > 10) {
1382 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1383 dev->name);
1384 return -1;
1385 }
1386 msleep(50);
1387 time++;
1388 }
1389
1390 if (nic->config.bimodal) {
1391 int k = 0;
1392 for (k = 0; k < config->rx_ring_num; k++) {
1393 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1394 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1395 writeq(val64, &bar0->tti_command_mem);
1396
1397 /*
1398 * Once the operation completes, the Strobe bit of the command
 1399 			 * register will be reset. We poll for this particular condition.
 1400 			 * We wait for a maximum of 500ms for the operation to complete;
 1401 			 * if it's not complete by then we return an error.
1402 */
1403 time = 0;
1404 while (TRUE) {
1405 val64 = readq(&bar0->tti_command_mem);
1406 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1407 break;
1408 }
1409 if (time > 10) {
1410 DBG_PRINT(ERR_DBG,
1411 "%s: TTI init Failed\n",
1412 dev->name);
1413 return -1;
1414 }
1415 time++;
1416 msleep(50);
1417 }
1418 }
1419 } else {
1420
1421 /* RTI Initialization */
1422 if (nic->device_type == XFRAME_II_DEVICE) {
1423 /*
 1424 			 * Programmed to generate approximately 500
 1425 			 * interrupts per second
1426 */
1427 int count = (nic->config.bus_speed * 125)/4;
1428 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1429 } else {
1430 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1431 }
1432 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1433 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1434 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1435
1436 writeq(val64, &bar0->rti_data1_mem);
1437
1438 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1439 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1440 if (nic->intr_type == MSI_X)
1441 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1442 RTI_DATA2_MEM_RX_UFC_D(0x40));
1443 else
1444 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1445 RTI_DATA2_MEM_RX_UFC_D(0x80));
1446 writeq(val64, &bar0->rti_data2_mem);
1447
1448 for (i = 0; i < config->rx_ring_num; i++) {
1449 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1450 | RTI_CMD_MEM_OFFSET(i);
1451 writeq(val64, &bar0->rti_command_mem);
1452
1453 /*
1454 * Once the operation completes, the Strobe bit of the
1455 * command register will be reset. We poll for this
1456 * particular condition. We wait for a maximum of 500ms
 1457 			 * for the operation to complete; if it's not complete
 1458 			 * by then we return an error.
1459 */
1460 time = 0;
1461 while (TRUE) {
1462 val64 = readq(&bar0->rti_command_mem);
1463 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1464 break;
1465 }
1466 if (time > 10) {
1467 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1468 dev->name);
1469 return -1;
1470 }
1471 time++;
1472 msleep(50);
1473 }
1474 }
1475 }
1476
1477 /*
 1478 	 * Initializing proper pause-threshold values for all
 1479 	 * the 8 queues on the Rx side.
1480 */
1481 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1482 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1483
1484 /* Disable RMAC PAD STRIPPING */
1485 add = &bar0->mac_cfg;
1486 val64 = readq(&bar0->mac_cfg);
1487 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1488 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1489 writel((u32) (val64), add);
1490 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1491 writel((u32) (val64 >> 32), (add + 4));
1492 val64 = readq(&bar0->mac_cfg);
1493
1494 /* Enable FCS stripping by adapter */
1495 add = &bar0->mac_cfg;
1496 val64 = readq(&bar0->mac_cfg);
1497 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1498 if (nic->device_type == XFRAME_II_DEVICE)
1499 writeq(val64, &bar0->mac_cfg);
1500 else {
1501 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1502 writel((u32) (val64), add);
1503 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1504 writel((u32) (val64 >> 32), (add + 4));
1505 }
1506
1507 /*
1508 * Set the time value to be inserted in the pause frame
1509 * generated by xena.
1510 */
1511 val64 = readq(&bar0->rmac_pause_cfg);
1512 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1513 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1514 writeq(val64, &bar0->rmac_pause_cfg);
1515
1516 /*
 1517 	 * Set the threshold limit for generating the pause frame.
 1518 	 * If the amount of data in any queue exceeds the ratio
 1519 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 of the
 1520 	 * queue size, a pause frame is generated.
1521 */
1522 val64 = 0;
1523 for (i = 0; i < 4; i++) {
1524 val64 |=
1525 (((u64) 0xFF00 | nic->mac_control.
1526 mc_pause_threshold_q0q3)
1527 << (i * 2 * 8));
1528 }
1529 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1530
1531 val64 = 0;
1532 for (i = 0; i < 4; i++) {
1533 val64 |=
1534 (((u64) 0xFF00 | nic->mac_control.
1535 mc_pause_threshold_q4q7)
1536 << (i * 2 * 8));
1537 }
1538 writeq(val64, &bar0->mc_pause_thresh_q4q7);
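	/*
	 * Worked example (annotation): with the default threshold of 187
	 * (0xBB) each 16-bit field becomes 0xFF00 | 0xBB = 0xFFBB, so the
	 * value written is 0xFFBBFFBBFFBBFFBB -- the same constant that
	 * was programmed as the initial default earlier in this function.
	 */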
1539
1540 /*
 1541 	 * TxDMA will stop the read request if the number of read splits
 1542 	 * has exceeded the limit set by shared_splits.
1543 */
1544 val64 = readq(&bar0->pic_control);
1545 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1546 writeq(val64, &bar0->pic_control);
1547
1548 /*
1549 * Programming the Herc to split every write transaction
1550 * that does not start on an ADB to reduce disconnects.
1551 */
1552 if (nic->device_type == XFRAME_II_DEVICE) {
1553 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1554 writeq(val64, &bar0->wreq_split_mask);
1555 }
1556
1557 /* Setting Link stability period to 64 ms */
1558 if (nic->device_type == XFRAME_II_DEVICE) {
1559 val64 = MISC_LINK_STABILITY_PRD(3);
1560 writeq(val64, &bar0->misc_control);
1561 }
1562
1563 return SUCCESS;
1564 }
1565 #define LINK_UP_DOWN_INTERRUPT 1
1566 #define MAC_RMAC_ERR_TIMER 2
1567
1568 static int s2io_link_fault_indication(nic_t *nic)
1569 {
1570 if (nic->intr_type != INTA)
1571 return MAC_RMAC_ERR_TIMER;
1572 if (nic->device_type == XFRAME_II_DEVICE)
1573 return LINK_UP_DOWN_INTERRUPT;
1574 else
1575 return MAC_RMAC_ERR_TIMER;
1576 }
1577
1578 /**
1579 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1580 * @nic: device private variable,
1581 * @mask: A mask indicating which Intr block must be modified and,
1582 * @flag: A flag indicating whether to enable or disable the Intrs.
1583 * Description: This function will either disable or enable the interrupts
1584 * depending on the flag argument. The mask argument can be used to
1585 * enable/disable any Intr block.
1586 * Return Value: NONE.
1587 */
1588
1589 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1590 {
1591 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1592 register u64 val64 = 0, temp64 = 0;
1593
1594 /* Top level interrupt classification */
1595 /* PIC Interrupts */
1596 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1597 /* Enable PIC Intrs in the general intr mask register */
1598 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1599 if (flag == ENABLE_INTRS) {
1600 temp64 = readq(&bar0->general_int_mask);
1601 temp64 &= ~((u64) val64);
1602 writeq(temp64, &bar0->general_int_mask);
1603 /*
 1604 			 * If this is a Hercules adapter, enable the GPIO
 1605 			 * (link) interrupt; otherwise keep all PCIX, Flash,
 1606 			 * MDIO, IIC and GPIO interrupts disabled for now.
1607 * TODO
1608 */
1609 if (s2io_link_fault_indication(nic) ==
1610 LINK_UP_DOWN_INTERRUPT ) {
1611 temp64 = readq(&bar0->pic_int_mask);
1612 temp64 &= ~((u64) PIC_INT_GPIO);
1613 writeq(temp64, &bar0->pic_int_mask);
1614 temp64 = readq(&bar0->gpio_int_mask);
1615 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1616 writeq(temp64, &bar0->gpio_int_mask);
1617 } else {
1618 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1619 }
1620 /*
1621 * No MSI Support is available presently, so TTI and
1622 * RTI interrupts are also disabled.
1623 */
1624 } else if (flag == DISABLE_INTRS) {
1625 /*
1626 * Disable PIC Intrs in the general
1627 * intr mask register
1628 */
1629 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1630 temp64 = readq(&bar0->general_int_mask);
1631 val64 |= temp64;
1632 writeq(val64, &bar0->general_int_mask);
1633 }
1634 }
1635
1636 /* DMA Interrupts */
1637 /* Enabling/Disabling Tx DMA interrupts */
1638 if (mask & TX_DMA_INTR) {
1639 /* Enable TxDMA Intrs in the general intr mask register */
1640 val64 = TXDMA_INT_M;
1641 if (flag == ENABLE_INTRS) {
1642 temp64 = readq(&bar0->general_int_mask);
1643 temp64 &= ~((u64) val64);
1644 writeq(temp64, &bar0->general_int_mask);
1645 /*
 1646 			 * Keep all interrupts other than the PFC and PCC
 1647 			 * interrupts disabled at the DMA level.
1648 */
1649 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1650 TXDMA_PCC_INT_M);
1651 writeq(val64, &bar0->txdma_int_mask);
1652 /*
1653 * Enable only the MISC error 1 interrupt in PFC block
1654 */
1655 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1656 writeq(val64, &bar0->pfc_err_mask);
1657 /*
1658 * Enable only the FB_ECC error interrupt in PCC block
1659 */
1660 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1661 writeq(val64, &bar0->pcc_err_mask);
1662 } else if (flag == DISABLE_INTRS) {
1663 /*
1664 * Disable TxDMA Intrs in the general intr mask
1665 * register
1666 */
1667 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1668 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1669 temp64 = readq(&bar0->general_int_mask);
1670 val64 |= temp64;
1671 writeq(val64, &bar0->general_int_mask);
1672 }
1673 }
1674
1675 /* Enabling/Disabling Rx DMA interrupts */
1676 if (mask & RX_DMA_INTR) {
1677 /* Enable RxDMA Intrs in the general intr mask register */
1678 val64 = RXDMA_INT_M;
1679 if (flag == ENABLE_INTRS) {
1680 temp64 = readq(&bar0->general_int_mask);
1681 temp64 &= ~((u64) val64);
1682 writeq(temp64, &bar0->general_int_mask);
1683 /*
1684 * All RxDMA block interrupts are disabled for now
1685 * TODO
1686 */
1687 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1688 } else if (flag == DISABLE_INTRS) {
1689 /*
1690 * Disable RxDMA Intrs in the general intr mask
1691 * register
1692 */
1693 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1694 temp64 = readq(&bar0->general_int_mask);
1695 val64 |= temp64;
1696 writeq(val64, &bar0->general_int_mask);
1697 }
1698 }
1699
1700 /* MAC Interrupts */
1701 /* Enabling/Disabling MAC interrupts */
1702 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1703 val64 = TXMAC_INT_M | RXMAC_INT_M;
1704 if (flag == ENABLE_INTRS) {
1705 temp64 = readq(&bar0->general_int_mask);
1706 temp64 &= ~((u64) val64);
1707 writeq(temp64, &bar0->general_int_mask);
1708 /*
1709 * All MAC block error interrupts are disabled for now
1710 * TODO
1711 */
1712 } else if (flag == DISABLE_INTRS) {
1713 /*
1714 * Disable MAC Intrs in the general intr mask register
1715 */
1716 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1717 writeq(DISABLE_ALL_INTRS,
1718 &bar0->mac_rmac_err_mask);
1719
1720 temp64 = readq(&bar0->general_int_mask);
1721 val64 |= temp64;
1722 writeq(val64, &bar0->general_int_mask);
1723 }
1724 }
1725
1726 /* XGXS Interrupts */
1727 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1728 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1729 if (flag == ENABLE_INTRS) {
1730 temp64 = readq(&bar0->general_int_mask);
1731 temp64 &= ~((u64) val64);
1732 writeq(temp64, &bar0->general_int_mask);
1733 /*
1734 * All XGXS block error interrupts are disabled for now
1735 * TODO
1736 */
1737 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1738 } else if (flag == DISABLE_INTRS) {
1739 /*
1740 * Disable MC Intrs in the general intr mask register
1741 */
1742 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1743 temp64 = readq(&bar0->general_int_mask);
1744 val64 |= temp64;
1745 writeq(val64, &bar0->general_int_mask);
1746 }
1747 }
1748
1749 /* Memory Controller(MC) interrupts */
1750 if (mask & MC_INTR) {
1751 val64 = MC_INT_M;
1752 if (flag == ENABLE_INTRS) {
1753 temp64 = readq(&bar0->general_int_mask);
1754 temp64 &= ~((u64) val64);
1755 writeq(temp64, &bar0->general_int_mask);
1756 /*
1757 * Enable all MC Intrs.
1758 */
1759 writeq(0x0, &bar0->mc_int_mask);
1760 writeq(0x0, &bar0->mc_err_mask);
1761 } else if (flag == DISABLE_INTRS) {
1762 /*
1763 * Disable MC Intrs in the general intr mask register
1764 */
1765 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1766 temp64 = readq(&bar0->general_int_mask);
1767 val64 |= temp64;
1768 writeq(val64, &bar0->general_int_mask);
1769 }
1770 }
1771
1772
1773 /* Tx traffic interrupts */
1774 if (mask & TX_TRAFFIC_INTR) {
1775 val64 = TXTRAFFIC_INT_M;
1776 if (flag == ENABLE_INTRS) {
1777 temp64 = readq(&bar0->general_int_mask);
1778 temp64 &= ~((u64) val64);
1779 writeq(temp64, &bar0->general_int_mask);
1780 /*
1781 * Enable all the Tx side interrupts
1782 * writing 0 Enables all 64 TX interrupt levels
1783 */
1784 writeq(0x0, &bar0->tx_traffic_mask);
1785 } else if (flag == DISABLE_INTRS) {
1786 /*
1787 * Disable Tx Traffic Intrs in the general intr mask
1788 * register.
1789 */
1790 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1791 temp64 = readq(&bar0->general_int_mask);
1792 val64 |= temp64;
1793 writeq(val64, &bar0->general_int_mask);
1794 }
1795 }
1796
1797 /* Rx traffic interrupts */
1798 if (mask & RX_TRAFFIC_INTR) {
1799 val64 = RXTRAFFIC_INT_M;
1800 if (flag == ENABLE_INTRS) {
1801 temp64 = readq(&bar0->general_int_mask);
1802 temp64 &= ~((u64) val64);
1803 writeq(temp64, &bar0->general_int_mask);
1804 /* writing 0 Enables all 8 RX interrupt levels */
1805 writeq(0x0, &bar0->rx_traffic_mask);
1806 } else if (flag == DISABLE_INTRS) {
1807 /*
1808 * Disable Rx Traffic Intrs in the general intr mask
1809 * register.
1810 */
1811 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1812 temp64 = readq(&bar0->general_int_mask);
1813 val64 |= temp64;
1814 writeq(val64, &bar0->general_int_mask);
1815 }
1816 }
1817 }
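
/*
 * Usage sketch (mirrors start_nic() and stop_nic() below; not new driver
 * logic): callers OR together the TX_/RX_ interrupt mask bits and pass
 * either ENABLE_INTRS or DISABLE_INTRS, e.g.
 *
 *	en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *			      ENABLE_INTRS);
 */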
1818
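/*
 * check_prc_pcc_state - helper for verify_xena_quiescence() below.
 * Editorial summary of the logic that follows: Xframe-II (herc) and Xena
 * revisions >= 4 are judged by ADAPTER_STATUS_RMAC_PCC_IDLE, older Xena
 * silicon by ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE; @flag selects whether the
 * PRC quiescent bits are interpreted for the state before or after the
 * adapter enable bit has been written.
 */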
1819 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1820 {
1821 int ret = 0;
1822
1823 if (flag == FALSE) {
if (herc || (rev_id >= 4)) {
1825 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1826 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1827 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1828 ret = 1;
1829 }
} else {
1831 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1832 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1833 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1834 ret = 1;
1835 }
1836 }
1837 } else {
if (herc || (rev_id >= 4)) {
1839 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1840 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1841 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1842 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1843 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1844 ret = 1;
1845 }
1846 } else {
1847 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1848 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1849 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1850 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1851 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1852 ret = 1;
1853 }
1854 }
1855 }
1856
1857 return ret;
1858 }
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @sp : private member of the device structure.
 * @val64 : Value read from the adapter status register.
 * @flag : indicates if the adapter enable bit was ever written once
 * before.
 * Description: Returns whether the H/W is ready to go or not. The
 * comparison differs depending on whether the adapter enable bit has
 * been written before, and the calling function passes the input
 * argument flag to indicate this.
 * Return: 1 if Xena is quiescent
 * 0 if Xena is not quiescent
 */
1871
1872 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1873 {
1874 int ret = 0, herc;
1875 u64 tmp64 = ~((u64) val64);
1876 int rev_id = get_xena_rev_id(sp->pdev);
1877
1878 herc = (sp->device_type == XFRAME_II_DEVICE);
if (!(tmp64 &
      (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
       ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
       ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
       ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
       ADAPTER_STATUS_P_PLL_LOCK))) {
1886 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1887 }
1888
1889 return ret;
1890 }
1891
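/*
 * Illustrative helper (an editorial sketch, not part of the original
 * driver): shows how a caller would typically poll the adapter status
 * register with verify_xena_quiescence(). The helper name, the retry
 * budget and the 100ms settle delay are assumptions for illustration.
 */
static int wait_for_quiescence(nic_t *sp, int flag)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt = 0;

	do {
		/* Re-read the status and re-evaluate quiescence each pass */
		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp, val64, flag))
			return SUCCESS;
		msleep(100);	/* assumed settle time between polls */
	} while (++cnt < 20);

	return FAILURE;
}
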
1892 /**
1893 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
* @sp: Pointer to device specific structure
* Description:
* New procedure to clear MAC address reading problems on Alpha platforms
1897 *
1898 */
1899
1900 static void fix_mac_address(nic_t * sp)
1901 {
1902 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1903 u64 val64;
1904 int i = 0;
1905
1906 while (fix_mac[i] != END_SIGN) {
1907 writeq(fix_mac[i++], &bar0->gpio_control);
1908 udelay(10);
1909 val64 = readq(&bar0->gpio_control);
1910 }
1911 }
1912
1913 /**
1914 * start_nic - Turns the device on
1915 * @nic : device private variable.
1916 * Description:
1917 * This function actually turns the device on. Before this function is
* called, all registers are configured from their reset states
1919 * and shared memory is allocated but the NIC is still quiescent. On
1920 * calling this function, the device interrupts are cleared and the NIC is
1921 * literally switched on by writing into the adapter control register.
1922 * Return Value:
1923 * SUCCESS on success and -1 on failure.
1924 */
1925
1926 static int start_nic(struct s2io_nic *nic)
1927 {
1928 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1929 struct net_device *dev = nic->dev;
1930 register u64 val64 = 0;
1931 u16 interruptible;
1932 u16 subid, i;
1933 mac_info_t *mac_control;
1934 struct config_param *config;
1935
1936 mac_control = &nic->mac_control;
1937 config = &nic->config;
1938
1939 /* PRC Initialization and configuration */
1940 for (i = 0; i < config->rx_ring_num; i++) {
1941 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1942 &bar0->prc_rxd0_n[i]);
1943
1944 val64 = readq(&bar0->prc_ctrl_n[i]);
1945 if (nic->config.bimodal)
1946 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1947 if (nic->rxd_mode == RXD_MODE_1)
1948 val64 |= PRC_CTRL_RC_ENABLED;
1949 else
1950 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1951 writeq(val64, &bar0->prc_ctrl_n[i]);
1952 }
1953
1954 if (nic->rxd_mode == RXD_MODE_3B) {
1955 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1956 val64 = readq(&bar0->rx_pa_cfg);
1957 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1958 writeq(val64, &bar0->rx_pa_cfg);
1959 }
1960
1961 /*
* Enabling MC-RLDRAM. After enabling the device, we wait
* for around 100ms, which is approximately the time required
1964 * for the device to be ready for operation.
1965 */
1966 val64 = readq(&bar0->mc_rldram_mrs);
1967 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1968 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1969 val64 = readq(&bar0->mc_rldram_mrs);
1970
1971 msleep(100); /* Delay by around 100 ms. */
1972
1973 /* Enabling ECC Protection. */
1974 val64 = readq(&bar0->adapter_control);
1975 val64 &= ~ADAPTER_ECC_EN;
1976 writeq(val64, &bar0->adapter_control);
1977
1978 /*
1979 * Clearing any possible Link state change interrupts that
1980 * could have popped up just before Enabling the card.
1981 */
1982 val64 = readq(&bar0->mac_rmac_err_reg);
1983 if (val64)
1984 writeq(val64, &bar0->mac_rmac_err_reg);
1985
1986 /*
1987 * Verify if the device is ready to be enabled, if so enable
1988 * it.
1989 */
1990 val64 = readq(&bar0->adapter_status);
1991 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1992 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1993 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1994 (unsigned long long) val64);
1995 return FAILURE;
1996 }
1997
1998 /* Enable select interrupts */
1999 if (nic->intr_type != INTA)
2000 en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2001 else {
2002 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2003 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2004 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2005 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
2006 }
2007
2008 /*
2009 * With some switches, link might be already up at this point.
2010 * Because of this weird behavior, when we enable laser,
2011 * we may not get link. We need to handle this. We cannot
2012 * figure out which switch is misbehaving. So we are forced to
2013 * make a global change.
2014 */
2015
2016 /* Enabling Laser. */
2017 val64 = readq(&bar0->adapter_control);
2018 val64 |= ADAPTER_EOI_TX_ON;
2019 writeq(val64, &bar0->adapter_control);
2020
2021 /* SXE-002: Initialize link and activity LED */
2022 subid = nic->pdev->subsystem_device;
2023 if (((subid & 0xFF) >= 0x07) &&
2024 (nic->device_type == XFRAME_I_DEVICE)) {
2025 val64 = readq(&bar0->gpio_control);
2026 val64 |= 0x0000800000000000ULL;
2027 writeq(val64, &bar0->gpio_control);
2028 val64 = 0x0411040400000000ULL;
2029 writeq(val64, (void __iomem *)bar0 + 0x2700);
2030 }
2031
2032 /*
* We don't see link state interrupts with certain switches, so
* schedule a link state task directly from here.
2035 */
2036 schedule_work(&nic->set_link_task);
2037
2038 return SUCCESS;
2039 }
2040 /**
* s2io_txdl_getskb - Get the skb from txdl, unmap and return skb.
* If the first TxD holds the in-band UFO header (its Host_Control points
* at ufo_in_band_v), that descriptor is unmapped and skipped first.
2042 */
2043 static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2044 {
2045 nic_t *nic = fifo_data->nic;
2046 struct sk_buff *skb;
2047 TxD_t *txds;
2048 u16 j, frg_cnt;
2049
2050 txds = txdlp;
2051 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2052 pci_unmap_single(nic->pdev, (dma_addr_t)
2053 txds->Buffer_Pointer, sizeof(u64),
2054 PCI_DMA_TODEVICE);
2055 txds++;
2056 }
2057
2058 skb = (struct sk_buff *) ((unsigned long)
2059 txds->Host_Control);
2060 if (!skb) {
2061 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2062 return NULL;
2063 }
2064 pci_unmap_single(nic->pdev, (dma_addr_t)
2065 txds->Buffer_Pointer,
2066 skb->len - skb->data_len,
2067 PCI_DMA_TODEVICE);
2068 frg_cnt = skb_shinfo(skb)->nr_frags;
2069 if (frg_cnt) {
2070 txds++;
2071 for (j = 0; j < frg_cnt; j++, txds++) {
2072 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2073 if (!txds->Buffer_Pointer)
2074 break;
2075 pci_unmap_page(nic->pdev, (dma_addr_t)
2076 txds->Buffer_Pointer,
2077 frag->size, PCI_DMA_TODEVICE);
2078 }
2079 }
2080 txdlp->Host_Control = 0;
return skb;
2082 }
2083
2084 /**
2085 * free_tx_buffers - Free all queued Tx buffers
2086 * @nic : device private variable.
2087 * Description:
2088 * Free all queued Tx buffers.
2089 * Return Value: void
2090 */
2091
2092 static void free_tx_buffers(struct s2io_nic *nic)
2093 {
2094 struct net_device *dev = nic->dev;
2095 struct sk_buff *skb;
2096 TxD_t *txdp;
2097 int i, j;
2098 mac_info_t *mac_control;
2099 struct config_param *config;
2100 int cnt = 0;
2101
2102 mac_control = &nic->mac_control;
2103 config = &nic->config;
2104
2105 for (i = 0; i < config->tx_fifo_num; i++) {
2106 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2107 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2108 list_virt_addr;
2109 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2110 if (skb) {
2111 dev_kfree_skb(skb);
2112 cnt++;
2113 }
2114 }
2115 DBG_PRINT(INTR_DBG,
2116 "%s:forcibly freeing %d skbs on FIFO%d\n",
2117 dev->name, cnt, i);
2118 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2119 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2120 }
2121 }
2122
2123 /**
2124 * stop_nic - To stop the nic
* @nic : device private variable.
2126 * Description:
2127 * This function does exactly the opposite of what the start_nic()
2128 * function does. This function is called to stop the device.
2129 * Return Value:
2130 * void.
2131 */
2132
2133 static void stop_nic(struct s2io_nic *nic)
2134 {
2135 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2136 register u64 val64 = 0;
2137 u16 interruptible, i;
2138 mac_info_t *mac_control;
2139 struct config_param *config;
2140
2141 mac_control = &nic->mac_control;
2142 config = &nic->config;
2143
2144 /* Disable all interrupts */
2145 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2146 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2147 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2148 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2149
2150 /* Disable PRCs */
2151 for (i = 0; i < config->rx_ring_num; i++) {
2152 val64 = readq(&bar0->prc_ctrl_n[i]);
2153 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2154 writeq(val64, &bar0->prc_ctrl_n[i]);
2155 }
2156 }
2157
2158 static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2159 {
2160 struct net_device *dev = nic->dev;
2161 struct sk_buff *frag_list;
2162 void *tmp;
2163
2164 /* Buffer-1 receives L3/L4 headers */
2165 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2166 (nic->pdev, skb->data, l3l4hdr_size + 4,
2167 PCI_DMA_FROMDEVICE);
2168
2169 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2170 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2171 if (skb_shinfo(skb)->frag_list == NULL) {
DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n", dev->name);
return -ENOMEM;
2174 }
2175 frag_list = skb_shinfo(skb)->frag_list;
2176 frag_list->next = NULL;
2177 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2178 frag_list->data = tmp;
2179 frag_list->tail = tmp;
2180
2181 /* Buffer-2 receives L4 data payload */
2182 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2183 frag_list->data, dev->mtu,
2184 PCI_DMA_FROMDEVICE);
2185 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2186 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2187
2188 return SUCCESS;
2189 }
2190
2191 /**
2192 * fill_rx_buffers - Allocates the Rx side skbs
2193 * @nic: device private variable
2194 * @ring_no: ring number
2195 * Description:
2196 * The function allocates Rx side skbs and puts the physical
2197 * address of these buffers into the RxD buffer pointers, so that the NIC
2198 * can DMA the received frame into these locations.
* The NIC supports 3 receive modes, viz
* 1. single buffer,
* 2. three buffer and
* 3. five buffer modes.
* Each mode defines how many fragments the received frame will be split
* up into by the NIC. The frame is split into L3 header, L4 header and
* L4 payload in three buffer mode, and in 5 buffer mode the L4 payload
* itself is split into 3 fragments. The code below handles the single,
* two and three buffer descriptor layouts.
2208 * Return Value:
2209 * SUCCESS on success or an appropriate -ve value on failure.
2210 */
2211
2212 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2213 {
2214 struct net_device *dev = nic->dev;
2215 struct sk_buff *skb;
2216 RxD_t *rxdp;
2217 int off, off1, size, block_no, block_no1;
2218 u32 alloc_tab = 0;
2219 u32 alloc_cnt;
2220 mac_info_t *mac_control;
2221 struct config_param *config;
2222 u64 tmp;
2223 buffAdd_t *ba;
2224 #ifndef CONFIG_S2IO_NAPI
2225 unsigned long flags;
2226 #endif
2227 RxD_t *first_rxdp = NULL;
2228
2229 mac_control = &nic->mac_control;
2230 config = &nic->config;
2231 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2232 atomic_read(&nic->rx_bufs_left[ring_no]);
2233
2234 while (alloc_tab < alloc_cnt) {
2235 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2236 block_index;
2237 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2238 block_index;
2239 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2240 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2241
2242 rxdp = mac_control->rings[ring_no].
2243 rx_blocks[block_no].rxds[off].virt_addr;
2244
2245 if ((block_no == block_no1) && (off == off1) &&
2246 (rxdp->Host_Control)) {
2247 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2248 dev->name);
2249 DBG_PRINT(INTR_DBG, " info equated\n");
2250 goto end;
2251 }
2252 if (off && (off == rxd_count[nic->rxd_mode])) {
2253 mac_control->rings[ring_no].rx_curr_put_info.
2254 block_index++;
2255 if (mac_control->rings[ring_no].rx_curr_put_info.
2256 block_index == mac_control->rings[ring_no].
2257 block_count)
2258 mac_control->rings[ring_no].rx_curr_put_info.
2259 block_index = 0;
2260 block_no = mac_control->rings[ring_no].
2261 rx_curr_put_info.block_index;
2262 if (off == rxd_count[nic->rxd_mode])
2263 off = 0;
2264 mac_control->rings[ring_no].rx_curr_put_info.
2265 offset = off;
2266 rxdp = mac_control->rings[ring_no].
2267 rx_blocks[block_no].block_virt_addr;
2268 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2269 dev->name, rxdp);
2270 }
2271 #ifndef CONFIG_S2IO_NAPI
2272 spin_lock_irqsave(&nic->put_lock, flags);
2273 mac_control->rings[ring_no].put_pos =
2274 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2275 spin_unlock_irqrestore(&nic->put_lock, flags);
2276 #endif
2277 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2278 ((nic->rxd_mode >= RXD_MODE_3A) &&
2279 (rxdp->Control_2 & BIT(0)))) {
2280 mac_control->rings[ring_no].rx_curr_put_info.
2281 offset = off;
2282 goto end;
2283 }
2284 /* calculate size of skb based on ring mode */
2285 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2286 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2287 if (nic->rxd_mode == RXD_MODE_1)
2288 size += NET_IP_ALIGN;
2289 else if (nic->rxd_mode == RXD_MODE_3B)
2290 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2291 else
2292 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2293
2294 /* allocate skb */
2295 skb = dev_alloc_skb(size);
if (!skb) {
2297 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2298 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2299 if (first_rxdp) {
2300 wmb();
2301 first_rxdp->Control_1 |= RXD_OWN_XENA;
2302 }
return -ENOMEM;
2304 }
2305 if (nic->rxd_mode == RXD_MODE_1) {
2306 /* 1 buffer mode - normal operation mode */
2307 memset(rxdp, 0, sizeof(RxD1_t));
2308 skb_reserve(skb, NET_IP_ALIGN);
2309 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2310 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2311 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
2312 rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);
2313
2314 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2315 /*
2316 * 2 or 3 buffer mode -
2317 * Both 2 buffer mode and 3 buffer mode provides 128
2318 * byte aligned receive buffers.
2319 *
2320 * 3 buffer mode provides header separation where in
2321 * skb->data will have L3/L4 headers where as
2322 * skb_shinfo(skb)->frag_list will have the L4 data
2323 * payload
2324 */
2325
2326 memset(rxdp, 0, sizeof(RxD3_t));
2327 ba = &mac_control->rings[ring_no].ba[block_no][off];
2328 skb_reserve(skb, BUF0_LEN);
2329 tmp = (u64)(unsigned long) skb->data;
2330 tmp += ALIGN_SIZE;
2331 tmp &= ~ALIGN_SIZE;
2332 skb->data = (void *) (unsigned long)tmp;
2333 skb->tail = (void *) (unsigned long)tmp;
2334
2335 ((RxD3_t*)rxdp)->Buffer0_ptr =
2336 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2337 PCI_DMA_FROMDEVICE);
2338 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2339 if (nic->rxd_mode == RXD_MODE_3B) {
2340 /* Two buffer mode */
2341
2342 /*
2343 * Buffer2 will have L3/L4 header plus
2344 * L4 payload
2345 */
2346 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2347 (nic->pdev, skb->data, dev->mtu + 4,
2348 PCI_DMA_FROMDEVICE);
2349
2350 /* Buffer-1 will be dummy buffer not used */
2351 ((RxD3_t*)rxdp)->Buffer1_ptr =
2352 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2353 PCI_DMA_FROMDEVICE);
2354 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2355 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2356 (dev->mtu + 4);
2357 } else {
2358 /* 3 buffer mode */
2359 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2360 dev_kfree_skb_irq(skb);
2361 if (first_rxdp) {
2362 wmb();
2363 first_rxdp->Control_1 |=
2364 RXD_OWN_XENA;
2365 }
return -ENOMEM;
2367 }
2368 }
2369 rxdp->Control_2 |= BIT(0);
2370 }
2371 rxdp->Host_Control = (unsigned long) (skb);
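/*
 * Descriptor ownership is handed to the NIC in batches of
 * (1 << rxsync_frequency) RxDs: the first descriptor of each batch is
 * remembered in first_rxdp and flipped to RXD_OWN_XENA only after a
 * write barrier, once the rest of the batch has been filled in.
 */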
2372 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2373 rxdp->Control_1 |= RXD_OWN_XENA;
2374 off++;
2375 if (off == (rxd_count[nic->rxd_mode] + 1))
2376 off = 0;
2377 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2378
2379 rxdp->Control_2 |= SET_RXD_MARKER;
2380 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2381 if (first_rxdp) {
2382 wmb();
2383 first_rxdp->Control_1 |= RXD_OWN_XENA;
2384 }
2385 first_rxdp = rxdp;
2386 }
2387 atomic_inc(&nic->rx_bufs_left[ring_no]);
2388 alloc_tab++;
2389 }
2390
2391 end:
2392 /* Transfer ownership of first descriptor to adapter just before
2393 * exiting. Before that, use memory barrier so that ownership
2394 * and other fields are seen by adapter correctly.
2395 */
2396 if (first_rxdp) {
2397 wmb();
2398 first_rxdp->Control_1 |= RXD_OWN_XENA;
2399 }
2400
2401 return SUCCESS;
2402 }
2403
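/*
 * Illustrative call pattern (taken from how the driver itself replenishes
 * the rings, e.g. in s2io_poll() below): fill_rx_buffers() is invoked once
 * per ring and -ENOMEM is treated as a soft failure.
 *
 *	for (i = 0; i < config->rx_ring_num; i++)
 *		if (fill_rx_buffers(nic, i) == -ENOMEM)
 *			break;
 */
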
2404 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2405 {
2406 struct net_device *dev = sp->dev;
2407 int j;
2408 struct sk_buff *skb;
2409 RxD_t *rxdp;
2410 mac_info_t *mac_control;
2411 buffAdd_t *ba;
2412
2413 mac_control = &sp->mac_control;
2414 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2415 rxdp = mac_control->rings[ring_no].
2416 rx_blocks[blk].rxds[j].virt_addr;
2417 skb = (struct sk_buff *)
2418 ((unsigned long) rxdp->Host_Control);
2419 if (!skb) {
2420 continue;
2421 }
2422 if (sp->rxd_mode == RXD_MODE_1) {
2423 pci_unmap_single(sp->pdev, (dma_addr_t)
2424 ((RxD1_t*)rxdp)->Buffer0_ptr,
2425 dev->mtu +
2426 HEADER_ETHERNET_II_802_3_SIZE
2427 + HEADER_802_2_SIZE +
2428 HEADER_SNAP_SIZE,
2429 PCI_DMA_FROMDEVICE);
2430 memset(rxdp, 0, sizeof(RxD1_t));
} else if (sp->rxd_mode == RXD_MODE_3B) {
2432 ba = &mac_control->rings[ring_no].
2433 ba[blk][j];
2434 pci_unmap_single(sp->pdev, (dma_addr_t)
2435 ((RxD3_t*)rxdp)->Buffer0_ptr,
2436 BUF0_LEN,
2437 PCI_DMA_FROMDEVICE);
2438 pci_unmap_single(sp->pdev, (dma_addr_t)
2439 ((RxD3_t*)rxdp)->Buffer1_ptr,
2440 BUF1_LEN,
2441 PCI_DMA_FROMDEVICE);
2442 pci_unmap_single(sp->pdev, (dma_addr_t)
2443 ((RxD3_t*)rxdp)->Buffer2_ptr,
2444 dev->mtu + 4,
2445 PCI_DMA_FROMDEVICE);
2446 memset(rxdp, 0, sizeof(RxD3_t));
2447 } else {
2448 pci_unmap_single(sp->pdev, (dma_addr_t)
2449 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2450 PCI_DMA_FROMDEVICE);
2451 pci_unmap_single(sp->pdev, (dma_addr_t)
2452 ((RxD3_t*)rxdp)->Buffer1_ptr,
2453 l3l4hdr_size + 4,
2454 PCI_DMA_FROMDEVICE);
2455 pci_unmap_single(sp->pdev, (dma_addr_t)
2456 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2457 PCI_DMA_FROMDEVICE);
2458 memset(rxdp, 0, sizeof(RxD3_t));
2459 }
2460 dev_kfree_skb(skb);
2461 atomic_dec(&sp->rx_bufs_left[ring_no]);
2462 }
2463 }
2464
2465 /**
2466 * free_rx_buffers - Frees all Rx buffers
2467 * @sp: device private variable.
2468 * Description:
2469 * This function will free all Rx buffers allocated by host.
2470 * Return Value:
2471 * NONE.
2472 */
2473
2474 static void free_rx_buffers(struct s2io_nic *sp)
2475 {
2476 struct net_device *dev = sp->dev;
2477 int i, blk = 0, buf_cnt = 0;
2478 mac_info_t *mac_control;
2479 struct config_param *config;
2480
2481 mac_control = &sp->mac_control;
2482 config = &sp->config;
2483
2484 for (i = 0; i < config->rx_ring_num; i++) {
2485 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2486 free_rxd_blk(sp,i,blk);
2487
2488 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2489 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2490 mac_control->rings[i].rx_curr_put_info.offset = 0;
2491 mac_control->rings[i].rx_curr_get_info.offset = 0;
2492 atomic_set(&sp->rx_bufs_left[i], 0);
2493 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2494 dev->name, buf_cnt, i);
2495 }
2496 }
2497
2498 /**
2499 * s2io_poll - Rx interrupt handler for NAPI support
2500 * @dev : pointer to the device structure.
* @budget : The number of packets that were budgeted to be processed
* during one pass through the 'Poll' function.
* Description:
* Comes into picture only if NAPI support has been incorporated. It does
* the same thing that rx_intr_handler does, but not in an interrupt
* context, and it will process only a given number of packets.
* Return value:
* 0 if all pending Rx packets were processed and 1 if the quota was
* exhausted with packets still left to be processed.
2509 */
2510
2511 #if defined(CONFIG_S2IO_NAPI)
2512 static int s2io_poll(struct net_device *dev, int *budget)
2513 {
2514 nic_t *nic = dev->priv;
2515 int pkt_cnt = 0, org_pkts_to_process;
2516 mac_info_t *mac_control;
2517 struct config_param *config;
2518 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2519 u64 val64;
2520 int i;
2521
2522 atomic_inc(&nic->isr_cnt);
2523 mac_control = &nic->mac_control;
2524 config = &nic->config;
2525
2526 nic->pkts_to_process = *budget;
2527 if (nic->pkts_to_process > dev->quota)
2528 nic->pkts_to_process = dev->quota;
2529 org_pkts_to_process = nic->pkts_to_process;
2530
2531 val64 = readq(&bar0->rx_traffic_int);
2532 writeq(val64, &bar0->rx_traffic_int);
2533
2534 for (i = 0; i < config->rx_ring_num; i++) {
2535 rx_intr_handler(&mac_control->rings[i]);
2536 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2537 if (!nic->pkts_to_process) {
2538 /* Quota for the current iteration has been met */
2539 goto no_rx;
2540 }
2541 }
2542 if (!pkt_cnt)
2543 pkt_cnt = 1;
2544
2545 dev->quota -= pkt_cnt;
2546 *budget -= pkt_cnt;
2547 netif_rx_complete(dev);
2548
2549 for (i = 0; i < config->rx_ring_num; i++) {
2550 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2551 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2552 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2553 break;
2554 }
2555 }
2556 /* Re enable the Rx interrupts. */
2557 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2558 atomic_dec(&nic->isr_cnt);
2559 return 0;
2560
2561 no_rx:
2562 dev->quota -= pkt_cnt;
2563 *budget -= pkt_cnt;
2564
2565 for (i = 0; i < config->rx_ring_num; i++) {
2566 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2567 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2568 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2569 break;
2570 }
2571 }
2572 atomic_dec(&nic->isr_cnt);
2573 return 1;
2574 }
2575 #endif
2576
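/*
 * Editorial note on the accounting in s2io_poll() above: both *budget and
 * dev->quota are charged with the packets actually processed. Returning 0
 * (after netif_rx_complete()) tells the stack polling is complete, while
 * returning 1 asks to be polled again because the quota ran out with the
 * rings still holding packets.
 */
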
2577 /**
2578 * rx_intr_handler - Rx interrupt handler
2579 * @nic: device private variable.
2580 * Description:
2581 * If the interrupt is because of a received frame or if the
* receive ring contains fresh, as yet un-processed frames, this function
* is called. It picks out the RxD at which the last Rx processing had
* stopped, sends the skb to the OSM's Rx handler and then increments
* the offset.
2586 * Return Value:
2587 * NONE.
2588 */
2589 static void rx_intr_handler(ring_info_t *ring_data)
2590 {
2591 nic_t *nic = ring_data->nic;
2592 struct net_device *dev = (struct net_device *) nic->dev;
2593 int get_block, put_block, put_offset;
2594 rx_curr_get_info_t get_info, put_info;
2595 RxD_t *rxdp;
2596 struct sk_buff *skb;
2597 #ifndef CONFIG_S2IO_NAPI
2598 int pkt_cnt = 0;
2599 #endif
2600 int i;
2601
2602 spin_lock(&nic->rx_lock);
2603 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2604 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2605 __FUNCTION__, dev->name);
2606 spin_unlock(&nic->rx_lock);
2607 return;
2608 }
2609
2610 get_info = ring_data->rx_curr_get_info;
2611 get_block = get_info.block_index;
2612 put_info = ring_data->rx_curr_put_info;
2613 put_block = put_info.block_index;
2614 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2615 #ifndef CONFIG_S2IO_NAPI
2616 spin_lock(&nic->put_lock);
2617 put_offset = ring_data->put_pos;
2618 spin_unlock(&nic->put_lock);
2619 #else
2620 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
2621 put_info.offset;
2622 #endif
2623 while (RXD_IS_UP2DT(rxdp)) {
/* If we are next to the put index then it's a ring full condition */
if ((get_block == put_block) &&
    (get_info.offset + 1) == put_info.offset) {
DBG_PRINT(ERR_DBG, "%s: Ring Full\n", dev->name);
2628 break;
2629 }
2630 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2631 if (skb == NULL) {
2632 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2633 dev->name);
2634 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2635 spin_unlock(&nic->rx_lock);
2636 return;
2637 }
2638 if (nic->rxd_mode == RXD_MODE_1) {
2639 pci_unmap_single(nic->pdev, (dma_addr_t)
2640 ((RxD1_t*)rxdp)->Buffer0_ptr,
2641 dev->mtu +
2642 HEADER_ETHERNET_II_802_3_SIZE +
2643 HEADER_802_2_SIZE +
2644 HEADER_SNAP_SIZE,
2645 PCI_DMA_FROMDEVICE);
2646 } else if (nic->rxd_mode == RXD_MODE_3B) {
2647 pci_unmap_single(nic->pdev, (dma_addr_t)
2648 ((RxD3_t*)rxdp)->Buffer0_ptr,
2649 BUF0_LEN, PCI_DMA_FROMDEVICE);
2650 pci_unmap_single(nic->pdev, (dma_addr_t)
2651 ((RxD3_t*)rxdp)->Buffer1_ptr,
2652 BUF1_LEN, PCI_DMA_FROMDEVICE);
2653 pci_unmap_single(nic->pdev, (dma_addr_t)
2654 ((RxD3_t*)rxdp)->Buffer2_ptr,
2655 dev->mtu + 4,
2656 PCI_DMA_FROMDEVICE);
2657 } else {
2658 pci_unmap_single(nic->pdev, (dma_addr_t)
2659 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2660 PCI_DMA_FROMDEVICE);
2661 pci_unmap_single(nic->pdev, (dma_addr_t)
2662 ((RxD3_t*)rxdp)->Buffer1_ptr,
2663 l3l4hdr_size + 4,
2664 PCI_DMA_FROMDEVICE);
2665 pci_unmap_single(nic->pdev, (dma_addr_t)
2666 ((RxD3_t*)rxdp)->Buffer2_ptr,
2667 dev->mtu, PCI_DMA_FROMDEVICE);
2668 }
2669 rx_osm_handler(ring_data, rxdp);
2670 get_info.offset++;
2671 ring_data->rx_curr_get_info.offset = get_info.offset;
2672 rxdp = ring_data->rx_blocks[get_block].
2673 rxds[get_info.offset].virt_addr;
2674 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2675 get_info.offset = 0;
2676 ring_data->rx_curr_get_info.offset = get_info.offset;
2677 get_block++;
2678 if (get_block == ring_data->block_count)
2679 get_block = 0;
2680 ring_data->rx_curr_get_info.block_index = get_block;
2681 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2682 }
2683
2684 #ifdef CONFIG_S2IO_NAPI
2685 nic->pkts_to_process -= 1;
2686 if (!nic->pkts_to_process)
2687 break;
2688 #else
2689 pkt_cnt++;
2690 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2691 break;
2692 #endif
2693 }
2694 if (nic->lro) {
2695 /* Clear all LRO sessions before exiting */
2696 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2697 lro_t *lro = &nic->lro0_n[i];
2698 if (lro->in_use) {
2699 update_L3L4_header(nic, lro);
2700 queue_rx_frame(lro->parent);
2701 clear_lro_session(lro);
2702 }
2703 }
2704 }
2705
2706 spin_unlock(&nic->rx_lock);
2707 }
2708
2709 /**
2710 * tx_intr_handler - Transmit interrupt handler
2711 * @nic : device private variable
2712 * Description:
2713 * If an interrupt was raised to indicate DMA complete of the
2714 * Tx packet, this function is called. It identifies the last TxD
* whose buffer was freed and frees all skbs whose data have already been
* DMA'ed into the NIC's internal memory.
2717 * Return Value:
2718 * NONE
2719 */
2720
2721 static void tx_intr_handler(fifo_info_t *fifo_data)
2722 {
2723 nic_t *nic = fifo_data->nic;
2724 struct net_device *dev = (struct net_device *) nic->dev;
2725 tx_curr_get_info_t get_info, put_info;
2726 struct sk_buff *skb;
2727 TxD_t *txdlp;
2728
2729 get_info = fifo_data->tx_curr_get_info;
2730 put_info = fifo_data->tx_curr_put_info;
2731 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2732 list_virt_addr;
2733 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2734 (get_info.offset != put_info.offset) &&
2735 (txdlp->Host_Control)) {
2736 /* Check for TxD errors */
2737 if (txdlp->Control_1 & TXD_T_CODE) {
2738 unsigned long long err;
2739 err = txdlp->Control_1 & TXD_T_CODE;
2740 if ((err >> 48) == 0xA) {
DBG_PRINT(TX_DBG, "TxD returned due "
	  "to loss of link\n");
} else {
DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
}
2748 }
2749
2750 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2751 if (skb == NULL) {
2752 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2753 __FUNCTION__);
2754 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2755 return;
2756 }
2757
2758 /* Updating the statistics block */
2759 nic->stats.tx_bytes += skb->len;
2760 dev_kfree_skb_irq(skb);
2761
2762 get_info.offset++;
2763 get_info.offset %= get_info.fifo_len + 1;
2764 txdlp = (TxD_t *) fifo_data->list_info
2765 [get_info.offset].list_virt_addr;
2766 fifo_data->tx_curr_get_info.offset =
2767 get_info.offset;
2768 }
2769
2770 spin_lock(&nic->tx_lock);
2771 if (netif_queue_stopped(dev))
2772 netif_wake_queue(dev);
2773 spin_unlock(&nic->tx_lock);
2774 }
2775
2776 /**
* alarm_intr_handler - Alarm Interrupt handler
* @nic: device private variable
* Description: If the interrupt was neither because of an Rx packet nor
* a Tx completion, this function is called. If the interrupt indicated a
* loss of link, the OSM link status handler is invoked; for any other
* alarm interrupt, the block that raised the interrupt is reported
* and a H/W reset is issued.
2784 * Return Value:
2785 * NONE
2786 */
2787
2788 static void alarm_intr_handler(struct s2io_nic *nic)
2789 {
2790 struct net_device *dev = (struct net_device *) nic->dev;
2791 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2792 register u64 val64 = 0, err_reg = 0;
2793
2794 /* Handling link status change error Intr */
2795 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2796 err_reg = readq(&bar0->mac_rmac_err_reg);
2797 writeq(err_reg, &bar0->mac_rmac_err_reg);
2798 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2799 schedule_work(&nic->set_link_task);
2800 }
2801 }
2802
/* Handling ECC errors */
2804 val64 = readq(&bar0->mc_err_reg);
2805 writeq(val64, &bar0->mc_err_reg);
2806 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2807 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2808 nic->mac_control.stats_info->sw_stat.
2809 double_ecc_errs++;
2810 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
2811 dev->name);
2812 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
2813 if (nic->device_type != XFRAME_II_DEVICE) {
2814 /* Reset XframeI only if critical error */
2815 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
2816 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
2817 netif_stop_queue(dev);
2818 schedule_work(&nic->rst_timer_task);
2819 }
2820 }
2821 } else {
2822 nic->mac_control.stats_info->sw_stat.
2823 single_ecc_errs++;
2824 }
2825 }
2826
2827 /* In case of a serious error, the device will be Reset. */
2828 val64 = readq(&bar0->serr_source);
2829 if (val64 & SERR_SOURCE_ANY) {
2830 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2831 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
2832 (unsigned long long)val64);
2833 netif_stop_queue(dev);
2834 schedule_work(&nic->rst_timer_task);
2835 }
2836
2837 /*
* Also, as mentioned in the latest errata sheets, if a PCC_FB_ECC
* error occurs the adapter will be recycled by disabling the
* adapter enable bit and enabling it again after the device
* becomes quiescent.
2842 */
2843 val64 = readq(&bar0->pcc_err_reg);
2844 writeq(val64, &bar0->pcc_err_reg);
2845 if (val64 & PCC_FB_ECC_DB_ERR) {
2846 u64 ac = readq(&bar0->adapter_control);
2847 ac &= ~(ADAPTER_CNTL_EN);
2848 writeq(ac, &bar0->adapter_control);
2849 ac = readq(&bar0->adapter_control);
2850 schedule_work(&nic->set_link_task);
2851 }
2852
2853 /* Other type of interrupts are not being handled now, TODO */
2854 }
2855
2856 /**
2857 * wait_for_cmd_complete - waits for a command to complete.
2858 * @sp : private member of the device structure, which is a pointer to the
2859 * s2io_nic structure.
* Description: Function that waits for a command written into the RMAC
* ADDR DATA registers to be completed and returns either success or
* error depending on whether the command completed or not.
2863 * Return value:
2864 * SUCCESS on success and FAILURE on failure.
2865 */
2866
2867 static int wait_for_cmd_complete(nic_t * sp)
2868 {
2869 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2870 int ret = FAILURE, cnt = 0;
2871 u64 val64;
2872
2873 while (TRUE) {
2874 val64 = readq(&bar0->rmac_addr_cmd_mem);
2875 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2876 ret = SUCCESS;
2877 break;
2878 }
2879 msleep(50);
2880 if (cnt++ > 10)
2881 break;
2882 }
2883
2884 return ret;
2885 }
2886
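/*
 * Typical call pattern (a sketch based on how the driver drives the RMAC
 * ADDR/DATA command interface elsewhere; the offset value here is purely
 * illustrative):
 *
 *	writeq(RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
 *	       RMAC_ADDR_CMD_MEM_OFFSET(0), &bar0->rmac_addr_cmd_mem);
 *	if (wait_for_cmd_complete(sp) == FAILURE)
 *		return FAILURE;
 */
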
2887 /**
2888 * s2io_reset - Resets the card.
2889 * @sp : private member of the device structure.
2890 * Description: Function to Reset the card. This function then also
2891 * restores the previously saved PCI configuration space registers as
2892 * the card reset also resets the configuration space.
2893 * Return value:
2894 * void.
2895 */
2896
2897 static void s2io_reset(nic_t * sp)
2898 {
2899 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2900 u64 val64;
2901 u16 subid, pci_cmd;
2902
/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
2904 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2905
2906 val64 = SW_RESET_ALL;
2907 writeq(val64, &bar0->sw_reset);
2908
2909 /*
2910 * At this stage, if the PCI write is indeed completed, the
2911 * card is reset and so is the PCI Config space of the device.
2912 * So a read cannot be issued at this stage on any of the
2913 * registers to ensure the write into "sw_reset" register
2914 * has gone through.
2915 * Question: Is there any system call that will explicitly force
2916 * all the write commands still pending on the bus to be pushed
2917 * through?
* As of now I'm just giving a 250ms delay and hoping that the
2919 * PCI write to sw_reset register is done by this time.
2920 */
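/*
 * Editorial note: the usual idiom for flushing posted PCI writes is a
 * read-back from a device register, but as explained above both the
 * config and register spaces are in flux during the reset itself, so a
 * fixed delay is the only safe option here.
 */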
2921 msleep(250);
2922
2923 /* Restore the PCI state saved during initialization. */
2924 pci_restore_state(sp->pdev);
2925 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2926 pci_cmd);
2927 s2io_init_pci(sp);
2928
2929 msleep(250);
2930
2931 /* Set swapper to enable I/O register access */
2932 s2io_set_swapper(sp);
2933
2934 /* Restore the MSIX table entries from local variables */
2935 restore_xmsi_data(sp);
2936
2937 /* Clear certain PCI/PCI-X fields after reset */
2938 if (sp->device_type == XFRAME_II_DEVICE) {
2939 /* Clear parity err detect bit */
2940 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2941
2942 /* Clearing PCIX Ecc status register */
2943 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2944
2945 /* Clearing PCI_STATUS error reflected here */
2946 writeq(BIT(62), &bar0->txpic_int_reg);
2947 }
2948
2949 /* Reset device statistics maintained by OS */
2950 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2951
2952 /* SXE-002: Configure link and activity LED to turn it off */
2953 subid = sp->pdev->subsystem_device;
2954 if (((subid & 0xFF) >= 0x07) &&
2955 (sp->device_type == XFRAME_I_DEVICE)) {
2956 val64 = readq(&bar0->gpio_control);
2957 val64 |= 0x0000800000000000ULL;
2958 writeq(val64, &bar0->gpio_control);
2959 val64 = 0x0411040400000000ULL;
2960 writeq(val64, (void __iomem *)bar0 + 0x2700);
2961 }
2962
2963 /*
* Clear spurious ECC interrupts that would have occurred on
2965 * XFRAME II cards after reset.
2966 */
2967 if (sp->device_type == XFRAME_II_DEVICE) {
2968 val64 = readq(&bar0->pcc_err_reg);
2969 writeq(val64, &bar0->pcc_err_reg);
2970 }
2971
2972 sp->device_enabled_once = FALSE;
2973 }
2974
2975 /**
* s2io_set_swapper - to set the swapper control on the card
2977 * @sp : private member of the device structure,
2978 * pointer to the s2io_nic structure.
2979 * Description: Function to set the swapper control on the card
2980 * correctly depending on the 'endianness' of the system.
2981 * Return value:
2982 * SUCCESS on success and FAILURE on failure.
2983 */
2984
2985 static int s2io_set_swapper(nic_t * sp)
2986 {
2987 struct net_device *dev = sp->dev;
2988 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2989 u64 val64, valt, valr;
2990
2991 /*
2992 * Set proper endian settings and verify the same by reading
2993 * the PIF Feed-back register.
2994 */
2995
2996 val64 = readq(&bar0->pif_rd_swapper_fb);
2997 if (val64 != 0x0123456789ABCDEFULL) {
2998 int i = 0;
2999 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3000 0x8100008181000081ULL, /* FE=1, SE=0 */
3001 0x4200004242000042ULL, /* FE=0, SE=1 */
3002 0}; /* FE=0, SE=0 */
3003
while (i < 4) {
3005 writeq(value[i], &bar0->swapper_ctrl);
3006 val64 = readq(&bar0->pif_rd_swapper_fb);
3007 if (val64 == 0x0123456789ABCDEFULL)
3008 break;
3009 i++;
3010 }
3011 if (i == 4) {
3012 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3013 dev->name);
3014 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3015 (unsigned long long) val64);
3016 return FAILURE;
3017 }
3018 valr = value[i];
3019 } else {
3020 valr = readq(&bar0->swapper_ctrl);
3021 }
3022
3023 valt = 0x0123456789ABCDEFULL;
3024 writeq(valt, &bar0->xmsi_address);
3025 val64 = readq(&bar0->xmsi_address);
3026
if (val64 != valt) {
3028 int i = 0;
3029 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3030 0x0081810000818100ULL, /* FE=1, SE=0 */
3031 0x0042420000424200ULL, /* FE=0, SE=1 */
3032 0}; /* FE=0, SE=0 */
3033
while (i < 4) {
3035 writeq((value[i] | valr), &bar0->swapper_ctrl);
3036 writeq(valt, &bar0->xmsi_address);
3037 val64 = readq(&bar0->xmsi_address);
if (val64 == valt)
3039 break;
3040 i++;
3041 }
if (i == 4) {
3043 unsigned long long x = val64;
3044 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3045 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3046 return FAILURE;
3047 }
3048 }
3049 val64 = readq(&bar0->swapper_ctrl);
3050 val64 &= 0xFFFF000000000000ULL;
3051
3052 #ifdef __BIG_ENDIAN
3053 /*
* The device is set to a big endian format by default, so a
3055 * big endian driver need not set anything.
3056 */
3057 val64 |= (SWAPPER_CTRL_TXP_FE |
3058 SWAPPER_CTRL_TXP_SE |
3059 SWAPPER_CTRL_TXD_R_FE |
3060 SWAPPER_CTRL_TXD_W_FE |
3061 SWAPPER_CTRL_TXF_R_FE |
3062 SWAPPER_CTRL_RXD_R_FE |
3063 SWAPPER_CTRL_RXD_W_FE |
3064 SWAPPER_CTRL_RXF_W_FE |
3065 SWAPPER_CTRL_XMSI_FE |
3066 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3067 if (sp->intr_type == INTA)
3068 val64 |= SWAPPER_CTRL_XMSI_SE;
3069 writeq(val64, &bar0->swapper_ctrl);
3070 #else
3071 /*
3072 * Initially we enable all bits to make it accessible by the
3073 * driver, then we selectively enable only those bits that
3074 * we want to set.
3075 */
3076 val64 |= (SWAPPER_CTRL_TXP_FE |
3077 SWAPPER_CTRL_TXP_SE |
3078 SWAPPER_CTRL_TXD_R_FE |
3079 SWAPPER_CTRL_TXD_R_SE |
3080 SWAPPER_CTRL_TXD_W_FE |
3081 SWAPPER_CTRL_TXD_W_SE |
3082 SWAPPER_CTRL_TXF_R_FE |
3083 SWAPPER_CTRL_RXD_R_FE |
3084 SWAPPER_CTRL_RXD_R_SE |
3085 SWAPPER_CTRL_RXD_W_FE |
3086 SWAPPER_CTRL_RXD_W_SE |
3087 SWAPPER_CTRL_RXF_W_FE |
3088 SWAPPER_CTRL_XMSI_FE |
3089 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3090 if (sp->intr_type == INTA)
3091 val64 |= SWAPPER_CTRL_XMSI_SE;
3092 writeq(val64, &bar0->swapper_ctrl);
3093 #endif
3094 val64 = readq(&bar0->swapper_ctrl);
3095
3096 /*
3097 * Verifying if endian settings are accurate by reading a
3098 * feedback register.
3099 */
3100 val64 = readq(&bar0->pif_rd_swapper_fb);
3101 if (val64 != 0x0123456789ABCDEFULL) {
3102 /* Endian settings are incorrect, calls for another dekko. */
3103 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3104 dev->name);
3105 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3106 (unsigned long long) val64);
3107 return FAILURE;
3108 }
3109
3110 return SUCCESS;
3111 }
3112
3113 static int wait_for_msix_trans(nic_t *nic, int i)
3114 {
3115 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3116 u64 val64;
3117 int ret = 0, cnt = 0;
3118
3119 do {
3120 val64 = readq(&bar0->xmsi_access);
3121 if (!(val64 & BIT(15)))
3122 break;
3123 mdelay(1);
3124 cnt++;
3125 } while(cnt < 5);
3126 if (cnt == 5) {
3127 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3128 ret = 1;
3129 }
3130
3131 return ret;
3132 }
3133
3134 static void restore_xmsi_data(nic_t *nic)
3135 {
3136 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3137 u64 val64;
3138 int i;
3139
3140 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3141 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3142 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3143 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3144 writeq(val64, &bar0->xmsi_access);
3145 if (wait_for_msix_trans(nic, i)) {
3146 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3147 continue;
3148 }
3149 }
3150 }
3151
3152 static void store_xmsi_data(nic_t *nic)
3153 {
3154 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3155 u64 val64, addr, data;
3156 int i;
3157
3158 /* Store and display */
3159 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3160 val64 = (BIT(15) | vBIT(i, 26, 6));
3161 writeq(val64, &bar0->xmsi_access);
3162 if (wait_for_msix_trans(nic, i)) {
3163 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3164 continue;
3165 }
3166 addr = readq(&bar0->xmsi_address);
3167 data = readq(&bar0->xmsi_data);
3168 if (addr && data) {
3169 nic->msix_info[i].addr = addr;
3170 nic->msix_info[i].data = data;
3171 }
3172 }
3173 }
3174
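/*
 * Pairing note (editorial): store_xmsi_data() snapshots the MSI-X
 * address/data pairs once the table has been programmed, and
 * restore_xmsi_data() replays that snapshot after s2io_reset(), which
 * would otherwise leave the on-chip MSI-X table cleared.
 */
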
3175 int s2io_enable_msi(nic_t *nic)
3176 {
3177 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3178 u16 msi_ctrl, msg_val;
3179 struct config_param *config = &nic->config;
3180 struct net_device *dev = nic->dev;
3181 u64 val64, tx_mat, rx_mat;
3182 int i, err;
3183
3184 val64 = readq(&bar0->pic_control);
3185 val64 &= ~BIT(1);
3186 writeq(val64, &bar0->pic_control);
3187
3188 err = pci_enable_msi(nic->pdev);
3189 if (err) {
3190 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3191 nic->dev->name);
3192 return err;
3193 }
3194
3195 /*
* Enable MSI and use MSI-1 instead of the standard MSI-0
3197 * for interrupt handling.
3198 */
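/*
 * Editorial note: 0x4c and 0x42 below are raw offsets into this device's
 * PCI MSI capability structure (message data and message control
 * respectively, assuming the capability sits at 0x40); the driver pokes
 * them directly rather than going through generic helpers.
 */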
3199 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3200 msg_val ^= 0x1;
3201 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3202 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3203
3204 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3205 msi_ctrl |= 0x10;
3206 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3207
3208 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3209 tx_mat = readq(&bar0->tx_mat0_n[0]);
3210 for (i=0; i<config->tx_fifo_num; i++) {
3211 tx_mat |= TX_MAT_SET(i, 1);
3212 }
3213 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3214
3215 rx_mat = readq(&bar0->rx_mat);
3216 for (i=0; i<config->rx_ring_num; i++) {
3217 rx_mat |= RX_MAT_SET(i, 1);
3218 }
3219 writeq(rx_mat, &bar0->rx_mat);
3220
3221 dev->irq = nic->pdev->irq;
3222 return 0;
3223 }
3224
3225 static int s2io_enable_msi_x(nic_t *nic)
3226 {
3227 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3228 u64 tx_mat, rx_mat;
3229 u16 msi_control; /* Temp variable */
3230 int ret, i, j, msix_indx = 1;
3231
3232 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3233 GFP_KERNEL);
3234 if (nic->entries == NULL) {
3235 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3236 return -ENOMEM;
3237 }
3238 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3239
3240 nic->s2io_entries =
3241 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3242 GFP_KERNEL);
3243 if (nic->s2io_entries == NULL) {
3244 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3245 kfree(nic->entries);
3246 return -ENOMEM;
3247 }
3248 memset(nic->s2io_entries, 0,
3249 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3250
3251 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3252 nic->entries[i].entry = i;
3253 nic->s2io_entries[i].entry = i;
3254 nic->s2io_entries[i].arg = NULL;
3255 nic->s2io_entries[i].in_use = 0;
3256 }
3257
3258 tx_mat = readq(&bar0->tx_mat0_n[0]);
3259 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3260 tx_mat |= TX_MAT_SET(i, msix_indx);
3261 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3262 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3263 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3264 }
3265 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3266
3267 if (!nic->config.bimodal) {
3268 rx_mat = readq(&bar0->rx_mat);
3269 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3270 rx_mat |= RX_MAT_SET(j, msix_indx);
3271 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3272 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3273 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3274 }
3275 writeq(rx_mat, &bar0->rx_mat);
3276 } else {
3277 tx_mat = readq(&bar0->tx_mat0_n[7]);
3278 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
tx_mat |= TX_MAT_SET(j, msix_indx);
3280 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3281 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3282 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3283 }
3284 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3285 }
3286
3287 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3288 if (ret) {
3289 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3290 kfree(nic->entries);
3291 kfree(nic->s2io_entries);
3292 nic->entries = NULL;
3293 nic->s2io_entries = NULL;
3294 return -ENOMEM;
3295 }
3296
3297 /*
3298 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3299 * in the herc NIC. (Temp change, needs to be removed later)
3300 */
3301 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3302 msi_control |= 0x1; /* Enable MSI */
3303 pci_write_config_word(nic->pdev, 0x42, msi_control);
3304
3305 return 0;
3306 }
3307
3308 /* ********************************************************* *
3309 * Functions defined below concern the OS part of the driver *
3310 * ********************************************************* */
3311
3312 /**
3313 * s2io_open - open entry point of the driver
3314 * @dev : pointer to the device structure.
3315 * Description:
3316 * This function is the open entry point of the driver. It mainly calls a
3317 * function to allocate Rx buffers and inserts them into the buffer
3318 * descriptors and then enables the Rx part of the NIC.
3319 * Return value:
3320 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3321 * file on failure.
3322 */
3323
3324 static int s2io_open(struct net_device *dev)
3325 {
3326 nic_t *sp = dev->priv;
3327 int err = 0;
3328 int i;
3329 u16 msi_control; /* Temp variable */
3330
3331 /*
* Make sure you have link off by default every time
* the NIC is initialized
3334 */
3335 netif_carrier_off(dev);
3336 sp->last_link_state = 0;
3337
3338 /* Initialize H/W and enable interrupts */
3339 if (s2io_card_up(sp)) {
3340 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3341 dev->name);
3342 err = -ENODEV;
3343 goto hw_init_failed;
3344 }
3345
3346 /* Store the values of the MSIX table in the nic_t structure */
3347 store_xmsi_data(sp);
3348
3349 /* After proper initialization of H/W, register ISR */
3350 if (sp->intr_type == MSI) {
3351 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
3352 SA_SHIRQ, sp->name, dev);
3353 if (err) {
DBG_PRINT(ERR_DBG, "%s: MSI registration "
	  "failed\n", dev->name);
3356 goto isr_registration_failed;
3357 }
3358 }
3359 if (sp->intr_type == MSI_X) {
3360 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
3361 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
3362 sprintf(sp->desc1, "%s:MSI-X-%d-TX",
3363 dev->name, i);
3364 err = request_irq(sp->entries[i].vector,
3365 s2io_msix_fifo_handle, 0, sp->desc1,
3366 sp->s2io_entries[i].arg);
3367 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
3368 (unsigned long long)sp->msix_info[i].addr);
3369 } else {
3370 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3371 dev->name, i);
3372 err = request_irq(sp->entries[i].vector,
3373 s2io_msix_ring_handle, 0, sp->desc2,
3374 sp->s2io_entries[i].arg);
3375 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
3376 (unsigned long long)sp->msix_info[i].addr);
3377 }
3378 if (err) {
DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration "
	  "failed\n", dev->name, i);
3381 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
3382 goto isr_registration_failed;
3383 }
3384 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
3385 }
3386 }
3387 if (sp->intr_type == INTA) {
3388 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3389 sp->name, dev);
3390 if (err) {
3391 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3392 dev->name);
3393 goto isr_registration_failed;
3394 }
3395 }
3396
3397 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3398 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3399 err = -ENODEV;
3400 goto setting_mac_address_failed;
3401 }
3402
3403 netif_start_queue(dev);
3404 return 0;
3405
3406 setting_mac_address_failed:
3407 if (sp->intr_type != MSI_X)
3408 free_irq(sp->pdev->irq, dev);
3409 isr_registration_failed:
3410 del_timer_sync(&sp->alarm_timer);
3411 if (sp->intr_type == MSI_X) {
3412 if (sp->device_type == XFRAME_II_DEVICE) {
3413 for (i=1; (sp->s2io_entries[i].in_use ==
3414 MSIX_REGISTERED_SUCCESS); i++) {
3415 int vector = sp->entries[i].vector;
3416 void *arg = sp->s2io_entries[i].arg;
3417
3418 free_irq(vector, arg);
3419 }
3420 pci_disable_msix(sp->pdev);
3421
3422 /* Temp */
3423 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3424 msi_control &= 0xFFFE; /* Disable MSI */
3425 pci_write_config_word(sp->pdev, 0x42, msi_control);
3426 }
} else if (sp->intr_type == MSI)
3429 pci_disable_msi(sp->pdev);
3430 s2io_reset(sp);
3431 hw_init_failed:
3432 if (sp->intr_type == MSI_X) {
3433 if (sp->entries)
3434 kfree(sp->entries);
3435 if (sp->s2io_entries)
3436 kfree(sp->s2io_entries);
3437 }
3438 return err;
3439 }
3440
3441 /**
* s2io_close - close entry point of the driver
3443 * @dev : device pointer.
3444 * Description:
3445 * This is the stop entry point of the driver. It needs to undo exactly
* whatever was done by the open entry point, thus it's usually referred to
* as the close function. Among other things, this function mainly stops the
3448 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3449 * Return value:
3450 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3451 * file on failure.
3452 */
3453
3454 static int s2io_close(struct net_device *dev)
3455 {
3456 nic_t *sp = dev->priv;
3457 int i;
3458 u16 msi_control;
3459
3460 flush_scheduled_work();
3461 netif_stop_queue(dev);
3462 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3463 s2io_card_down(sp);
3464
3465 if (sp->intr_type == MSI_X) {
3466 if (sp->device_type == XFRAME_II_DEVICE) {
3467 for (i=1; (sp->s2io_entries[i].in_use ==
3468 MSIX_REGISTERED_SUCCESS); i++) {
3469 int vector = sp->entries[i].vector;
3470 void *arg = sp->s2io_entries[i].arg;
3471
3472 free_irq(vector, arg);
3473 }
3474 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3475 msi_control &= 0xFFFE; /* Disable MSI */
3476 pci_write_config_word(sp->pdev, 0x42, msi_control);
3477
3478 pci_disable_msix(sp->pdev);
3479 }
} else {
3482 free_irq(sp->pdev->irq, dev);
3483 if (sp->intr_type == MSI)
3484 pci_disable_msi(sp->pdev);
3485 }
3486 sp->device_close_flag = TRUE; /* Device is shut down. */
3487 return 0;
3488 }
3489
3490 /**
* s2io_xmit - Tx entry point of the driver
3492 * @skb : the socket buffer containing the Tx data.
3493 * @dev : device pointer.
3494 * Description :
3495 * This function is the Tx entry point of the driver. S2IO NIC supports
3496 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
* NOTE: when the device can't queue the pkt, just the trans_start variable
* will not be updated.
3499 * Return value:
3500 * 0 on success & 1 on failure.
3501 */
3502
3503 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3504 {
3505 nic_t *sp = dev->priv;
3506 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3507 register u64 val64;
3508 TxD_t *txdp;
3509 TxFIFO_element_t __iomem *tx_fifo;
3510 unsigned long flags;
3511 #ifdef NETIF_F_TSO
3512 int mss;
3513 #endif
3514 u16 vlan_tag = 0;
3515 int vlan_priority = 0;
3516 mac_info_t *mac_control;
3517 struct config_param *config;
3518
3519 mac_control = &sp->mac_control;
3520 config = &sp->config;
3521
3522 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3523 spin_lock_irqsave(&sp->tx_lock, flags);
3524 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3525 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3526 dev->name);
3527 spin_unlock_irqrestore(&sp->tx_lock, flags);
3528 dev_kfree_skb(skb);
3529 return 0;
3530 }
3531
3532 queue = 0;
3533
3534 /* Get Fifo number to Transmit based on vlan priority */
3535 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3536 vlan_tag = vlan_tx_tag_get(skb);
3537 vlan_priority = vlan_tag >> 13;
3538 queue = config->fifo_mapping[vlan_priority];
3539 }
3540
3541 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3542 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3543 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3544 list_virt_addr;
3545
3546 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3547 /* Avoid "put" pointer going beyond "get" pointer */
3548 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3549 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3550 netif_stop_queue(dev);
3551 dev_kfree_skb(skb);
3552 spin_unlock_irqrestore(&sp->tx_lock, flags);
3553 return 0;
3554 }
3555
3556 /* A buffer with no data will be dropped */
3557 if (!skb->len) {
3558 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3559 dev_kfree_skb(skb);
3560 spin_unlock_irqrestore(&sp->tx_lock, flags);
3561 return 0;
3562 }
3563
3564 txdp->Control_1 = 0;
3565 txdp->Control_2 = 0;
3566 #ifdef NETIF_F_TSO
3567 mss = skb_shinfo(skb)->tso_size;
3568 if (mss) {
3569 txdp->Control_1 |= TXD_TCP_LSO_EN;
3570 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3571 }
3572 #endif
3573 if (skb->ip_summed == CHECKSUM_HW) {
3574 txdp->Control_2 |=
3575 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3576 TXD_TX_CKO_UDP_EN);
3577 }
3578 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3579 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3580 txdp->Control_2 |= config->tx_intr_type;
3581
3582 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3583 txdp->Control_2 |= TXD_VLAN_ENABLE;
3584 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3585 }
3586
3587 frg_len = skb->len - skb->data_len;
3588 if (skb_shinfo(skb)->ufo_size) {
3589 int ufo_size;
3590
3591 ufo_size = skb_shinfo(skb)->ufo_size;
3592 ufo_size &= ~7;
3593 txdp->Control_1 |= TXD_UFO_EN;
3594 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3595 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3596 #ifdef __BIG_ENDIAN
3597 sp->ufo_in_band_v[put_off] =
3598 (u64)skb_shinfo(skb)->ip6_frag_id;
3599 #else
3600 sp->ufo_in_band_v[put_off] =
3601 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3602 #endif
3603 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3604 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3605 sp->ufo_in_band_v,
3606 sizeof(u64), PCI_DMA_TODEVICE);
3607 txdp++;
3608 txdp->Control_1 = 0;
3609 txdp->Control_2 = 0;
3610 }
3611
3612 txdp->Buffer_Pointer = pci_map_single
3613 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3614 txdp->Host_Control = (unsigned long) skb;
3615 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3616
3617 if (skb_shinfo(skb)->ufo_size)
3618 txdp->Control_1 |= TXD_UFO_EN;
3619
3620 frg_cnt = skb_shinfo(skb)->nr_frags;
3621 /* For fragmented SKB. */
3622 for (i = 0; i < frg_cnt; i++) {
3623 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3624 /* A '0' length fragment will be ignored */
3625 if (!frag->size)
3626 continue;
3627 txdp++;
3628 txdp->Buffer_Pointer = (u64) pci_map_page
3629 (sp->pdev, frag->page, frag->page_offset,
3630 frag->size, PCI_DMA_TODEVICE);
3631 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
3632 if (skb_shinfo(skb)->ufo_size)
3633 txdp->Control_1 |= TXD_UFO_EN;
3634 }
3635 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3636
3637 if (skb_shinfo(skb)->ufo_size)
3638 frg_cnt++; /* as TxD0 was used for the in-band header */
3639
3640 tx_fifo = mac_control->tx_FIFO_start[queue];
3641 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3642 writeq(val64, &tx_fifo->TxDL_Pointer);
3643
3644 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3645 TX_FIFO_LAST_LIST);
3646
3647 #ifdef NETIF_F_TSO
3648 if (mss)
3649 val64 |= TX_FIFO_SPECIAL_FUNC;
3650 #endif
3651 if (skb_shinfo(skb)->ufo_size)
3652 val64 |= TX_FIFO_SPECIAL_FUNC;
3653 writeq(val64, &tx_fifo->List_Control);
3654
3655 mmiowb();
3656
3657 put_off++;
3658 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3659 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3660
3661 /* Avoid "put" pointer going beyond "get" pointer */
3662 if (((put_off + 1) % queue_len) == get_off) {
3663 DBG_PRINT(TX_DBG,
3664 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3665 put_off, get_off);
3666 netif_stop_queue(dev);
3667 }
3668
3669 dev->trans_start = jiffies;
3670 spin_unlock_irqrestore(&sp->tx_lock, flags);
3671
3672 return 0;
3673 }
3674
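/**
 * s2io_alarm_handle - periodic alarm servicing routine.
 * @data: device private structure, passed as an unsigned long.
 * Description: Timer callback that services any pending alarm
 * interrupts and re-arms itself to run again after half a second.
 */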
3675 static void
3676 s2io_alarm_handle(unsigned long data)
3677 {
3678 nic_t *sp = (nic_t *)data;
3679
3680 alarm_intr_handler(sp);
3681 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3682 }
3683
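/**
 * s2io_msi_handle - MSI interrupt handler.
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the net_device structure of the NIC.
 * @regs: pointer to the registers pushed on the stack.
 * Description: Services all Rx rings and Tx FIFOs and, depending on
 * how far each Rx ring has drained, replenishes its buffers either
 * inline or via the tasklet.
 */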
3684 static irqreturn_t
3685 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
3686 {
3687 struct net_device *dev = (struct net_device *) dev_id;
3688 nic_t *sp = dev->priv;
3689 int i;
3690 int ret;
3691 mac_info_t *mac_control;
3692 struct config_param *config;
3693
3694 atomic_inc(&sp->isr_cnt);
3695 mac_control = &sp->mac_control;
3696 config = &sp->config;
3697 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
3698
3699 /* If Intr is because of Rx Traffic */
3700 for (i = 0; i < config->rx_ring_num; i++)
3701 rx_intr_handler(&mac_control->rings[i]);
3702
3703 /* If Intr is because of Tx Traffic */
3704 for (i = 0; i < config->tx_fifo_num; i++)
3705 tx_intr_handler(&mac_control->fifos[i]);
3706
3707 /*
3708 * If the Rx buffer count is below the panic threshold then
3709 * reallocate the buffers from the interrupt handler itself,
3710 * else schedule a tasklet to reallocate the buffers.
3711 */
3712 for (i = 0; i < config->rx_ring_num; i++) {
3713 if (!sp->lro) {
3714 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3715 int level = rx_buffer_level(sp, rxb_size, i);
3716
3717 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3718 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
3719 dev->name);
3720 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3721 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3722 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3723 dev->name);
3724 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3725 clear_bit(0, (&sp->tasklet_status));
3726 atomic_dec(&sp->isr_cnt);
3727 return IRQ_HANDLED;
3728 }
3729 clear_bit(0, (&sp->tasklet_status));
3730 } else if (level == LOW) {
3731 tasklet_schedule(&sp->task);
3732 }
3733 }
3734 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
3735 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3736 dev->name);
3737 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
3738 break;
3739 }
3740 }
3741
3742 atomic_dec(&sp->isr_cnt);
3743 return IRQ_HANDLED;
3744 }
3745
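/**
 * s2io_msix_ring_handle - MSI-X handler for a single Rx ring.
 * @irq: the irq of the device.
 * @dev_id: pointer to the ring_info_t of the ring that raised the
 * interrupt.
 * @regs: pointer to the registers pushed on the stack.
 * Description: Services one Rx ring and replenishes its buffers,
 * either inline or via the tasklet, depending on how far it has
 * drained.
 */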
3746 static irqreturn_t
3747 s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
3748 {
3749 ring_info_t *ring = (ring_info_t *)dev_id;
3750 nic_t *sp = ring->nic;
3751 struct net_device *dev = sp->dev;
3752 int rxb_size, level, rng_n;
3753
3754 atomic_inc(&sp->isr_cnt);
3755 rx_intr_handler(ring);
3756
3757 rng_n = ring->ring_no;
3758 if (!sp->lro) {
3759 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3760 level = rx_buffer_level(sp, rxb_size, rng_n);
3761
3762 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3763 int ret;
3764 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3765 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3766 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3767 DBG_PRINT(ERR_DBG, "Out of memory in %s",
3768 __FUNCTION__);
3769 clear_bit(0, (&sp->tasklet_status));
3770 return IRQ_HANDLED;
3771 }
3772 clear_bit(0, (&sp->tasklet_status));
3773 } else if (level == LOW) {
3774 tasklet_schedule(&sp->task);
3775 }
3776 }
3777 else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
3778 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
3779 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
3780 }
3781
3782 atomic_dec(&sp->isr_cnt);
3783
3784 return IRQ_HANDLED;
3785 }
3786
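/**
 * s2io_msix_fifo_handle - MSI-X handler for a single Tx FIFO.
 * @irq: the irq of the device.
 * @dev_id: pointer to the fifo_info_t of the FIFO that raised the
 * interrupt.
 * @regs: pointer to the registers pushed on the stack.
 * Description: Completes the transmitted descriptors of one Tx FIFO.
 */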
3787 static irqreturn_t
3788 s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
3789 {
3790 fifo_info_t *fifo = (fifo_info_t *)dev_id;
3791 nic_t *sp = fifo->nic;
3792
3793 atomic_inc(&sp->isr_cnt);
3794 tx_intr_handler(fifo);
3795 atomic_dec(&sp->isr_cnt);
3796 return IRQ_HANDLED;
3797 }
3798
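/**
 * s2io_txpic_intr_handle - handles GPIO (link state) PIC interrupts.
 * @sp: device private structure.
 * Description: Decodes the GPIO interrupt cause, triggers link-state
 * processing when the link has actually changed, and re-programs the
 * GPIO interrupt mask so that only the opposite transition (up vs.
 * down) can raise the next interrupt.
 */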
3799 static void s2io_txpic_intr_handle(nic_t *sp)
3800 {
3801 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3802 u64 val64;
3803
3804 val64 = readq(&bar0->pic_int_status);
3805 if (val64 & PIC_INT_GPIO) {
3806 val64 = readq(&bar0->gpio_int_reg);
3807 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3808 (val64 & GPIO_INT_REG_LINK_UP)) {
3809 val64 |= GPIO_INT_REG_LINK_DOWN;
3810 val64 |= GPIO_INT_REG_LINK_UP;
3811 writeq(val64, &bar0->gpio_int_reg);
3812 goto masking;
3813 }
3814
3815 if (((sp->last_link_state == LINK_UP) &&
3816 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3817 ((sp->last_link_state == LINK_DOWN) &&
3818 (val64 & GPIO_INT_REG_LINK_UP))) {
3819 val64 = readq(&bar0->gpio_int_mask);
3820 val64 |= GPIO_INT_MASK_LINK_DOWN;
3821 val64 |= GPIO_INT_MASK_LINK_UP;
3822 writeq(val64, &bar0->gpio_int_mask);
3823 s2io_set_link((unsigned long)sp);
3824 }
3825 masking:
3826 if (sp->last_link_state == LINK_UP) {
3827 /*enable down interrupt */
3828 val64 = readq(&bar0->gpio_int_mask);
3829 /* unmasks link down intr */
3830 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3831 /* masks link up intr */
3832 val64 |= GPIO_INT_MASK_LINK_UP;
3833 writeq(val64, &bar0->gpio_int_mask);
3834 } else {
3835 /*enable UP Interrupt */
3836 val64 = readq(&bar0->gpio_int_mask);
3837 /* unmasks link up interrupt */
3838 val64 &= ~GPIO_INT_MASK_LINK_UP;
3839 /* masks link down interrupt */
3840 val64 |= GPIO_INT_MASK_LINK_DOWN;
3841 writeq(val64, &bar0->gpio_int_mask);
3842 }
3843 }
3844 }
3845
3846 /**
3847 * s2io_isr - ISR handler of the device.
3848 * @irq: the irq of the device.
3849 * @dev_id: a void pointer to the dev structure of the NIC.
3850 * @pt_regs: pointer to the registers pushed on the stack.
3851 * Description: This function is the ISR handler of the device. It
3852 * identifies the reason for the interrupt and calls the relevant
3853 * service routines. As a contingency measure, this ISR also allocates
3854 * receive buffers if their number falls below the panic value, which is
3855 * presently set to 25% of the original number of receive buffers allocated.
3856 * Return value:
3857 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
3858 * IRQ_NONE: will be returned if interrupt is not from our device
3859 */
3860 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3861 {
3862 struct net_device *dev = (struct net_device *) dev_id;
3863 nic_t *sp = dev->priv;
3864 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3865 int i;
3866 u64 reason = 0, val64;
3867 mac_info_t *mac_control;
3868 struct config_param *config;
3869
3870 atomic_inc(&sp->isr_cnt);
3871 mac_control = &sp->mac_control;
3872 config = &sp->config;
3873
3874 /*
3875 * Identify the cause for interrupt and call the appropriate
3876 * interrupt handler. Causes for the interrupt could be;
3877 * 1. Rx of packet.
3878 * 2. Tx complete.
3879 * 3. Link down.
3880 * 4. Error in any functional blocks of the NIC.
3881 */
3882 reason = readq(&bar0->general_int_status);
3883
3884 if (!reason) {
3885 /* The interrupt was not raised by Xena. */
3886 atomic_dec(&sp->isr_cnt);
3887 return IRQ_NONE;
3888 }
3889
3890 #ifdef CONFIG_S2IO_NAPI
3891 if (reason & GEN_INTR_RXTRAFFIC) {
3892 if (netif_rx_schedule_prep(dev)) {
3893 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3894 DISABLE_INTRS);
3895 __netif_rx_schedule(dev);
3896 }
3897 }
3898 #else
3899 /* If Intr is because of Rx Traffic */
3900 if (reason & GEN_INTR_RXTRAFFIC) {
3901 /*
3902 * rx_traffic_int reg is an R1 register, writing all 1's
3903 * ensures that the actual interrupt-causing bit gets
3904 * cleared, so a read can be avoided.
3905 */
3906 val64 = 0xFFFFFFFFFFFFFFFFULL;
3907 writeq(val64, &bar0->rx_traffic_int);
3908 for (i = 0; i < config->rx_ring_num; i++) {
3909 rx_intr_handler(&mac_control->rings[i]);
3910 }
3911 }
3912 #endif
3913
3914 /* If Intr is because of Tx Traffic */
3915 if (reason & GEN_INTR_TXTRAFFIC) {
3916 /*
3917 * tx_traffic_int reg is an R1 register, writing all 1's
3918 * ensures that the actual interrupt-causing bit gets
3919 * cleared, so a read can be avoided.
3920 */
3921 val64 = 0xFFFFFFFFFFFFFFFFULL;
3922 writeq(val64, &bar0->tx_traffic_int);
3923
3924 for (i = 0; i < config->tx_fifo_num; i++)
3925 tx_intr_handler(&mac_control->fifos[i]);
3926 }
3927
3928 if (reason & GEN_INTR_TXPIC)
3929 s2io_txpic_intr_handle(sp);
3930 /*
3931 * If the Rx buffer count is below the panic threshold then
3932 * reallocate the buffers from the interrupt handler itself,
3933 * else schedule a tasklet to reallocate the buffers.
3934 */
3935 #ifndef CONFIG_S2IO_NAPI
3936 for (i = 0; i < config->rx_ring_num; i++) {
3937 if (!sp->lro) {
3938 int ret;
3939 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3940 int level = rx_buffer_level(sp, rxb_size, i);
3941
3942 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3943 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
3944 dev->name);
3945 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3946 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3947 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3948 dev->name);
3949 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3950 clear_bit(0, (&sp->tasklet_status));
3951 atomic_dec(&sp->isr_cnt);
3952 return IRQ_HANDLED;
3953 }
3954 clear_bit(0, (&sp->tasklet_status));
3955 } else if (level == LOW) {
3956 tasklet_schedule(&sp->task);
3957 }
3958 }
3959 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
3960 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3961 dev->name);
3962 DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
3963 break;
3964 }
3965 }
3966 #endif
3967
3968 atomic_dec(&sp->isr_cnt);
3969 return IRQ_HANDLED;
3970 }
3971
3972 /**
3973 * s2io_updt_stats - requests a one-shot update of the hardware statistics.
3974 */
3975 static void s2io_updt_stats(nic_t *sp)
3976 {
3977 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3978 u64 val64;
3979 int cnt = 0;
3980
3981 if (atomic_read(&sp->card_state) == CARD_UP) {
3982 /* Apprx 30us on a 133 MHz bus */
3983 val64 = SET_UPDT_CLICKS(10) |
3984 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3985 writeq(val64, &bar0->stat_cfg);
3986 do {
3987 udelay(100);
3988 val64 = readq(&bar0->stat_cfg);
3989 if (!(val64 & BIT(0)))
3990 break;
3991 cnt++;
3992 if (cnt == 5)
3993 break; /* Updt failed */
3994 } while(1);
3995 }
3996 }
3997
3998 /**
3999 * s2io_get_stats - Updates the device statistics structure.
4000 * @dev : pointer to the device structure.
4001 * Description:
4002 * This function updates the device statistics structure in the s2io_nic
4003 * structure and returns a pointer to the same.
4004 * Return value:
4005 * pointer to the updated net_device_stats structure.
4006 */
4007
4008 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4009 {
4010 nic_t *sp = dev->priv;
4011 mac_info_t *mac_control;
4012 struct config_param *config;
4013
4014
4015 mac_control = &sp->mac_control;
4016 config = &sp->config;
4017
4018 /* Configure Stats for immediate updt */
4019 s2io_updt_stats(sp);
4020
4021 sp->stats.tx_packets =
4022 le32_to_cpu(mac_control->stats_info->tmac_frms);
4023 sp->stats.tx_errors =
4024 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4025 sp->stats.rx_errors =
4026 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
4027 sp->stats.multicast =
4028 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4029 sp->stats.rx_length_errors =
4030 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
4031
4032 return (&sp->stats);
4033 }
4034
4035 /**
4036 * s2io_set_multicast - entry point for multicast address enable/disable.
4037 * @dev : pointer to the device structure
4038 * Description:
4039 * This function is a driver entry point which gets called by the kernel
4040 * whenever multicast addresses must be enabled/disabled. This also gets
4041 * called to set/reset promiscuous mode. Depending on the device flags, we
4042 * determine whether multicast addresses must be enabled or whether
4043 * promiscuous mode is to be set or reset.
4044 * Return value:
4045 * void.
4046 */
4047
4048 static void s2io_set_multicast(struct net_device *dev)
4049 {
4050 int i, j, prev_cnt;
4051 struct dev_mc_list *mclist;
4052 nic_t *sp = dev->priv;
4053 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4054 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4055 0xfeffffffffffULL;
4056 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4057 void __iomem *add;
4058
4059 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4060 /* Enable all Multicast addresses */
4061 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4062 &bar0->rmac_addr_data0_mem);
4063 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4064 &bar0->rmac_addr_data1_mem);
4065 val64 = RMAC_ADDR_CMD_MEM_WE |
4066 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4067 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4068 writeq(val64, &bar0->rmac_addr_cmd_mem);
4069 /* Wait till command completes */
4070 wait_for_cmd_complete(sp);
4071
4072 sp->m_cast_flg = 1;
4073 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4074 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4075 /* Disable all Multicast addresses */
4076 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4077 &bar0->rmac_addr_data0_mem);
4078 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4079 &bar0->rmac_addr_data1_mem);
4080 val64 = RMAC_ADDR_CMD_MEM_WE |
4081 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4082 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4083 writeq(val64, &bar0->rmac_addr_cmd_mem);
4084 /* Wait till command completes */
4085 wait_for_cmd_complete(sp);
4086
4087 sp->m_cast_flg = 0;
4088 sp->all_multi_pos = 0;
4089 }
4090
4091 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4092 /* Put the NIC into promiscuous mode */
4093 add = &bar0->mac_cfg;
4094 val64 = readq(&bar0->mac_cfg);
4095 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4096
4097 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4098 writel((u32) val64, add);
4099 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4100 writel((u32) (val64 >> 32), (add + 4));
4101
4102 val64 = readq(&bar0->mac_cfg);
4103 sp->promisc_flg = 1;
4104 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4105 dev->name);
4106 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4107 /* Remove the NIC from promiscuous mode */
4108 add = &bar0->mac_cfg;
4109 val64 = readq(&bar0->mac_cfg);
4110 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4111
4112 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4113 writel((u32) val64, add);
4114 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4115 writel((u32) (val64 >> 32), (add + 4));
4116
4117 val64 = readq(&bar0->mac_cfg);
4118 sp->promisc_flg = 0;
4119 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4120 dev->name);
4121 }
4122
4123 /* Update individual M_CAST address list */
4124 if ((!sp->m_cast_flg) && dev->mc_count) {
4125 if (dev->mc_count >
4126 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4127 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4128 dev->name);
4129 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4130 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4131 return;
4132 }
4133
4134 prev_cnt = sp->mc_addr_count;
4135 sp->mc_addr_count = dev->mc_count;
4136
4137 /* Clear out the previous list of Mc in the H/W. */
4138 for (i = 0; i < prev_cnt; i++) {
4139 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4140 &bar0->rmac_addr_data0_mem);
4141 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4142 &bar0->rmac_addr_data1_mem);
4143 val64 = RMAC_ADDR_CMD_MEM_WE |
4144 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4145 RMAC_ADDR_CMD_MEM_OFFSET
4146 (MAC_MC_ADDR_START_OFFSET + i);
4147 writeq(val64, &bar0->rmac_addr_cmd_mem);
4148
4149 /* Wait till command completes */
4150 if (wait_for_cmd_complete(sp)) {
4151 DBG_PRINT(ERR_DBG, "%s: Adding ",
4152 dev->name);
4153 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4154 return;
4155 }
4156 }
4157
4158 /* Create the new Rx filter list and update the same in H/W. */
4159 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4160 i++, mclist = mclist->next) {
4161 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4162 ETH_ALEN);
4163 mac_addr = 0;
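/* Pack the six address bytes into a 48-bit value. */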
4164 for (j = 0; j < ETH_ALEN; j++) {
4165 mac_addr |= mclist->dmi_addr[j];
4166 mac_addr <<= 8;
4167 }
4168 mac_addr >>= 8;
4169 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4170 &bar0->rmac_addr_data0_mem);
4171 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4172 &bar0->rmac_addr_data1_mem);
4173 val64 = RMAC_ADDR_CMD_MEM_WE |
4174 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4175 RMAC_ADDR_CMD_MEM_OFFSET
4176 (i + MAC_MC_ADDR_START_OFFSET);
4177 writeq(val64, &bar0->rmac_addr_cmd_mem);
4178
4179 /* Wait till command completes */
4180 if (wait_for_cmd_complete(sp)) {
4181 DBG_PRINT(ERR_DBG, "%s: Adding ",
4182 dev->name);
4183 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4184 return;
4185 }
4186 }
4187 }
4188 }
4189
4190 /**
4191 * s2io_set_mac_addr - Programs the Xframe mac address
4192 * @dev : pointer to the device structure.
4193 * @addr: a uchar pointer to the new MAC address which is to be set.
4194 * Description : This procedure will program the Xframe to receive
4195 * frames with the new MAC address.
4196 * Return value: SUCCESS on success and an appropriate (-)ve integer
4197 * as defined in errno.h file on failure.
4198 */
4199
4200 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4201 {
4202 nic_t *sp = dev->priv;
4203 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4204 register u64 val64, mac_addr = 0;
4205 int i;
4206
4207 /*
4208 * Set the new MAC address as the new unicast filter and reflect this
4209 * change on the device address registered with the OS. It will be
4210 * at offset 0.
4211 */
4212 for (i = 0; i < ETH_ALEN; i++) {
4213 mac_addr <<= 8;
4214 mac_addr |= addr[i];
4215 }
4216
4217 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4218 &bar0->rmac_addr_data0_mem);
4219
4220 val64 =
4221 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4222 RMAC_ADDR_CMD_MEM_OFFSET(0);
4223 writeq(val64, &bar0->rmac_addr_cmd_mem);
4224 /* Wait till command completes */
4225 if (wait_for_cmd_complete(sp)) {
4226 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4227 return FAILURE;
4228 }
4229
4230 return SUCCESS;
4231 }
4232
4233 /**
4234 * s2io_ethtool_sset - Sets different link parameters.
4235 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4236 * @info: pointer to the structure with parameters given by ethtool to set
4237 * link information.
4238 * Description:
4239 * The function sets different link parameters provided by the user onto
4240 * the NIC.
4241 * Return value:
4242 * 0 on success.
4243 */
4244
4245 static int s2io_ethtool_sset(struct net_device *dev,
4246 struct ethtool_cmd *info)
4247 {
4248 nic_t *sp = dev->priv;
4249 if ((info->autoneg == AUTONEG_ENABLE) ||
4250 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4251 return -EINVAL;
4252 else {
4253 s2io_close(sp->dev);
4254 s2io_open(sp->dev);
4255 }
4256
4257 return 0;
4258 }
4259
4260 /**
4261 * s2io_ethtool_gset - Returns link-specific information.
4262 * @sp : private member of the device structure, pointer to the
4263 * s2io_nic structure.
4264 * @info : pointer to the structure with parameters given by ethtool
4265 * to return link information.
4266 * Description:
4267 * Returns link-specific information like speed, duplex, etc. to ethtool.
4268 * Return value :
4269 * return 0 on success.
4270 */
4271
4272 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4273 {
4274 nic_t *sp = dev->priv;
4275 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4276 info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
4277 info->port = PORT_FIBRE;
4278 /* info->transceiver?? TODO */
4279
4280 if (netif_carrier_ok(sp->dev)) {
4281 info->speed = 10000;
4282 info->duplex = DUPLEX_FULL;
4283 } else {
4284 info->speed = -1;
4285 info->duplex = -1;
4286 }
4287
4288 info->autoneg = AUTONEG_DISABLE;
4289 return 0;
4290 }
4291
4292 /**
4293 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4294 * @sp : private member of the device structure, which is a pointer to the
4295 * s2io_nic structure.
4296 * @info : pointer to the structure with parameters given by ethtool to
4297 * return driver information.
4298 * Description:
4299 * Returns driver-specific information like name, version, etc. to ethtool.
4300 * Return value:
4301 * void
4302 */
4303
4304 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4305 struct ethtool_drvinfo *info)
4306 {
4307 nic_t *sp = dev->priv;
4308
4309 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4310 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4311 strncpy(info->fw_version, "", sizeof(info->fw_version));
4312 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4313 info->regdump_len = XENA_REG_SPACE;
4314 info->eedump_len = XENA_EEPROM_SPACE;
4315 info->testinfo_len = S2IO_TEST_LEN;
4316 info->n_stats = S2IO_STAT_LEN;
4317 }
4318
4319 /**
4320 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
4321 * @sp: private member of the device structure, which is a pointer to the
4322 * s2io_nic structure.
4323 * @regs : pointer to the structure with parameters given by ethtool for
4324 * dumping the registers.
4325 * @reg_space: The input argument into which all the registers are dumped.
4326 * Description:
4327 * Dumps the entire register space of the Xframe NIC into the user-given
4328 * buffer area.
4329 * Return value :
4330 * void .
4331 */
4332
4333 static void s2io_ethtool_gregs(struct net_device *dev,
4334 struct ethtool_regs *regs, void *space)
4335 {
4336 int i;
4337 u64 reg;
4338 u8 *reg_space = (u8 *) space;
4339 nic_t *sp = dev->priv;
4340
4341 regs->len = XENA_REG_SPACE;
4342 regs->version = sp->pdev->subsystem_device;
4343
4344 for (i = 0; i < regs->len; i += 8) {
4345 reg = readq(sp->bar0 + i);
4346 memcpy((reg_space + i), &reg, 8);
4347 }
4348 }
4349
4350 /**
4351 * s2io_phy_id - timer function that alternates adapter LED.
4352 * @data : address of the private member of the device structure, which
4353 * is a pointer to the s2io_nic structure, provided as an unsigned long.
4354 * Description: This is actually the timer function that toggles the
4355 * adapter LED bit of the adapter control register on every invocation.
4356 * The timer is set to fire every 1/2 second, hence the NIC's LED blinks
4357 * once every second.
4358 */
4359 static void s2io_phy_id(unsigned long data)
4360 {
4361 nic_t *sp = (nic_t *) data;
4362 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4363 u64 val64 = 0;
4364 u16 subid;
4365
4366 subid = sp->pdev->subsystem_device;
4367 if ((sp->device_type == XFRAME_II_DEVICE) ||
4368 ((subid & 0xFF) >= 0x07)) {
4369 val64 = readq(&bar0->gpio_control);
4370 val64 ^= GPIO_CTRL_GPIO_0;
4371 writeq(val64, &bar0->gpio_control);
4372 } else {
4373 val64 = readq(&bar0->adapter_control);
4374 val64 ^= ADAPTER_LED_ON;
4375 writeq(val64, &bar0->adapter_control);
4376 }
4377
4378 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4379 }
4380
4381 /**
4382 * s2io_ethtool_idnic - To physically identify the nic on the system.
4383 * @sp : private member of the device structure, which is a pointer to the
4384 * s2io_nic structure.
4385 * @id : pointer to the structure with identification parameters given by
4386 * ethtool.
4387 * Description: Used to physically identify the NIC on the system.
4388 * The Link LED will blink for a time specified by the user for
4389 * identification.
4390 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4391 * identification is possible only if its link is up.
4392 * Return value:
4393 * int , returns 0 on success
4394 */
4395
4396 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4397 {
4398 u64 val64 = 0, last_gpio_ctrl_val;
4399 nic_t *sp = dev->priv;
4400 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4401 u16 subid;
4402
4403 subid = sp->pdev->subsystem_device;
4404 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4405 if ((sp->device_type == XFRAME_I_DEVICE) &&
4406 ((subid & 0xFF) < 0x07)) {
4407 val64 = readq(&bar0->adapter_control);
4408 if (!(val64 & ADAPTER_CNTL_EN)) {
4409 printk(KERN_ERR
4410 "Adapter Link down, cannot blink LED\n");
4411 return -EFAULT;
4412 }
4413 }
4414 if (sp->id_timer.function == NULL) {
4415 init_timer(&sp->id_timer);
4416 sp->id_timer.function = s2io_phy_id;
4417 sp->id_timer.data = (unsigned long) sp;
4418 }
4419 mod_timer(&sp->id_timer, jiffies);
4420 if (data)
4421 msleep_interruptible(data * HZ);
4422 else
4423 msleep_interruptible(MAX_FLICKER_TIME);
4424 del_timer_sync(&sp->id_timer);
4425
4426 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4427 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4428 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4429 }
4430
4431 return 0;
4432 }
4433
4434 /**
4435 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4436 * @sp : private member of the device structure, which is a pointer to the
4437 * s2io_nic structure.
4438 * @ep : pointer to the structure with pause parameters given by ethtool.
4439 * Description:
4440 * Returns the Pause frame generation and reception capability of the NIC.
4441 * Return value:
4442 * void
4443 */
4444 static void s2io_ethtool_getpause_data(struct net_device *dev,
4445 struct ethtool_pauseparam *ep)
4446 {
4447 u64 val64;
4448 nic_t *sp = dev->priv;
4449 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4450
4451 val64 = readq(&bar0->rmac_pause_cfg);
4452 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4453 ep->tx_pause = TRUE;
4454 if (val64 & RMAC_PAUSE_RX_ENABLE)
4455 ep->rx_pause = TRUE;
4456 ep->autoneg = FALSE;
4457 }
4458
4459 /**
4460 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4461 * @sp : private member of the device structure, which is a pointer to the
4462 * s2io_nic structure.
4463 * @ep : pointer to the structure with pause parameters given by ethtool.
4464 * Description:
4465 * It can be used to set or reset Pause frame generation or reception
4466 * support of the NIC.
4467 * Return value:
4468 * int, returns 0 on Success
4469 */
4470
4471 static int s2io_ethtool_setpause_data(struct net_device *dev,
4472 struct ethtool_pauseparam *ep)
4473 {
4474 u64 val64;
4475 nic_t *sp = dev->priv;
4476 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4477
4478 val64 = readq(&bar0->rmac_pause_cfg);
4479 if (ep->tx_pause)
4480 val64 |= RMAC_PAUSE_GEN_ENABLE;
4481 else
4482 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4483 if (ep->rx_pause)
4484 val64 |= RMAC_PAUSE_RX_ENABLE;
4485 else
4486 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4487 writeq(val64, &bar0->rmac_pause_cfg);
4488 return 0;
4489 }
4490
4491 /**
4492 * read_eeprom - reads 4 bytes of data from a user-given offset.
4493 * @sp : private member of the device structure, which is a pointer to the
4494 * s2io_nic structure.
4495 * @off : offset from which the data is to be read
4496 * @data : It's an output parameter where the data read at the given
4497 * offset is stored.
4498 * Description:
4499 * Will read 4 bytes of data from the user given offset and return the
4500 * read data.
4501 * NOTE: Only the part of the EEPROM visible through the I2C bus
4502 * can be read.
4503 * Return value:
4504 * -1 on failure and 0 on success.
4505 */
4506
4507 #define S2IO_DEV_ID 5
4508 static int read_eeprom(nic_t * sp, int off, u64 * data)
4509 {
4510 int ret = -1;
4511 u32 exit_cnt = 0;
4512 u64 val64;
4513 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4514
4515 if (sp->device_type == XFRAME_I_DEVICE) {
4516 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4517 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4518 I2C_CONTROL_CNTL_START;
4519 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4520
4521 while (exit_cnt < 5) {
4522 val64 = readq(&bar0->i2c_control);
4523 if (I2C_CONTROL_CNTL_END(val64)) {
4524 *data = I2C_CONTROL_GET_DATA(val64);
4525 ret = 0;
4526 break;
4527 }
4528 msleep(50);
4529 exit_cnt++;
4530 }
4531 }
4532
4533 if (sp->device_type == XFRAME_II_DEVICE) {
4534 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4535 SPI_CONTROL_BYTECNT(0x3) |
4536 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4537 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4538 val64 |= SPI_CONTROL_REQ;
4539 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4540 while (exit_cnt < 5) {
4541 val64 = readq(&bar0->spi_control);
4542 if (val64 & SPI_CONTROL_NACK) {
4543 ret = 1;
4544 break;
4545 } else if (val64 & SPI_CONTROL_DONE) {
4546 *data = readq(&bar0->spi_data);
4547 *data &= 0xffffff;
4548 ret = 0;
4549 break;
4550 }
4551 msleep(50);
4552 exit_cnt++;
4553 }
4554 }
4555 return ret;
4556 }
4557
4558 /**
4559 * write_eeprom - actually writes the relevant part of the data value.
4560 * @sp : private member of the device structure, which is a pointer to the
4561 * s2io_nic structure.
4562 * @off : offset at which the data must be written
4563 * @data : The data that is to be written
4564 * @cnt : Number of bytes of the data that are actually to be written into
4565 * the Eeprom. (max of 3)
4566 * Description:
4567 * Actually writes the relevant part of the data value into the Eeprom
4568 * through the I2C bus.
4569 * Return value:
4570 * 0 on success, -1 on failure.
4571 */
4572
4573 static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4574 {
4575 int exit_cnt = 0, ret = -1;
4576 u64 val64;
4577 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4578
4579 if (sp->device_type == XFRAME_I_DEVICE) {
4580 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4581 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4582 I2C_CONTROL_CNTL_START;
4583 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4584
4585 while (exit_cnt < 5) {
4586 val64 = readq(&bar0->i2c_control);
4587 if (I2C_CONTROL_CNTL_END(val64)) {
4588 if (!(val64 & I2C_CONTROL_NACK))
4589 ret = 0;
4590 break;
4591 }
4592 msleep(50);
4593 exit_cnt++;
4594 }
4595 }
4596
4597 if (sp->device_type == XFRAME_II_DEVICE) {
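/*
 * A byte count of 8 is presumably encoded as 0 in the
 * SPI_CONTROL_BYTECNT field.
 */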
4598 int write_cnt = (cnt == 8) ? 0 : cnt;
4599 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4600
4601 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4602 SPI_CONTROL_BYTECNT(write_cnt) |
4603 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4604 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4605 val64 |= SPI_CONTROL_REQ;
4606 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4607 while (exit_cnt < 5) {
4608 val64 = readq(&bar0->spi_control);
4609 if (val64 & SPI_CONTROL_NACK) {
4610 ret = 1;
4611 break;
4612 } else if (val64 & SPI_CONTROL_DONE) {
4613 ret = 0;
4614 break;
4615 }
4616 msleep(50);
4617 exit_cnt++;
4618 }
4619 }
4620 return ret;
4621 }
4622
4623 /**
4624 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4625 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4626 * @eeprom : pointer to the user level structure provided by ethtool,
4627 * containing all relevant information.
4628 * @data_buf : user defined value to be written into Eeprom.
4629 * Description: Reads the values stored in the Eeprom at given offset
4630 * for a given length. Stores these values in the input argument data
4631 * buffer 'data_buf' and returns them to the caller (ethtool).
4632 * Return value:
4633 * int 0 on success
4634 */
4635
4636 static int s2io_ethtool_geeprom(struct net_device *dev,
4637 struct ethtool_eeprom *eeprom, u8 * data_buf)
4638 {
4639 u32 i, valid;
4640 u64 data;
4641 nic_t *sp = dev->priv;
4642
4643 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4644
4645 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4646 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4647
4648 for (i = 0; i < eeprom->len; i += 4) {
4649 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4650 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4651 return -EFAULT;
4652 }
4653 valid = INV(data);
4654 memcpy((data_buf + i), &valid, 4);
4655 }
4656 return 0;
4657 }
4658
4659 /**
4660 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4661 * @sp : private member of the device structure, which is a pointer to the
4662 * s2io_nic structure.
4663 * @eeprom : pointer to the user level structure provided by ethtool,
4664 * containing all relevant information.
4665 * @data_buf : user-defined value to be written into the Eeprom.
4666 * Description:
4667 * Tries to write the user provided value in the Eeprom, at the offset
4668 * given by the user.
4669 * Return value:
4670 * 0 on success, -EFAULT on failure.
4671 */
4672
4673 static int s2io_ethtool_seeprom(struct net_device *dev,
4674 struct ethtool_eeprom *eeprom,
4675 u8 * data_buf)
4676 {
4677 int len = eeprom->len, cnt = 0;
4678 u64 valid = 0, data;
4679 nic_t *sp = dev->priv;
4680
4681 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4682 DBG_PRINT(ERR_DBG,
4683 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4684 DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
4685 eeprom->magic);
4686 return -EFAULT;
4687 }
4688
4689 while (len) {
4690 data = (u32) data_buf[cnt] & 0x000000FF;
4691 if (data) {
4692 valid = (u32) (data << 24);
4693 } else
4694 valid = data;
4695
4696 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4697 DBG_PRINT(ERR_DBG,
4698 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4699 DBG_PRINT(ERR_DBG,
4700 "write into the specified offset\n");
4701 return -EFAULT;
4702 }
4703 cnt++;
4704 len--;
4705 }
4706
4707 return 0;
4708 }
4709
4710 /**
4711 * s2io_register_test - reads and writes into all clock domains.
4712 * @sp : private member of the device structure, which is a pointer to the
4713 * s2io_nic structure.
4714 * @data : variable that returns the result of each of the tests
4715 * conducted by the driver.
4716 * Description:
4717 * Reads and writes into all clock domains. The NIC has 3 clock domains;
4718 * the test verifies that registers in all three regions are accessible.
4719 * Return value:
4720 * 0 on success.
4721 */
4722
4723 static int s2io_register_test(nic_t * sp, uint64_t * data)
4724 {
4725 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4726 u64 val64 = 0, exp_val;
4727 int fail = 0;
4728
4729 val64 = readq(&bar0->pif_rd_swapper_fb);
4730 if (val64 != 0x123456789abcdefULL) {
4731 fail = 1;
4732 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4733 }
4734
4735 val64 = readq(&bar0->rmac_pause_cfg);
4736 if (val64 != 0xc000ffff00000000ULL) {
4737 fail = 1;
4738 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4739 }
4740
4741 val64 = readq(&bar0->rx_queue_cfg);
4742 if (sp->device_type == XFRAME_II_DEVICE)
4743 exp_val = 0x0404040404040404ULL;
4744 else
4745 exp_val = 0x0808080808080808ULL;
4746 if (val64 != exp_val) {
4747 fail = 1;
4748 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4749 }
4750
4751 val64 = readq(&bar0->xgxs_efifo_cfg);
4752 if (val64 != 0x000000001923141EULL) {
4753 fail = 1;
4754 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4755 }
4756
4757 val64 = 0x5A5A5A5A5A5A5A5AULL;
4758 writeq(val64, &bar0->xmsi_data);
4759 val64 = readq(&bar0->xmsi_data);
4760 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4761 fail = 1;
4762 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4763 }
4764
4765 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4766 writeq(val64, &bar0->xmsi_data);
4767 val64 = readq(&bar0->xmsi_data);
4768 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4769 fail = 1;
4770 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4771 }
4772
4773 *data = fail;
4774 return fail;
4775 }
4776
4777 /**
4778 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
4779 * @sp : private member of the device structure, which is a pointer to the
4780 * s2io_nic structure.
4781 * @data: variable that returns the result of each of the tests conducted by
4782 * the driver.
4783 * Description:
4784 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4785 * register.
4786 * Return value:
4787 * 0 on success.
4788 */
4789
4790 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4791 {
4792 int fail = 0;
4793 u64 ret_data, org_4F0, org_7F0;
4794 u8 saved_4F0 = 0, saved_7F0 = 0;
4795 struct net_device *dev = sp->dev;
4796
4797 /* Test Write Error at offset 0 */
4798 /* Note that SPI interface allows write access to all areas
4799 * of EEPROM. Hence doing all negative testing only for Xframe I.
4800 */
4801 if (sp->device_type == XFRAME_I_DEVICE)
4802 if (!write_eeprom(sp, 0, 0, 3))
4803 fail = 1;
4804
4805 /* Save current values at offsets 0x4F0 and 0x7F0 */
4806 if (!read_eeprom(sp, 0x4F0, &org_4F0))
4807 saved_4F0 = 1;
4808 if (!read_eeprom(sp, 0x7F0, &org_7F0))
4809 saved_7F0 = 1;
4810
4811 /* Test Write at offset 4f0 */
4812 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
4813 fail = 1;
4814 if (read_eeprom(sp, 0x4F0, &ret_data))
4815 fail = 1;
4816
4817 if (ret_data != 0x012345) {
4818 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
4819 "Data written %llx Data read %llx\n",
4820 dev->name, (unsigned long long)0x12345,
4821 (unsigned long long)ret_data);
4822 fail = 1;
4823 }
4824
4825 /* Reset the EEPROM data to 0xFFFFFF */
4826 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
4827
4828 /* Test Write Request Error at offset 0x7c */
4829 if (sp->device_type == XFRAME_I_DEVICE)
4830 if (!write_eeprom(sp, 0x07C, 0, 3))
4831 fail = 1;
4832
4833 /* Test Write Request at offset 0x7f0 */
4834 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
4835 fail = 1;
4836 if (read_eeprom(sp, 0x7F0, &ret_data))
4837 fail = 1;
4838
4839 if (ret_data != 0x012345) {
4840 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
4841 "Data written %llx Data read %llx\n",
4842 dev->name, (unsigned long long)0x12345,
4843 (unsigned long long)ret_data);
4844 fail = 1;
4845 }
4846
4847 /* Reset the EEPROM data to 0xFFFFFF */
4848 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
4849
4850 if (sp->device_type == XFRAME_I_DEVICE) {
4851 /* Test Write Error at offset 0x80 */
4852 if (!write_eeprom(sp, 0x080, 0, 3))
4853 fail = 1;
4854
4855 /* Test Write Error at offset 0xfc */
4856 if (!write_eeprom(sp, 0x0FC, 0, 3))
4857 fail = 1;
4858
4859 /* Test Write Error at offset 0x100 */
4860 if (!write_eeprom(sp, 0x100, 0, 3))
4861 fail = 1;
4862
4863 /* Test Write Error at offset 4ec */
4864 if (!write_eeprom(sp, 0x4EC, 0, 3))
4865 fail = 1;
4866 }
4867
4868 /* Restore values at offsets 0x4F0 and 0x7F0 */
4869 if (saved_4F0)
4870 write_eeprom(sp, 0x4F0, org_4F0, 3);
4871 if (saved_7F0)
4872 write_eeprom(sp, 0x7F0, org_7F0, 3);
4873
4874 *data = fail;
4875 return fail;
4876 }
4877
4878 /**
4879 * s2io_bist_test - invokes the MemBist test of the card .
4880 * @sp : private member of the device structure, which is a pointer to the
4881 * s2io_nic structure.
4882 * @data: variable that returns the result of each of the tests conducted by
4883 * the driver.
4884 * Description:
4885 * This invokes the MemBist test of the card. We give around
4886 * 2 secs time for the test to complete. If it's still not complete
4887 * within this period, we consider that the test failed.
4888 * Return value:
4889 * 0 on success and -1 on failure.
4890 */
4891
4892 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4893 {
4894 u8 bist = 0;
4895 int cnt = 0, ret = -1;
4896
4897 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4898 bist |= PCI_BIST_START;
4899 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
4900
4901 while (cnt < 20) {
4902 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4903 if (!(bist & PCI_BIST_START)) {
4904 *data = (bist & PCI_BIST_CODE_MASK);
4905 ret = 0;
4906 break;
4907 }
4908 msleep(100);
4909 cnt++;
4910 }
4911
4912 return ret;
4913 }
4914
4915 /**
4916 * s2io_link_test - verifies the link state of the nic
4917 * @sp : private member of the device structure, which is a pointer to the
4918 * s2io_nic structure.
4919 * @data: variable that returns the result of each of the tests conducted by
4920 * the driver.
4921 * Description:
4922 * The function verifies the link state of the NIC and updates the input
4923 * argument 'data' appropriately.
4924 * Return value:
4925 * 0 on success.
4926 */
4927
4928 static int s2io_link_test(nic_t * sp, uint64_t * data)
4929 {
4930 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4931 u64 val64;
4932
4933 val64 = readq(&bar0->adapter_status);
4934 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4935 *data = 1;
4936
4937 return 0;
4938 }
4939
4940 /**
4941 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4942 * @sp : private member of the device structure, which is a pointer to the
4943 * s2io_nic structure.
4944 * @data : variable that returns the result of each of the tests
4945 * conducted by the driver.
4946 * Description:
4947 * This is one of the offline tests that checks the read and write
4948 * access to the RldRam chip on the NIC.
4949 * Return value:
4950 * 0 on success.
4951 */
4952
4953 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4954 {
4955 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4956 u64 val64;
4957 int cnt, iteration = 0, test_fail = 0;
4958
4959 val64 = readq(&bar0->adapter_control);
4960 val64 &= ~ADAPTER_ECC_EN;
4961 writeq(val64, &bar0->adapter_control);
4962
4963 val64 = readq(&bar0->mc_rldram_test_ctrl);
4964 val64 |= MC_RLDRAM_TEST_MODE;
4965 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4966
4967 val64 = readq(&bar0->mc_rldram_mrs);
4968 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4969 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4970
4971 val64 |= MC_RLDRAM_MRS_ENABLE;
4972 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4973
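/*
 * Two passes: the second pass inverts the upper 48 bits of each
 * test pattern, so both polarities of the data lines are exercised.
 */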
4974 while (iteration < 2) {
4975 val64 = 0x55555555aaaa0000ULL;
4976 if (iteration == 1) {
4977 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4978 }
4979 writeq(val64, &bar0->mc_rldram_test_d0);
4980
4981 val64 = 0xaaaa5a5555550000ULL;
4982 if (iteration == 1) {
4983 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4984 }
4985 writeq(val64, &bar0->mc_rldram_test_d1);
4986
4987 val64 = 0x55aaaaaaaa5a0000ULL;
4988 if (iteration == 1) {
4989 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4990 }
4991 writeq(val64, &bar0->mc_rldram_test_d2);
4992
4993 val64 = (u64) (0x0000003ffffe0100ULL);
4994 writeq(val64, &bar0->mc_rldram_test_add);
4995
4996 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4997 MC_RLDRAM_TEST_GO;
4998 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4999
5000 for (cnt = 0; cnt < 5; cnt++) {
5001 val64 = readq(&bar0->mc_rldram_test_ctrl);
5002 if (val64 & MC_RLDRAM_TEST_DONE)
5003 break;
5004 msleep(200);
5005 }
5006
5007 if (cnt == 5)
5008 break;
5009
5010 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5011 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5012
5013 for (cnt = 0; cnt < 5; cnt++) {
5014 val64 = readq(&bar0->mc_rldram_test_ctrl);
5015 if (val64 & MC_RLDRAM_TEST_DONE)
5016 break;
5017 msleep(500);
5018 }
5019
5020 if (cnt == 5)
5021 break;
5022
5023 val64 = readq(&bar0->mc_rldram_test_ctrl);
5024 if (!(val64 & MC_RLDRAM_TEST_PASS))
5025 test_fail = 1;
5026
5027 iteration++;
5028 }
5029
5030 *data = test_fail;
5031
5032 /* Bring the adapter out of test mode */
5033 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5034
5035 return test_fail;
5036 }
5037
5038 /**
5039 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
5040 * @sp : private member of the device structure, which is a pointer to the
5041 * s2io_nic structure.
5042 * @ethtest : pointer to a ethtool command specific structure that will be
5043 * returned to the user.
5044 * @data : variable that returns the result of each of the tests
5045 * conducted by the driver.
5046 * Description:
5047 * This function conducts 6 tests (4 offline and 2 online) to determine
5048 * the health of the card.
5049 * Return value:
5050 * void
5051 */
5052
5053 static void s2io_ethtool_test(struct net_device *dev,
5054 struct ethtool_test *ethtest,
5055 uint64_t * data)
5056 {
5057 nic_t *sp = dev->priv;
5058 int orig_state = netif_running(sp->dev);
5059
5060 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5061 /* Offline Tests. */
5062 if (orig_state)
5063 s2io_close(sp->dev);
5064
5065 if (s2io_register_test(sp, &data[0]))
5066 ethtest->flags |= ETH_TEST_FL_FAILED;
5067
5068 s2io_reset(sp);
5069
5070 if (s2io_rldram_test(sp, &data[3]))
5071 ethtest->flags |= ETH_TEST_FL_FAILED;
5072
5073 s2io_reset(sp);
5074
5075 if (s2io_eeprom_test(sp, &data[1]))
5076 ethtest->flags |= ETH_TEST_FL_FAILED;
5077
5078 if (s2io_bist_test(sp, &data[4]))
5079 ethtest->flags |= ETH_TEST_FL_FAILED;
5080
5081 if (orig_state)
5082 s2io_open(sp->dev);
5083
5084 data[2] = 0;
5085 } else {
5086 /* Online Tests. */
5087 if (!orig_state) {
5088 DBG_PRINT(ERR_DBG,
5089 "%s: is not up, cannot run test\n",
5090 dev->name);
5091 data[0] = -1;
5092 data[1] = -1;
5093 data[2] = -1;
5094 data[3] = -1;
5095 data[4] = -1;
5096 }
5097
5098 if (s2io_link_test(sp, &data[2]))
5099 ethtest->flags |= ETH_TEST_FL_FAILED;
5100
5101 data[0] = 0;
5102 data[1] = 0;
5103 data[3] = 0;
5104 data[4] = 0;
5105 }
5106 }
5107
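/**
 * s2io_get_ethtool_stats - collects the driver statistics for ethtool.
 * @dev: pointer to the device structure.
 * @estats: ethtool stats request structure.
 * @tmp_stats: output array the counters are copied into.
 * Description: Triggers a hardware statistics update and then fills
 * @tmp_stats in the order of ethtool_stats_keys, widening the 32-bit
 * hardware counters with their overflow registers into 64-bit values.
 */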
5108 static void s2io_get_ethtool_stats(struct net_device *dev,
5109 struct ethtool_stats *estats,
5110 u64 * tmp_stats)
5111 {
5112 int i = 0;
5113 nic_t *sp = dev->priv;
5114 StatInfo_t *stat_info = sp->mac_control.stats_info;
5115 u64 tmp;
5116
5117 s2io_updt_stats(sp);
5118 tmp_stats[i++] =
5119 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5120 le32_to_cpu(stat_info->tmac_frms);
5121 tmp_stats[i++] =
5122 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5123 le32_to_cpu(stat_info->tmac_data_octets);
5124 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5125 tmp_stats[i++] =
5126 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5127 le32_to_cpu(stat_info->tmac_mcst_frms);
5128 tmp_stats[i++] =
5129 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5130 le32_to_cpu(stat_info->tmac_bcst_frms);
5131 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5132 tmp_stats[i++] =
5133 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5134 le32_to_cpu(stat_info->tmac_any_err_frms);
5135 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5136 tmp_stats[i++] =
5137 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5138 le32_to_cpu(stat_info->tmac_vld_ip);
5139 tmp_stats[i++] =
5140 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5141 le32_to_cpu(stat_info->tmac_drop_ip);
5142 tmp_stats[i++] =
5143 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5144 le32_to_cpu(stat_info->tmac_icmp);
5145 tmp_stats[i++] =
5146 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5147 le32_to_cpu(stat_info->tmac_rst_tcp);
5148 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5149 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5150 le32_to_cpu(stat_info->tmac_udp);
5151 tmp_stats[i++] =
5152 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5153 le32_to_cpu(stat_info->rmac_vld_frms);
5154 tmp_stats[i++] =
5155 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5156 le32_to_cpu(stat_info->rmac_data_octets);
5157 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5158 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5159 tmp_stats[i++] =
5160 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5161 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5162 tmp_stats[i++] =
5163 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5164 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5165 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5166 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5167 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5168 tmp_stats[i++] =
5169 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5170 le32_to_cpu(stat_info->rmac_discarded_frms);
5171 tmp_stats[i++] =
5172 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5173 le32_to_cpu(stat_info->rmac_usized_frms);
5174 tmp_stats[i++] =
5175 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5176 le32_to_cpu(stat_info->rmac_osized_frms);
5177 tmp_stats[i++] =
5178 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5179 le32_to_cpu(stat_info->rmac_frag_frms);
5180 tmp_stats[i++] =
5181 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5182 le32_to_cpu(stat_info->rmac_jabber_frms);
5183 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5184 le32_to_cpu(stat_info->rmac_ip);
5185 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5186 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5187 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5188 le32_to_cpu(stat_info->rmac_drop_ip);
5189 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5190 le32_to_cpu(stat_info->rmac_icmp);
5191 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5192 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5193 le32_to_cpu(stat_info->rmac_udp);
5194 tmp_stats[i++] =
5195 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5196 le32_to_cpu(stat_info->rmac_err_drp_udp);
5197 tmp_stats[i++] =
5198 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5199 le32_to_cpu(stat_info->rmac_pause_cnt);
5200 tmp_stats[i++] =
5201 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5202 le32_to_cpu(stat_info->rmac_accepted_ip);
5203 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5204 tmp_stats[i++] = 0;
5205 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5206 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5207 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5208 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5209 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5210 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5211 tmp = 0;
5212 if (stat_info->sw_stat.num_aggregations) {
5213 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5214 do_div(tmp, stat_info->sw_stat.num_aggregations);
5215 }
5216 tmp_stats[i++] = tmp;
5217 }
5218
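/*
 * Small ethtool accessors: register-space and EEPROM sizes, Rx
 * checksum state, self-test/statistics counts, string tables and the
 * Tx checksum offload flag.
 */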
5219 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5220 {
5221 return (XENA_REG_SPACE);
5222 }
5223
5224
5225 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5226 {
5227 nic_t *sp = dev->priv;
5228
5229 return (sp->rx_csum);
5230 }
5231
5232 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5233 {
5234 nic_t *sp = dev->priv;
5235
5236 if (data)
5237 sp->rx_csum = 1;
5238 else
5239 sp->rx_csum = 0;
5240
5241 return 0;
5242 }
5243
5244 static int s2io_get_eeprom_len(struct net_device *dev)
5245 {
5246 return (XENA_EEPROM_SPACE);
5247 }
5248
5249 static int s2io_ethtool_self_test_count(struct net_device *dev)
5250 {
5251 return (S2IO_TEST_LEN);
5252 }
5253
5254 static void s2io_ethtool_get_strings(struct net_device *dev,
5255 u32 stringset, u8 * data)
5256 {
5257 switch (stringset) {
5258 case ETH_SS_TEST:
5259 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5260 break;
5261 case ETH_SS_STATS:
5262 memcpy(data, &ethtool_stats_keys,
5263 sizeof(ethtool_stats_keys));
5264 }
5265 }
5266 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5267 {
5268 return (S2IO_STAT_LEN);
5269 }
5270
5271 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5272 {
5273 if (data)
5274 dev->features |= NETIF_F_IP_CSUM;
5275 else
5276 dev->features &= ~NETIF_F_IP_CSUM;
5277
5278 return 0;
5279 }
5280
5281
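/* Wire the handlers above into the kernel's ethtool interface. */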
5282 static struct ethtool_ops netdev_ethtool_ops = {
5283 .get_settings = s2io_ethtool_gset,
5284 .set_settings = s2io_ethtool_sset,
5285 .get_drvinfo = s2io_ethtool_gdrvinfo,
5286 .get_regs_len = s2io_ethtool_get_regs_len,
5287 .get_regs = s2io_ethtool_gregs,
5288 .get_link = ethtool_op_get_link,
5289 .get_eeprom_len = s2io_get_eeprom_len,
5290 .get_eeprom = s2io_ethtool_geeprom,
5291 .set_eeprom = s2io_ethtool_seeprom,
5292 .get_pauseparam = s2io_ethtool_getpause_data,
5293 .set_pauseparam = s2io_ethtool_setpause_data,
5294 .get_rx_csum = s2io_ethtool_get_rx_csum,
5295 .set_rx_csum = s2io_ethtool_set_rx_csum,
5296 .get_tx_csum = ethtool_op_get_tx_csum,
5297 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5298 .get_sg = ethtool_op_get_sg,
5299 .set_sg = ethtool_op_set_sg,
5300 #ifdef NETIF_F_TSO
5301 .get_tso = ethtool_op_get_tso,
5302 .set_tso = ethtool_op_set_tso,
5303 #endif
5304 .get_ufo = ethtool_op_get_ufo,
5305 .set_ufo = ethtool_op_set_ufo,
5306 .self_test_count = s2io_ethtool_self_test_count,
5307 .self_test = s2io_ethtool_test,
5308 .get_strings = s2io_ethtool_get_strings,
5309 .phys_id = s2io_ethtool_idnic,
5310 .get_stats_count = s2io_ethtool_get_stats_count,
5311 .get_ethtool_stats = s2io_get_ethtool_stats
5312 };
5313
5314 /**
5315 * s2io_ioctl - Entry point for the Ioctl
5316 * @dev : Device pointer.
5317 * @ifr : An IOCTL-specific structure that can contain a pointer to
5318 * a proprietary structure used to pass information to the driver.
5319 * @cmd : This is used to distinguish between the different commands that
5320 * can be passed to the IOCTL functions.
5321 * Description:
5322 * Currently there is no special functionality supported in IOCTL, hence
5323 * this function always returns -EOPNOTSUPP.
5324 */
5325
5326 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5327 {
5328 return -EOPNOTSUPP;
5329 }
5330
5331 /**
5332 * s2io_change_mtu - entry point to change MTU size for the device.
5333 * @dev : device pointer.
5334 * @new_mtu : the new MTU size for the device.
5335 * Description: A driver entry point to change MTU size for the device.
5336 * Before changing the MTU the device must be stopped.
5337 * Return value:
5338 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5339 * file on failure.
5340 */
5341
5342 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5343 {
5344 nic_t *sp = dev->priv;
5345
5346 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5347 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5348 dev->name);
5349 return -EPERM;
5350 }
5351
5352 dev->mtu = new_mtu;
5353 if (netif_running(dev)) {
5354 s2io_card_down(sp);
5355 netif_stop_queue(dev);
5356 if (s2io_card_up(sp)) {
5357 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5358 __FUNCTION__);
5359 }
5360 if (netif_queue_stopped(dev))
5361 netif_wake_queue(dev);
5362 } else { /* Device is down */
5363 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5364 u64 val64 = new_mtu;
5365
5366 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5367 }
5368
5369 return 0;
5370 }
5371
5372 /**
5373 * s2io_tasklet - Bottom half of the ISR.
5374 * @dev_addr : address of the device structure, passed as an unsigned long.
5375 * Description:
5376 * This is the tasklet or the bottom half of the ISR. This is
5377 * an extension of the ISR which is scheduled by the scheduler to be run
5378 * when the load on the CPU is low. All low priority tasks of the ISR can
5379 * be pushed into the tasklet. For now the tasklet is used only to
5380 * replenish the Rx buffers in the Rx buffer descriptors.
5381 * Return value:
5382 * void.
5383 */
5384
5385 static void s2io_tasklet(unsigned long dev_addr)
5386 {
5387 struct net_device *dev = (struct net_device *) dev_addr;
5388 nic_t *sp = dev->priv;
5389 int i, ret;
5390 mac_info_t *mac_control;
5391 struct config_param *config;
5392
5393 mac_control = &sp->mac_control;
5394 config = &sp->config;
5395
5396 if (!TASKLET_IN_USE) {
5397 for (i = 0; i < config->rx_ring_num; i++) {
5398 ret = fill_rx_buffers(sp, i);
5399 if (ret == -ENOMEM) {
5400 DBG_PRINT(ERR_DBG, "%s: Out of ",
5401 dev->name);
5402 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5403 break;
5404 } else if (ret == -EFILL) {
5405 DBG_PRINT(ERR_DBG,
5406 "%s: Rx Ring %d is full\n",
5407 dev->name, i);
5408 break;
5409 }
5410 }
5411 clear_bit(0, (&sp->tasklet_status));
5412 }
5413 }
5414
5415 /**
5416 * s2io_set_link - Set the link status
5417 * @data: pointer to the device private structure, passed as an unsigned long
5418 * Description: Sets the link status for the adapter
5419 */
5420
5421 static void s2io_set_link(unsigned long data)
5422 {
5423 nic_t *nic = (nic_t *) data;
5424 struct net_device *dev = nic->dev;
5425 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5426 register u64 val64;
5427 u16 subid;
5428
5429 if (test_and_set_bit(0, &(nic->link_state))) {
5430 /* The card is being reset, no point doing anything */
5431 return;
5432 }
5433
5434 subid = nic->pdev->subsystem_device;
5435 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5436 /*
5437 * Allow a small delay for the NIC's self-initiated
5438 * cleanup to complete.
5439 */
5440 msleep(100);
5441 }
5442
5443 val64 = readq(&bar0->adapter_status);
5444 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
5445 if (LINK_IS_UP(val64)) {
5446 val64 = readq(&bar0->adapter_control);
5447 val64 |= ADAPTER_CNTL_EN;
5448 writeq(val64, &bar0->adapter_control);
5449 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5450 subid)) {
5451 val64 = readq(&bar0->gpio_control);
5452 val64 |= GPIO_CTRL_GPIO_0;
5453 writeq(val64, &bar0->gpio_control);
5454 val64 = readq(&bar0->gpio_control);
5455 } else {
5456 val64 |= ADAPTER_LED_ON;
5457 writeq(val64, &bar0->adapter_control);
5458 }
5459 if (s2io_link_fault_indication(nic) ==
5460 MAC_RMAC_ERR_TIMER) {
5461 val64 = readq(&bar0->adapter_status);
5462 if (!LINK_IS_UP(val64)) {
5463 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5464 DBG_PRINT(ERR_DBG, " Link down ");
5465 DBG_PRINT(ERR_DBG, "after ");
5466 DBG_PRINT(ERR_DBG, "enabling ");
5467 DBG_PRINT(ERR_DBG, "device \n");
5468 }
5469 }
5470 if (nic->device_enabled_once == FALSE) {
5471 nic->device_enabled_once = TRUE;
5472 }
5473 s2io_link(nic, LINK_UP);
5474 } else {
5475 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5476 subid)) {
5477 val64 = readq(&bar0->gpio_control);
5478 val64 &= ~GPIO_CTRL_GPIO_0;
5479 writeq(val64, &bar0->gpio_control);
5480 val64 = readq(&bar0->gpio_control);
5481 }
5482 s2io_link(nic, LINK_DOWN);
5483 }
5484 } else { /* NIC is not Quiescent. */
5485 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5486 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5487 netif_stop_queue(dev);
5488 }
5489 clear_bit(0, &(nic->link_state));
5490 }
5491
5492 static void s2io_card_down(nic_t * sp)
5493 {
5494 int cnt = 0;
5495 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5496 unsigned long flags;
5497 register u64 val64 = 0;
5498
5499 del_timer_sync(&sp->alarm_timer);
5500 /* If s2io_set_link task is executing, wait till it completes. */
5501 while (test_and_set_bit(0, &(sp->link_state))) {
5502 msleep(50);
5503 }
5504 atomic_set(&sp->card_state, CARD_DOWN);
5505
5506 /* disable Tx and Rx traffic on the NIC */
5507 stop_nic(sp);
5508
5509 /* Kill tasklet. */
5510 tasklet_kill(&sp->task);
5511
5512 /* Check if the device is Quiescent and then Reset the NIC */
5513 do {
5514 val64 = readq(&bar0->adapter_status);
5515 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
5516 break;
5517 }
5518
5519 msleep(50);
5520 cnt++;
5521 if (cnt == 10) {
5522 DBG_PRINT(ERR_DBG,
5523 "s2io_close:Device not Quiescent ");
5524 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
5525 (unsigned long long) val64);
5526 break;
5527 }
5528 } while (1);
5529 s2io_reset(sp);
5530
5531 /* Waiting till all Interrupt handlers are complete */
5532 cnt = 0;
5533 do {
5534 msleep(10);
5535 if (!atomic_read(&sp->isr_cnt))
5536 break;
5537 cnt++;
5538 } while(cnt < 5);
5539
5540 spin_lock_irqsave(&sp->tx_lock, flags);
5541 /* Free all Tx buffers */
5542 free_tx_buffers(sp);
5543 spin_unlock_irqrestore(&sp->tx_lock, flags);
5544
5545 /* Free all Rx buffers */
5546 spin_lock_irqsave(&sp->rx_lock, flags);
5547 free_rx_buffers(sp);
5548 spin_unlock_irqrestore(&sp->rx_lock, flags);
5549
5550 clear_bit(0, &(sp->link_state));
5551 }
5552
5553 static int s2io_card_up(nic_t * sp)
5554 {
5555 int i, ret = 0;
5556 mac_info_t *mac_control;
5557 struct config_param *config;
5558 struct net_device *dev = (struct net_device *) sp->dev;
5559
5560 /* Initialize the H/W I/O registers */
5561 if (init_nic(sp) != 0) {
5562 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
5563 dev->name);
5564 return -ENODEV;
5565 }
5566
5567 if (sp->intr_type == MSI)
5568 ret = s2io_enable_msi(sp);
5569 else if (sp->intr_type == MSI_X)
5570 ret = s2io_enable_msi_x(sp);
5571 if (ret) {
5572 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
5573 sp->intr_type = INTA;
5574 }
5575
5576 /*
5577 * Initializing the Rx buffers of each configured Rx ring by
5578 * replenishing all of its Rx blocks.
5579 */
5580 mac_control = &sp->mac_control;
5581 config = &sp->config;
5582
5583 for (i = 0; i < config->rx_ring_num; i++) {
5584 if ((ret = fill_rx_buffers(sp, i))) {
5585 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
5586 dev->name);
5587 s2io_reset(sp);
5588 free_rx_buffers(sp);
5589 return -ENOMEM;
5590 }
5591 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
5592 atomic_read(&sp->rx_bufs_left[i]));
5593 }
5594
5595 /* Setting its receive mode */
5596 s2io_set_multicast(dev);
5597
5598 if (sp->lro) {
5599 /* Initialize max aggregatable pkts based on MTU */
5600 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
5601 /* Check if we can use the user-provided value (if specified) */
5602 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
5603 sp->lro_max_aggr_per_sess = lro_max_pkts;
5604 }
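/*
 * Worked example for the calculation above: with the default MTU of
 * 1500, lro_max_aggr_per_sess == 65535 / 1500 == 43 segments, the most
 * that still fits the 16-bit IP total-length field; a smaller
 * lro_max_pkts module parameter can only lower this bound.
 */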
5605
5606 /* Enable tasklet for the device */
5607 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
5608
5609 /* Enable Rx Traffic and interrupts on the NIC */
5610 if (start_nic(sp)) {
5611 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
5612 tasklet_kill(&sp->task);
5613 s2io_reset(sp);
5614 free_irq(dev->irq, dev);
5615 free_rx_buffers(sp);
5616 return -ENODEV;
5617 }
5618
5619 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
5620
5621 atomic_set(&sp->card_state, CARD_UP);
5622 return 0;
5623 }
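/*
 * Aside on S2IO_TIMER_CONF() above (an assumption based on the helper
 * macro in s2io.h); it is expected to expand to roughly:
 *
 *	init_timer(&sp->alarm_timer);
 *	sp->alarm_timer.function = s2io_alarm_handle;
 *	sp->alarm_timer.data = (unsigned long)sp;
 *	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
 *
 * i.e. the alarm handler first fires half a second after card-up.
 */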
5624
5625 /**
5626 * s2io_restart_nic - Resets the NIC.
5627 * @data : long pointer to the device private structure
5628 * Description:
5629 * This function is scheduled to be run by the s2io_tx_watchdog
5630 * function after 0.5 secs to reset the NIC. The idea is to reduce
5631 * the run time of the watch dog routine which is run holding a
5632 * spin lock.
5633 */
5634
5635 static void s2io_restart_nic(unsigned long data)
5636 {
5637 struct net_device *dev = (struct net_device *) data;
5638 nic_t *sp = dev->priv;
5639
5640 s2io_card_down(sp);
5641 if (s2io_card_up(sp)) {
5642 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5643 dev->name);
5644 }
5645 netif_wake_queue(dev);
5646 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
5647 dev->name);
5648
5649 }
5650
5651 /**
5652 * s2io_tx_watchdog - Watchdog for transmit side.
5653 * @dev : Pointer to net device structure
5654 * Description:
5655 * This function is triggered if the Tx Queue is stopped
5656 * for a pre-defined amount of time when the Interface is still up.
5657 * If the Interface is jammed in such a situation, the hardware is
5658 * reset (by s2io_close) and restarted again (by s2io_open) to
5659 * overcome any problem that might have been caused in the hardware.
5660 * Return value:
5661 * void
5662 */
5663
5664 static void s2io_tx_watchdog(struct net_device *dev)
5665 {
5666 nic_t *sp = dev->priv;
5667
5668 if (netif_carrier_ok(dev)) {
5669 schedule_work(&sp->rst_timer_task);
5670 }
5671 }
5672
5673 /**
5674 * rx_osm_handler - To perform some OS related operations on SKB.
5675 * @sp: private member of the device structure,pointer to s2io_nic structure.
5676 * @skb : the socket buffer pointer.
5677 * @len : length of the packet
5678 * @cksum : FCS checksum of the frame.
5679 * @ring_no : the ring from which this RxD was extracted.
5680 * Description:
5681 * This function is called by the Rx interrupt service routine to perform
5682 * some OS related operations on the SKB before passing it to the upper
5683 * layers. It mainly checks if the checksum is OK; if so, it marks the
5684 * SKB's checksum as verified, increments the Rx packet count and passes
5685 * the SKB to the upper layer. If the checksum is wrong, the SKB is still
5686 * passed up, but with ip_summed set to CHECKSUM_NONE so the stack re-checks it.
5687 * Return value:
5688 * SUCCESS on success and -1 on failure.
5689 */
5690 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5691 {
5692 nic_t *sp = ring_data->nic;
5693 struct net_device *dev = (struct net_device *) sp->dev;
5694 struct sk_buff *skb = (struct sk_buff *)
5695 ((unsigned long) rxdp->Host_Control);
5696 int ring_no = ring_data->ring_no;
5697 u16 l3_csum, l4_csum;
5698 lro_t *lro;
5699
5700 skb->dev = dev;
5701 if (rxdp->Control_1 & RXD_T_CODE) {
5702 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5703 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5704 dev->name, err);
5705 dev_kfree_skb(skb);
5706 sp->stats.rx_crc_errors++;
5707 atomic_dec(&sp->rx_bufs_left[ring_no]);
5708 rxdp->Host_Control = 0;
5709 return 0;
5710 }
5711
5712 /* Updating statistics */
5713 rxdp->Host_Control = 0;
5714 sp->rx_pkt_count++;
5715 sp->stats.rx_packets++;
5716 if (sp->rxd_mode == RXD_MODE_1) {
5717 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
5718
5719 sp->stats.rx_bytes += len;
5720 skb_put(skb, len);
5721
5722 } else if (sp->rxd_mode >= RXD_MODE_3A) {
5723 int get_block = ring_data->rx_curr_get_info.block_index;
5724 int get_off = ring_data->rx_curr_get_info.offset;
5725 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
5726 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
5727 unsigned char *buff = skb_push(skb, buf0_len);
5728
5729 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5730 sp->stats.rx_bytes += buf0_len + buf2_len;
5731 memcpy(buff, ba->ba_0, buf0_len);
5732
5733 if (sp->rxd_mode == RXD_MODE_3A) {
5734 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
5735
5736 skb_put(skb, buf1_len);
5737 skb->len += buf2_len;
5738 skb->data_len += buf2_len;
5739 skb->truesize += buf2_len;
5740 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
5741 sp->stats.rx_bytes += buf1_len;
5742
5743 } else
5744 skb_put(skb, buf2_len);
5745 }
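/*
 * Buffer-mode recap (sketch of the layout handled above): in RXD_MODE_1
 * the whole frame sits in a single buffer; in the 3-buffer modes buffer
 * 0 carries the Ethernet header (copied in front of the data via
 * skb_push()), buffer 1 the L3/L4 headers (RXD_MODE_3A only) and buffer
 * 2 the payload, which is why the buf0/buf1/buf2 lengths are accounted
 * separately.
 */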
5746
5747 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
5748 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
5749 (sp->rx_csum)) {
5750 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5751 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5752 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
5753 /*
5754 * NIC verifies if the Checksum of the received
5755 * frame is Ok or not and accordingly returns
5756 * a flag in the RxD.
5757 */
5758 skb->ip_summed = CHECKSUM_UNNECESSARY;
5759 if (sp->lro) {
5760 u32 tcp_len;
5761 u8 *tcp;
5762 int ret = 0;
5763
5764 ret = s2io_club_tcp_session(skb->data, &tcp,
5765 &tcp_len, &lro, rxdp, sp);
5766 switch (ret) {
5767 case 3: /* Begin anew */
5768 lro->parent = skb;
5769 goto aggregate;
5770 case 1: /* Aggregate */
5771 {
5772 lro_append_pkt(sp, lro,
5773 skb, tcp_len);
5774 goto aggregate;
5775 }
5776 case 4: /* Flush session */
5777 {
5778 lro_append_pkt(sp, lro,
5779 skb, tcp_len);
5780 queue_rx_frame(lro->parent);
5781 clear_lro_session(lro);
5782 sp->mac_control.stats_info->
5783 sw_stat.flush_max_pkts++;
5784 goto aggregate;
5785 }
5786 case 2: /* Flush both */
5787 lro->parent->data_len =
5788 lro->frags_len;
5789 sp->mac_control.stats_info->
5790 sw_stat.sending_both++;
5791 queue_rx_frame(lro->parent);
5792 clear_lro_session(lro);
5793 goto send_up;
5794 case 0: /* sessions exceeded */
5795 case 5: /*
5796 * First pkt in session not
5797 * L3/L4 aggregatable
5798 */
5799 break;
5800 default:
5801 DBG_PRINT(ERR_DBG,
5802 "%s: Samadhana!!\n",
5803 __FUNCTION__);
5804 BUG();
5805 }
5806 }
5807 } else {
5808 /*
5809 * Packet with erroneous checksum, let the
5810 * upper layers deal with it.
5811 */
5812 skb->ip_summed = CHECKSUM_NONE;
5813 }
5814 } else {
5815 skb->ip_summed = CHECKSUM_NONE;
5816 }
5817
5818 if (!sp->lro) {
5819 skb->protocol = eth_type_trans(skb, dev);
5820 #ifdef CONFIG_S2IO_NAPI
5821 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5822 /* Queueing the vlan frame to the upper layer */
5823 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5824 RXD_GET_VLAN_TAG(rxdp->Control_2));
5825 } else {
5826 netif_receive_skb(skb);
5827 }
5828 #else
5829 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5830 /* Queueing the vlan frame to the upper layer */
5831 vlan_hwaccel_rx(skb, sp->vlgrp,
5832 RXD_GET_VLAN_TAG(rxdp->Control_2));
5833 } else {
5834 netif_rx(skb);
5835 }
5836 #endif
5837 } else {
5838 send_up:
5839 queue_rx_frame(skb);
5840 }
5841 dev->last_rx = jiffies;
5842 aggregate:
5843 atomic_dec(&sp->rx_bufs_left[ring_no]);
5844 return SUCCESS;
5845 }
5846
5847 /**
5848 * s2io_link - stops/starts the Tx queue.
5849 * @sp : private member of the device structure, which is a pointer to the
5850 * s2io_nic structure.
5851 * @link : indicates whether link is UP/DOWN.
5852 * Description:
5853 * This function stops/starts the Tx queue depending on whether the link
5854 * status of the NIC is down or up. This is called by the Alarm
5855 * interrupt handler whenever a link change interrupt comes up.
5856 * Return value:
5857 * void.
5858 */
5859
5860 static void s2io_link(nic_t * sp, int link)
5861 {
5862 struct net_device *dev = (struct net_device *) sp->dev;
5863
5864 if (link != sp->last_link_state) {
5865 if (link == LINK_DOWN) {
5866 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5867 netif_carrier_off(dev);
5868 } else {
5869 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5870 netif_carrier_on(dev);
5871 }
5872 }
5873 sp->last_link_state = link;
5874 }
5875
5876 /**
5877 * get_xena_rev_id - to identify revision ID of xena.
5878 * @pdev : PCI Dev structure
5879 * Description:
5880 * Function to identify the Revision ID of xena.
5881 * Return value:
5882 * returns the revision ID of the device.
5883 */
5884
5885 static int get_xena_rev_id(struct pci_dev *pdev)
5886 {
5887 u8 id = 0;
5888
5889 pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
5890 return id;
5891 }
5892
5893 /**
5894 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5895 * @sp : private member of the device structure, which is a pointer to the
5896 * s2io_nic structure.
5897 * Description:
5898 * This function initializes a few of the PCI and PCI-X configuration registers
5899 * with recommended values.
5900 * Return value:
5901 * void
5902 */
5903
5904 static void s2io_init_pci(nic_t * sp)
5905 {
5906 u16 pci_cmd = 0, pcix_cmd = 0;
5907
5908 /* Enable Data Parity Error Recovery in PCI-X command register. */
5909 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5910 &(pcix_cmd));
5911 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5912 (pcix_cmd | 1));
5913 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5914 &(pcix_cmd));
5915
5916 /* Set the PErr Response bit in PCI command register. */
5917 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5918 pci_write_config_word(sp->pdev, PCI_COMMAND,
5919 (pci_cmd | PCI_COMMAND_PARITY));
5920 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5921
5922 /* Forcibly disabling relaxed ordering capability of the card. */
5923 pcix_cmd &= 0xfffd;
5924 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5925 pcix_cmd);
5926 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5927 &(pcix_cmd));
5928 }
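/*
 * Bit-level view of the PCI-X command register accesses above (per the
 * PCI-X addendum to the PCI spec): bit 0 is Data Parity Error Recovery
 * Enable, set by "pcix_cmd | 1"; bit 1 is Enable Relaxed Ordering,
 * cleared by "pcix_cmd &= 0xfffd" (i.e. & ~0x0002). The read-back after
 * each write simply flushes/verifies the configuration cycle.
 */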
5929
5930 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5931 MODULE_LICENSE("GPL");
5932 MODULE_VERSION(DRV_VERSION);
5933
5934 module_param(tx_fifo_num, int, 0);
5935 module_param(rx_ring_num, int, 0);
5936 module_param(rx_ring_mode, int, 0);
5937 module_param_array(tx_fifo_len, uint, NULL, 0);
5938 module_param_array(rx_ring_sz, uint, NULL, 0);
5939 module_param_array(rts_frm_len, uint, NULL, 0);
5940 module_param(use_continuous_tx_intrs, int, 1);
5941 module_param(rmac_pause_time, int, 0);
5942 module_param(mc_pause_threshold_q0q3, int, 0);
5943 module_param(mc_pause_threshold_q4q7, int, 0);
5944 module_param(shared_splits, int, 0);
5945 module_param(tmac_util_period, int, 0);
5946 module_param(rmac_util_period, int, 0);
5947 module_param(bimodal, bool, 0);
5948 module_param(l3l4hdr_size, int , 0);
5949 #ifndef CONFIG_S2IO_NAPI
5950 module_param(indicate_max_pkts, int, 0);
5951 #endif
5952 module_param(rxsync_frequency, int, 0);
5953 module_param(intr_type, int, 0);
5954 module_param(lro, int, 0);
5955 module_param(lro_max_pkts, int, 0);
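/*
 * Usage example (hypothetical values, shown for illustration only):
 * loading the driver with two Rx rings, MSI-X interrupts and LRO on:
 *
 *	modprobe s2io rx_ring_num=2 intr_type=2 lro=1 lro_max_pkts=20
 *
 * where intr_type follows the encoding assumed from s2io.h
 * (0 == INTA, 1 == MSI, 2 == MSI-X).
 */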
5956
5957 /**
5958 * s2io_init_nic - Initialization of the adapter .
5959 * @pdev : structure containing the PCI related information of the device.
5960 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5961 * Description:
5962 * The function initializes an adapter identified by the pci_dev structure.
5963 * All OS related initialization, including memory and device structure and
5964 * initialization of the device private variables, is done. Also the swapper
5965 * control register is initialized to enable read and write into the I/O
5966 * registers of the device.
5967 * Return value:
5968 * returns 0 on success and negative on failure.
5969 */
5970
5971 static int __devinit
5972 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5973 {
5974 nic_t *sp;
5975 struct net_device *dev;
5976 int i, j, ret;
5977 int dma_flag = FALSE;
5978 u32 mac_up, mac_down;
5979 u64 val64 = 0, tmp64 = 0;
5980 XENA_dev_config_t __iomem *bar0 = NULL;
5981 u16 subid;
5982 mac_info_t *mac_control;
5983 struct config_param *config;
5984 int mode;
5985 u8 dev_intr_type = intr_type;
5986
5987 #ifdef CONFIG_S2IO_NAPI
5988 if (dev_intr_type != INTA) {
5989 DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X "
5990 "is enabled. Defaulting to INTA\n");
5991 dev_intr_type = INTA;
5992 }
5993 else
5994 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5995 #endif
5996
5997 if ((ret = pci_enable_device(pdev))) {
5998 DBG_PRINT(ERR_DBG,
5999 "s2io_init_nic: pci_enable_device failed\n");
6000 return ret;
6001 }
6002
6003 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
6004 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6005 dma_flag = TRUE;
6006 if (pci_set_consistent_dma_mask
6007 (pdev, DMA_64BIT_MASK)) {
6008 DBG_PRINT(ERR_DBG,
6009 "Unable to obtain 64bit DMA for \
6010 consistent allocations\n");
6011 pci_disable_device(pdev);
6012 return -ENOMEM;
6013 }
6014 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
6015 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6016 } else {
6017 pci_disable_device(pdev);
6018 return -ENOMEM;
6019 }
6020
6021 if ((dev_intr_type == MSI_X) &&
6022 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6023 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6024 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
6025 Defaulting to INTA\n");
6026 dev_intr_type = INTA;
6027 }
6028 if (dev_intr_type != MSI_X) {
6029 if (pci_request_regions(pdev, s2io_driver_name)) {
6030 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
6031 pci_disable_device(pdev);
6032 return -ENODEV;
6033 }
6034 }
6035 else {
6036 if (!(request_mem_region(pci_resource_start(pdev, 0),
6037 pci_resource_len(pdev, 0), s2io_driver_name))) {
6038 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6039 pci_disable_device(pdev);
6040 return -ENODEV;
6041 }
6042 if (!(request_mem_region(pci_resource_start(pdev, 2),
6043 pci_resource_len(pdev, 2), s2io_driver_name))) {
6044 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6045 release_mem_region(pci_resource_start(pdev, 0),
6046 pci_resource_len(pdev, 0));
6047 pci_disable_device(pdev);
6048 return -ENODEV;
6049 }
6050 }
6051
6052 dev = alloc_etherdev(sizeof(nic_t));
6053 if (dev == NULL) {
6054 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6055 pci_disable_device(pdev);
6056 pci_release_regions(pdev);
6057 return -ENODEV;
6058 }
6059
6060 pci_set_master(pdev);
6061 pci_set_drvdata(pdev, dev);
6062 SET_MODULE_OWNER(dev);
6063 SET_NETDEV_DEV(dev, &pdev->dev);
6064
6065 /* Private member variable initialized to s2io NIC structure */
6066 sp = dev->priv;
6067 memset(sp, 0, sizeof(nic_t));
6068 sp->dev = dev;
6069 sp->pdev = pdev;
6070 sp->high_dma_flag = dma_flag;
6071 sp->device_enabled_once = FALSE;
6072 if (rx_ring_mode == 1)
6073 sp->rxd_mode = RXD_MODE_1;
6074 if (rx_ring_mode == 2)
6075 sp->rxd_mode = RXD_MODE_3B;
6076 if (rx_ring_mode == 3)
6077 sp->rxd_mode = RXD_MODE_3A;
6078
6079 sp->intr_type = dev_intr_type;
6080
6081 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6082 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6083 sp->device_type = XFRAME_II_DEVICE;
6084 else
6085 sp->device_type = XFRAME_I_DEVICE;
6086
6087 sp->lro = lro;
6088
6089 /* Initialize some PCI/PCI-X fields of the NIC. */
6090 s2io_init_pci(sp);
6091
6092 /*
6093 * Setting the device configuration parameters.
6094 * Most of these parameters can be specified by the user during
6095 * module insertion as they are module loadable parameters. If
6096 * these parameters are not specified during load time, they
6097 * are initialized with default values.
6098 */
6099 mac_control = &sp->mac_control;
6100 config = &sp->config;
6101
6102 /* Tx side parameters. */
6103 if (tx_fifo_len[0] == 0)
6104 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
6105 config->tx_fifo_num = tx_fifo_num;
6106 for (i = 0; i < MAX_TX_FIFOS; i++) {
6107 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
6108 config->tx_cfg[i].fifo_priority = i;
6109 }
6110
6111 /* mapping the QoS priority to the configured fifos */
6112 for (i = 0; i < MAX_TX_FIFOS; i++)
6113 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
6114
6115 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
6116 for (i = 0; i < config->tx_fifo_num; i++) {
6117 config->tx_cfg[i].f_no_snoop =
6118 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
6119 if (config->tx_cfg[i].fifo_len < 65) {
6120 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
6121 break;
6122 }
6123 }
6124 /* + 2 because one Txd for skb->data and one Txd for UFO */
6125 config->max_txds = MAX_SKB_FRAGS + 2;
6126
6127 /* Rx side parameters. */
6128 if (rx_ring_sz[0] == 0)
6129 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
6130 config->rx_ring_num = rx_ring_num;
6131 for (i = 0; i < MAX_RX_RINGS; i++) {
6132 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
6133 (rxd_count[sp->rxd_mode] + 1);
6134 config->rx_cfg[i].ring_priority = i;
6135 }
6136
6137 for (i = 0; i < rx_ring_num; i++) {
6138 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
6139 config->rx_cfg[i].f_no_snoop =
6140 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
6141 }
6142
6143 /* Setting Mac Control parameters */
6144 mac_control->rmac_pause_time = rmac_pause_time;
6145 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
6146 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
6147
6148
6149 /* Initialize Ring buffer parameters. */
6150 for (i = 0; i < config->rx_ring_num; i++)
6151 atomic_set(&sp->rx_bufs_left[i], 0);
6152
6153 /* Initialize the number of ISRs currently running */
6154 atomic_set(&sp->isr_cnt, 0);
6155
6156 /* initialize the shared memory used by the NIC and the host */
6157 if (init_shared_mem(sp)) {
6158 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
6159 __FUNCTION__);
6160 ret = -ENOMEM;
6161 goto mem_alloc_failed;
6162 }
6163
6164 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
6165 pci_resource_len(pdev, 0));
6166 if (!sp->bar0) {
6167 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
6168 dev->name);
6169 ret = -ENOMEM;
6170 goto bar0_remap_failed;
6171 }
6172
6173 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
6174 pci_resource_len(pdev, 2));
6175 if (!sp->bar1) {
6176 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
6177 dev->name);
6178 ret = -ENOMEM;
6179 goto bar1_remap_failed;
6180 }
6181
6182 dev->irq = pdev->irq;
6183 dev->base_addr = (unsigned long) sp->bar0;
6184
6185 /* Initializing the BAR1 address as the start of the FIFO pointer. */
6186 for (j = 0; j < MAX_TX_FIFOS; j++) {
6187 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
6188 (sp->bar1 + (j * 0x00020000));
6189 }
6190
6191 /* Driver entry points */
6192 dev->open = &s2io_open;
6193 dev->stop = &s2io_close;
6194 dev->hard_start_xmit = &s2io_xmit;
6195 dev->get_stats = &s2io_get_stats;
6196 dev->set_multicast_list = &s2io_set_multicast;
6197 dev->do_ioctl = &s2io_ioctl;
6198 dev->change_mtu = &s2io_change_mtu;
6199 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
6200 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6201 dev->vlan_rx_register = s2io_vlan_rx_register;
6202 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
6203
6204 /*
6205 * will use eth_mac_addr() for dev->set_mac_address
6206 * mac address will be set every time dev->open() is called
6207 */
6208 #if defined(CONFIG_S2IO_NAPI)
6209 dev->poll = s2io_poll;
6210 dev->weight = 32;
6211 #endif
6212
6213 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6214 if (sp->high_dma_flag == TRUE)
6215 dev->features |= NETIF_F_HIGHDMA;
6216 #ifdef NETIF_F_TSO
6217 dev->features |= NETIF_F_TSO;
6218 #endif
6219 if (sp->device_type & XFRAME_II_DEVICE) {
6220 dev->features |= NETIF_F_UFO;
6221 dev->features |= NETIF_F_HW_CSUM;
6222 }
6223
6224 dev->tx_timeout = &s2io_tx_watchdog;
6225 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
6226 INIT_WORK(&sp->rst_timer_task,
6227 (void (*)(void *)) s2io_restart_nic, dev);
6228 INIT_WORK(&sp->set_link_task,
6229 (void (*)(void *)) s2io_set_link, sp);
6230
6231 pci_save_state(sp->pdev);
6232
6233 /* Setting swapper control on the NIC, for proper reset operation */
6234 if (s2io_set_swapper(sp)) {
6235 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
6236 dev->name);
6237 ret = -EAGAIN;
6238 goto set_swap_failed;
6239 }
6240
6241 /* Verify if the Herc works on the slot its placed into */
6242 if (sp->device_type & XFRAME_II_DEVICE) {
6243 mode = s2io_verify_pci_mode(sp);
6244 if (mode < 0) {
6245 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
6246 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
6247 ret = -EBADSLT;
6248 goto set_swap_failed;
6249 }
6250 }
6251
6252 /* Not needed for Herc */
6253 if (sp->device_type & XFRAME_I_DEVICE) {
6254 /*
6255 * Fix for all "FFs" MAC address problems observed on
6256 * Alpha platforms
6257 */
6258 fix_mac_address(sp);
6259 s2io_reset(sp);
6260 }
6261
6262 /*
6263 * MAC address initialization.
6264 * For now only one mac address will be read and used.
6265 */
6266 bar0 = sp->bar0;
6267 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
6268 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
6269 writeq(val64, &bar0->rmac_addr_cmd_mem);
6270 wait_for_cmd_complete(sp);
6271
6272 tmp64 = readq(&bar0->rmac_addr_data0_mem);
6273 mac_down = (u32) tmp64;
6274 mac_up = (u32) (tmp64 >> 32);
6275
6276 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
6277
6278 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
6279 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
6280 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
6281 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
6282 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
6283 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
6284
6285 /* Set the factory defined MAC address initially */
6286 dev->addr_len = ETH_ALEN;
6287 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
6288
6289 /*
6290 * Initialize the tasklet status and link state flags
6291 * and the card state parameter
6292 */
6293 atomic_set(&(sp->card_state), 0);
6294 sp->tasklet_status = 0;
6295 sp->link_state = 0;
6296
6297 /* Initialize spinlocks */
6298 spin_lock_init(&sp->tx_lock);
6299 #ifndef CONFIG_S2IO_NAPI
6300 spin_lock_init(&sp->put_lock);
6301 #endif
6302 spin_lock_init(&sp->rx_lock);
6303
6304 /*
6305 * SXE-002: Configure link and activity LED to init state
6306 * on driver load.
6307 */
6308 subid = sp->pdev->subsystem_device;
6309 if ((subid & 0xFF) >= 0x07) {
6310 val64 = readq(&bar0->gpio_control);
6311 val64 |= 0x0000800000000000ULL;
6312 writeq(val64, &bar0->gpio_control);
6313 val64 = 0x0411040400000000ULL;
6314 writeq(val64, (void __iomem *) bar0 + 0x2700);
6315 val64 = readq(&bar0->gpio_control);
6316 }
6317
6318 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
6319
6320 if (register_netdev(dev)) {
6321 DBG_PRINT(ERR_DBG, "Device registration failed\n");
6322 ret = -ENODEV;
6323 goto register_failed;
6324 }
6325
6326 if (sp->device_type & XFRAME_II_DEVICE) {
6327 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
6328 dev->name);
6329 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6330 get_xena_rev_id(sp->pdev),
6331 s2io_driver_version);
6332 switch(sp->intr_type) {
6333 case INTA:
6334 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6335 break;
6336 case MSI:
6337 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6338 break;
6339 case MSI_X:
6340 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6341 break;
6342 }
6343
6344 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6345 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6346 sp->def_mac_addr[0].mac_addr[0],
6347 sp->def_mac_addr[0].mac_addr[1],
6348 sp->def_mac_addr[0].mac_addr[2],
6349 sp->def_mac_addr[0].mac_addr[3],
6350 sp->def_mac_addr[0].mac_addr[4],
6351 sp->def_mac_addr[0].mac_addr[5]);
6352 mode = s2io_print_pci_mode(sp);
6353 if (mode < 0) {
6354 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
6355 ret = -EBADSLT;
6356 goto set_swap_failed;
6357 }
6358 } else {
6359 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
6360 dev->name);
6361 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6362 get_xena_rev_id(sp->pdev),
6363 s2io_driver_version);
6364 switch(sp->intr_type) {
6365 case INTA:
6366 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6367 break;
6368 case MSI:
6369 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6370 break;
6371 case MSI_X:
6372 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6373 break;
6374 }
6375 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6376 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6377 sp->def_mac_addr[0].mac_addr[0],
6378 sp->def_mac_addr[0].mac_addr[1],
6379 sp->def_mac_addr[0].mac_addr[2],
6380 sp->def_mac_addr[0].mac_addr[3],
6381 sp->def_mac_addr[0].mac_addr[4],
6382 sp->def_mac_addr[0].mac_addr[5]);
6383 }
6384 if (sp->rxd_mode == RXD_MODE_3B)
6385 DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
6386 "enabled\n",dev->name);
6387 if (sp->rxd_mode == RXD_MODE_3A)
6388 DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
6389 "enabled\n",dev->name);
6390
6391 if (sp->lro)
6392 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
6393 dev->name);
6394
6395 /* Initialize device name */
6396 strcpy(sp->name, dev->name);
6397 if (sp->device_type & XFRAME_II_DEVICE)
6398 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
6399 else
6400 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
6401
6402 /* Initialize bimodal Interrupts */
6403 sp->config.bimodal = bimodal;
6404 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
6405 sp->config.bimodal = 0;
6406 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
6407 dev->name);
6408 }
6409
6410 /*
6411 * Make Link state as off at this point, when the Link change
6412 * interrupt comes the state will be automatically changed to
6413 * the right state.
6414 */
6415 netif_carrier_off(dev);
6416
6417 return 0;
6418
6419 register_failed:
6420 set_swap_failed:
6421 iounmap(sp->bar1);
6422 bar1_remap_failed:
6423 iounmap(sp->bar0);
6424 bar0_remap_failed:
6425 mem_alloc_failed:
6426 free_shared_mem(sp);
6427 pci_disable_device(pdev);
6428 if (dev_intr_type != MSI_X)
6429 pci_release_regions(pdev);
6430 else {
6431 release_mem_region(pci_resource_start(pdev, 0),
6432 pci_resource_len(pdev, 0));
6433 release_mem_region(pci_resource_start(pdev, 2),
6434 pci_resource_len(pdev, 2));
6435 }
6436 pci_set_drvdata(pdev, NULL);
6437 free_netdev(dev);
6438
6439 return ret;
6440 }
6441
6442 /**
6443 * s2io_rem_nic - Free the PCI device
6444 * @pdev: structure containing the PCI related information of the device.
6445 * Description: This function is called by the PCI subsystem to release a
6446 * PCI device and free all resources held by the device. This could
6447 * be in response to a Hot plug event or when the driver is to be removed
6448 * from memory.
6449 */
6450
6451 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
6452 {
6453 struct net_device *dev =
6454 (struct net_device *) pci_get_drvdata(pdev);
6455 nic_t *sp;
6456
6457 if (dev == NULL) {
6458 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
6459 return;
6460 }
6461
6462 sp = dev->priv;
6463 unregister_netdev(dev);
6464
6465 free_shared_mem(sp);
6466 iounmap(sp->bar0);
6467 iounmap(sp->bar1);
6468 pci_disable_device(pdev);
6469 if (sp->intr_type != MSI_X)
6470 pci_release_regions(pdev);
6471 else {
6472 release_mem_region(pci_resource_start(pdev, 0),
6473 pci_resource_len(pdev, 0));
6474 release_mem_region(pci_resource_start(pdev, 2),
6475 pci_resource_len(pdev, 2));
6476 }
6477 pci_set_drvdata(pdev, NULL);
6478 free_netdev(dev);
6479 }
6480
6481 /**
6482 * s2io_starter - Entry point for the driver
6483 * Description: This function is the entry point for the driver. It verifies
6484 * the module loadable parameters and initializes PCI configuration space.
6485 */
6486
6487 int __init s2io_starter(void)
6488 {
6489 return pci_module_init(&s2io_driver);
6490 }
6491
6492 /**
6493 * s2io_closer - Cleanup routine for the driver
6494 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
6495 */
6496
6497 static void s2io_closer(void)
6498 {
6499 pci_unregister_driver(&s2io_driver);
6500 DBG_PRINT(INIT_DBG, "cleanup done\n");
6501 }
6502
6503 module_init(s2io_starter);
6504 module_exit(s2io_closer);
6505
6506 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
6507 struct tcphdr **tcp, RxD_t *rxdp)
6508 {
6509 int ip_off;
6510 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
6511
6512 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
6513 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
6514 __FUNCTION__);
6515 return -1;
6516 }
6517
6518 /* TODO:
6519 * By default the VLAN field in the MAC is stripped by the card, if this
6520 * feature is turned off in rx_pa_cfg register, then the ip_off field
6521 * has to be shifted by a further 2 bytes
6522 */
6523 switch (l2_type) {
6524 case 0: /* DIX type */
6525 case 4: /* DIX type with VLAN */
6526 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
6527 break;
6528 /* LLC, SNAP etc are considered non-mergeable */
6529 default:
6530 return -1;
6531 }
6532
6533 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
6534 ip_len = (u8)((*ip)->ihl);
6535 ip_len <<= 2;
6536 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
6537
6538 return 0;
6539 }
6540
6541 static int check_for_socket_match(lro_t *lro, struct iphdr *ip,
6542 struct tcphdr *tcp)
6543 {
6544 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6545 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
6546 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
6547 return -1;
6548 return 0;
6549 }
6550
6551 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
6552 {
6553 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
6554 }
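/*
 * Worked example for get_l4_pyld_length(): a 1500-byte IP datagram
 * (ntohs(ip->tot_len) == 1500) with no IP options (ihl == 5, a 20-byte
 * header) and TCP timestamps (doff == 8, a 32-byte header) gives
 *
 *	1500 - (5 << 2) - (8 << 2) == 1500 - 20 - 32 == 1448 payload bytes
 */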
6555
6556 static void initiate_new_session(lro_t *lro, u8 *l2h,
6557 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
6558 {
6559 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6560 lro->l2h = l2h;
6561 lro->iph = ip;
6562 lro->tcph = tcp;
6563 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
6564 lro->tcp_ack = ntohl(tcp->ack_seq);
6565 lro->sg_num = 1;
6566 lro->total_len = ntohs(ip->tot_len);
6567 lro->frags_len = 0;
6568 /*
6569 * check if we saw TCP timestamp. Other consistency checks have
6570 * already been done.
6571 */
6572 if (tcp->doff == 8) {
6573 u32 *ptr;
6574 ptr = (u32 *)(tcp+1);
6575 lro->saw_ts = 1;
6576 lro->cur_tsval = *(ptr+1);
6577 lro->cur_tsecr = *(ptr+2);
6578 }
6579 lro->in_use = 1;
6580 }
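/*
 * Option layout assumed by the (ptr+1)/(ptr+2) arithmetic above when
 * tcp->doff == 8 (12 option bytes): the common NOP, NOP, TIMESTAMP
 * encoding, then two 32-bit words. From u32 *ptr = (u32 *)(tcp + 1):
 *
 *	ptr[0]: 0x01 0x01 0x08 0x0a  (NOP, NOP, kind 8, length 10)
 *	ptr[1]: TSval        ptr[2]: TSecr
 *
 * verify_l3_l4_lro_capable() checks for this layout before a session is
 * created, so the assumption holds for every aggregated packet.
 */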
6581
6582 static void update_L3L4_header(nic_t *sp, lro_t *lro)
6583 {
6584 struct iphdr *ip = lro->iph;
6585 struct tcphdr *tcp = lro->tcph;
6586 u16 nchk;
6587 StatInfo_t *statinfo = sp->mac_control.stats_info;
6588 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6589
6590 /* Update L3 header */
6591 ip->tot_len = htons(lro->total_len);
6592 ip->check = 0;
6593 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
6594 ip->check = nchk;
6595
6596 /* Update L4 header */
6597 tcp->ack_seq = lro->tcp_ack;
6598 tcp->window = lro->window;
6599
6600 /* Update tsecr field if this session has timestamps enabled */
6601 if (lro->saw_ts) {
6602 u32 *ptr = (u32 *)(tcp + 1);
6603 *(ptr+2) = lro->cur_tsecr;
6604 }
6605
6606 /* Update counters required for calculation of
6607 * average no. of packets aggregated.
6608 */
6609 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
6610 statinfo->sw_stat.num_aggregations++;
6611 }
6612
6613 static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
6614 struct tcphdr *tcp, u32 l4_pyld)
6615 {
6616 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6617 lro->total_len += l4_pyld;
6618 lro->frags_len += l4_pyld;
6619 lro->tcp_next_seq += l4_pyld;
6620 lro->sg_num++;
6621
6622 /* Update ack seq no. and advertised window (from this pkt) in LRO object */
6623 lro->tcp_ack = tcp->ack_seq;
6624 lro->window = tcp->window;
6625
6626 if (lro->saw_ts) {
6627 u32 *ptr;
6628 /* Update tsecr and tsval from this packet */
6629 ptr = (u32 *) (tcp + 1);
6630 lro->cur_tsval = *(ptr + 1);
6631 lro->cur_tsecr = *(ptr + 2);
6632 }
6633 }
6634
6635 static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
6636 struct tcphdr *tcp, u32 tcp_pyld_len)
6637 {
6638 u8 *ptr;
6639
6640 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
6641
6642 if (!tcp_pyld_len) {
6643 /* Runt frame or a pure ack */
6644 return -1;
6645 }
6646
6647 if (ip->ihl != 5) /* IP has options */
6648 return -1;
6649
6650 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
6651 !tcp->ack) {
6652 /*
6653 * Currently recognize only the ack control word and
6654 * any other control field being set would result in
6655 * flushing the LRO session
6656 */
6657 return -1;
6658 }
6659
6660 /*
6661 * Allow only one TCP timestamp option. Don't aggregate if
6662 * any other options are detected.
6663 */
6664 if (tcp->doff != 5 && tcp->doff != 8)
6665 return -1;
6666
6667 if (tcp->doff == 8) {
6668 ptr = (u8 *)(tcp + 1);
6669 while (*ptr == TCPOPT_NOP)
6670 ptr++;
6671 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
6672 return -1;
6673
6674 /* Ensure timestamp value increases monotonically */
6675 if (l_lro)
6676 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
6677 return -1;
6678
6679 /* timestamp echo reply should be non-zero */
6680 if (*((u32 *)(ptr+6)) == 0)
6681 return -1;
6682 }
6683
6684 return 0;
6685 }
6686
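/*
 * Return codes of s2io_club_tcp_session(), as consumed by the switch in
 * rx_osm_handler() (summarizing the cases handled below):
 *
 *	0 - all LRO sessions already in use, send the packet up as-is
 *	1 - matched an existing session, aggregate this packet into it
 *	2 - un-aggregatable / out-of-order, flush session and packet
 *	3 - no match found, begin a new session with this packet
 *	4 - aggregated and lro_max_aggr_per_sess reached, flush session
 *	5 - first packet of a would-be session is not L3/L4 aggregatable
 */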
6687 static int
6688 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
6689 RxD_t *rxdp, nic_t *sp)
6690 {
6691 struct iphdr *ip;
6692 struct tcphdr *tcph;
6693 int ret = 0, i;
6694
6695 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
6696 rxdp))) {
6697 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
6698 ip->saddr, ip->daddr);
6699 } else {
6700 return ret;
6701 }
6702
6703 tcph = (struct tcphdr *)*tcp;
6704 *tcp_len = get_l4_pyld_length(ip, tcph);
6705 for (i=0; i<MAX_LRO_SESSIONS; i++) {
6706 lro_t *l_lro = &sp->lro0_n[i];
6707 if (l_lro->in_use) {
6708 if (check_for_socket_match(l_lro, ip, tcph))
6709 continue;
6710 /* Sock pair matched */
6711 *lro = l_lro;
6712
6713 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
6714 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
6715 "0x%x, actual 0x%x\n", __FUNCTION__,
6716 (*lro)->tcp_next_seq,
6717 ntohl(tcph->seq));
6718
6719 sp->mac_control.stats_info->
6720 sw_stat.outof_sequence_pkts++;
6721 ret = 2;
6722 break;
6723 }
6724
6725 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
6726 ret = 1; /* Aggregate */
6727 else
6728 ret = 2; /* Flush both */
6729 break;
6730 }
6731 }
6732
6733 if (ret == 0) {
6734 /* Before searching for available LRO objects,
6735 * check if the pkt is L3/L4 aggregatable. If not
6736 * don't create new LRO session. Just send this
6737 * packet up.
6738 */
6739 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
6740 return 5;
6741 }
6742
6743 for (i=0; i<MAX_LRO_SESSIONS; i++) {
6744 lro_t *l_lro = &sp->lro0_n[i];
6745 if (!(l_lro->in_use)) {
6746 *lro = l_lro;
6747 ret = 3; /* Begin anew */
6748 break;
6749 }
6750 }
6751 }
6752
6753 if (ret == 0) { /* sessions exceeded */
6754 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
6755 __FUNCTION__);
6756 *lro = NULL;
6757 return ret;
6758 }
6759
6760 switch (ret) {
6761 case 3:
6762 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
6763 break;
6764 case 2:
6765 update_L3L4_header(sp, *lro);
6766 break;
6767 case 1:
6768 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
6769 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
6770 update_L3L4_header(sp, *lro);
6771 ret = 4; /* Flush the LRO */
6772 }
6773 break;
6774 default:
6775 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
6776 __FUNCTION__);
6777 break;
6778 }
6779
6780 return ret;
6781 }
6782
6783 static void clear_lro_session(lro_t *lro)
6784 {
6785 memset(lro, 0, sizeof(lro_t));
6788 }
6789
6790 static void queue_rx_frame(struct sk_buff *skb)
6791 {
6792 struct net_device *dev = skb->dev;
6793
6794 skb->protocol = eth_type_trans(skb, dev);
6795 #ifdef CONFIG_S2IO_NAPI
6796 netif_receive_skb(skb);
6797 #else
6798 netif_rx(skb);
6799 #endif
6800 }
6801
6802 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
6803 u32 tcp_len)
6804 {
6805 struct sk_buff *tmp, *first = lro->parent;
6806
6807 first->len += tcp_len;
6808 first->data_len = lro->frags_len;
6809 skb_pull(skb, (skb->len - tcp_len));
6810 if ((tmp = skb_shinfo(first)->frag_list)) {
6811 while (tmp->next)
6812 tmp = tmp->next;
6813 tmp->next = skb;
6814 }
6815 else
6816 skb_shinfo(first)->frag_list = skb;
6817 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
6818 return;
6819 }
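/*
 * Sketch of the resulting chain (illustrative): the first skb of the
 * session keeps the rebuilt L2/L3/L4 headers while each later segment,
 * trimmed to its TCP payload by the skb_pull() above, hangs off the
 * parent's frag_list in arrival order:
 *
 *	lro->parent -> frag_list: seg2 -> seg3 -> ... -> this skb
 *
 * queue_rx_frame() later hands the whole chain to the stack as one
 * large frame.
 */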