[PATCH] s2io driver bug fixes #1
drivers/net/s2io.c
1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watchdog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu	: For pointing out all the code parts that were
22 * deprecated and also for styling-related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 *
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines the maximum number of packets that
44 * can be aggregated as a single large packet
45 ************************************************************************/
46
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/ioport.h>
51 #include <linux/pci.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/stddef.h>
60 #include <linux/ioctl.h>
61 #include <linux/timex.h>
62 #include <linux/sched.h>
63 #include <linux/ethtool.h>
64 #include <linux/workqueue.h>
65 #include <linux/if_vlan.h>
66 #include <linux/ip.h>
67 #include <linux/tcp.h>
68 #include <net/tcp.h>
69
70 #include <asm/system.h>
71 #include <asm/uaccess.h>
72 #include <asm/io.h>
73 #include <asm/div64.h>
74
75 /* local include */
76 #include "s2io.h"
77 #include "s2io-regs.h"
78
79 #define DRV_VERSION "2.0.14.2"
80
81 /* S2io Driver name & version. */
82 static char s2io_driver_name[] = "Neterion";
83 static char s2io_driver_version[] = DRV_VERSION;
84
85 static int rxd_size[4] = {32,48,48,64};
86 static int rxd_count[4] = {127,85,85,63};
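/*
 * Both tables are indexed by the ring's rxd_mode.  Each Rx block is one
 * 4K page, so e.g. 32-byte descriptors fit 128 to a block, of which 127
 * are usable (one slot appears to be reserved for the block-link
 * marker); 64-byte descriptors likewise yield 63 usable entries.
 */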
87
88 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
89 {
90 int ret;
91
92 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
93 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
94
95 return ret;
96 }
97
98 /*
99 * Cards with the following subsystem_ids have a link state indication
100 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
101 * The macro below identifies these cards, given the subsystem_id.
102 */
103 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
104 (dev_type == XFRAME_I_DEVICE) ? \
105 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
106 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
107
108 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
109 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
110 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
111 #define PANIC 1
112 #define LOW 2
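/*
 * Returns PANIC once the free RxDs in a ring drop to one block's worth
 * or fewer, LOW when the ring is more than 16 buffers short of full,
 * and 0 otherwise.
 */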
113 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
114 {
115 mac_info_t *mac_control;
116
117 mac_control = &sp->mac_control;
118 if (rxb_size <= rxd_count[sp->rxd_mode])
119 return PANIC;
120 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
121 return LOW;
122 return 0;
123 }
124
125 /* Ethtool related variables and Macros. */
126 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
127 "Register test\t(offline)",
128 "Eeprom test\t(offline)",
129 "Link test\t(online)",
130 "RLDRAM test\t(offline)",
131 "BIST Test\t(offline)"
132 };
133
134 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
135 {"tmac_frms"},
136 {"tmac_data_octets"},
137 {"tmac_drop_frms"},
138 {"tmac_mcst_frms"},
139 {"tmac_bcst_frms"},
140 {"tmac_pause_ctrl_frms"},
141 {"tmac_ttl_octets"},
142 {"tmac_ucst_frms"},
143 {"tmac_nucst_frms"},
144 {"tmac_any_err_frms"},
145 {"tmac_ttl_less_fb_octets"},
146 {"tmac_vld_ip_octets"},
147 {"tmac_vld_ip"},
148 {"tmac_drop_ip"},
149 {"tmac_icmp"},
150 {"tmac_rst_tcp"},
151 {"tmac_tcp"},
152 {"tmac_udp"},
153 {"rmac_vld_frms"},
154 {"rmac_data_octets"},
155 {"rmac_fcs_err_frms"},
156 {"rmac_drop_frms"},
157 {"rmac_vld_mcst_frms"},
158 {"rmac_vld_bcst_frms"},
159 {"rmac_in_rng_len_err_frms"},
160 {"rmac_out_rng_len_err_frms"},
161 {"rmac_long_frms"},
162 {"rmac_pause_ctrl_frms"},
163 {"rmac_unsup_ctrl_frms"},
164 {"rmac_ttl_octets"},
165 {"rmac_accepted_ucst_frms"},
166 {"rmac_accepted_nucst_frms"},
167 {"rmac_discarded_frms"},
168 {"rmac_drop_events"},
169 {"rmac_ttl_less_fb_octets"},
170 {"rmac_ttl_frms"},
171 {"rmac_usized_frms"},
172 {"rmac_osized_frms"},
173 {"rmac_frag_frms"},
174 {"rmac_jabber_frms"},
175 {"rmac_ttl_64_frms"},
176 {"rmac_ttl_65_127_frms"},
177 {"rmac_ttl_128_255_frms"},
178 {"rmac_ttl_256_511_frms"},
179 {"rmac_ttl_512_1023_frms"},
180 {"rmac_ttl_1024_1518_frms"},
181 {"rmac_ip"},
182 {"rmac_ip_octets"},
183 {"rmac_hdr_err_ip"},
184 {"rmac_drop_ip"},
185 {"rmac_icmp"},
186 {"rmac_tcp"},
187 {"rmac_udp"},
188 {"rmac_err_drp_udp"},
189 {"rmac_xgmii_err_sym"},
190 {"rmac_frms_q0"},
191 {"rmac_frms_q1"},
192 {"rmac_frms_q2"},
193 {"rmac_frms_q3"},
194 {"rmac_frms_q4"},
195 {"rmac_frms_q5"},
196 {"rmac_frms_q6"},
197 {"rmac_frms_q7"},
198 {"rmac_full_q0"},
199 {"rmac_full_q1"},
200 {"rmac_full_q2"},
201 {"rmac_full_q3"},
202 {"rmac_full_q4"},
203 {"rmac_full_q5"},
204 {"rmac_full_q6"},
205 {"rmac_full_q7"},
206 {"rmac_pause_cnt"},
207 {"rmac_xgmii_data_err_cnt"},
208 {"rmac_xgmii_ctrl_err_cnt"},
209 {"rmac_accepted_ip"},
210 {"rmac_err_tcp"},
211 {"rd_req_cnt"},
212 {"new_rd_req_cnt"},
213 {"new_rd_req_rtry_cnt"},
214 {"rd_rtry_cnt"},
215 {"wr_rtry_rd_ack_cnt"},
216 {"wr_req_cnt"},
217 {"new_wr_req_cnt"},
218 {"new_wr_req_rtry_cnt"},
219 {"wr_rtry_cnt"},
220 {"wr_disc_cnt"},
221 {"rd_rtry_wr_ack_cnt"},
222 {"txp_wr_cnt"},
223 {"txd_rd_cnt"},
224 {"txd_wr_cnt"},
225 {"rxd_rd_cnt"},
226 {"rxd_wr_cnt"},
227 {"txf_rd_cnt"},
228 {"rxf_wr_cnt"},
229 {"rmac_ttl_1519_4095_frms"},
230 {"rmac_ttl_4096_8191_frms"},
231 {"rmac_ttl_8192_max_frms"},
232 {"rmac_ttl_gt_max_frms"},
233 {"rmac_osized_alt_frms"},
234 {"rmac_jabber_alt_frms"},
235 {"rmac_gt_max_alt_frms"},
236 {"rmac_vlan_frms"},
237 {"rmac_len_discard"},
238 {"rmac_fcs_discard"},
239 {"rmac_pf_discard"},
240 {"rmac_da_discard"},
241 {"rmac_red_discard"},
242 {"rmac_rts_discard"},
243 {"rmac_ingm_full_discard"},
244 {"link_fault_cnt"},
245 {"\n DRIVER STATISTICS"},
246 {"single_bit_ecc_errs"},
247 {"double_bit_ecc_errs"},
248 {"parity_err_cnt"},
249 {"serious_err_cnt"},
250 {"soft_reset_cnt"},
251 {"fifo_full_cnt"},
252 {"ring_full_cnt"},
253 ("alarm_transceiver_temp_high"),
254 ("alarm_transceiver_temp_low"),
255 ("alarm_laser_bias_current_high"),
256 ("alarm_laser_bias_current_low"),
257 ("alarm_laser_output_power_high"),
258 ("alarm_laser_output_power_low"),
259 ("warn_transceiver_temp_high"),
260 ("warn_transceiver_temp_low"),
261 ("warn_laser_bias_current_high"),
262 ("warn_laser_bias_current_low"),
263 ("warn_laser_output_power_high"),
264 ("warn_laser_output_power_low"),
265 ("lro_aggregated_pkts"),
266 ("lro_flush_both_count"),
267 ("lro_out_of_sequence_pkts"),
268 ("lro_flush_due_to_max_pkts"),
269 ("lro_avg_aggr_pkts"),
270 };
271
272 #define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
273 #define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
274
275 #define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
276 #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
277
278 #define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
279 	do { init_timer(&timer);			\
280 	timer.function = handle;			\
281 	timer.data = (unsigned long) arg;		\
282 	mod_timer(&timer, (jiffies + exp)); } while (0)
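/*
 * The do/while(0) wrapper above makes the macro behave as a single
 * statement.  Illustrative use (arming an alarm timer for half a second):
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */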
283
284 /* Add the vlan */
285 static void s2io_vlan_rx_register(struct net_device *dev,
286 struct vlan_group *grp)
287 {
288 nic_t *nic = dev->priv;
289 unsigned long flags;
290
291 spin_lock_irqsave(&nic->tx_lock, flags);
292 nic->vlgrp = grp;
293 spin_unlock_irqrestore(&nic->tx_lock, flags);
294 }
295
296 /* Unregister the vlan */
297 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
298 {
299 nic_t *nic = dev->priv;
300 unsigned long flags;
301
302 spin_lock_irqsave(&nic->tx_lock, flags);
303 if (nic->vlgrp)
304 nic->vlgrp->vlan_devices[vid] = NULL;
305 spin_unlock_irqrestore(&nic->tx_lock, flags);
306 }
307
308 /*
309 * Constants to be programmed into the Xena's registers to configure
310 * the XAUI.
311 */
312
313 #define END_SIGN 0x0
314 static const u64 herc_act_dtx_cfg[] = {
315 /* Set address */
316 0x8000051536750000ULL, 0x80000515367500E0ULL,
317 /* Write data */
318 0x8000051536750004ULL, 0x80000515367500E4ULL,
319 /* Set address */
320 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
321 /* Write data */
322 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
323 /* Set address */
324 0x801205150D440000ULL, 0x801205150D4400E0ULL,
325 /* Write data */
326 0x801205150D440004ULL, 0x801205150D4400E4ULL,
327 /* Set address */
328 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
329 /* Write data */
330 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
331 /* Done */
332 END_SIGN
333 };
334
335 static const u64 xena_dtx_cfg[] = {
336 /* Set address */
337 0x8000051500000000ULL, 0x80000515000000E0ULL,
338 /* Write data */
339 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
340 /* Set address */
341 0x8001051500000000ULL, 0x80010515000000E0ULL,
342 /* Write data */
343 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
344 /* Set address */
345 0x8002051500000000ULL, 0x80020515000000E0ULL,
346 /* Write data */
347 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
348 END_SIGN
349 };
350
351 /*
352 * Constants for fixing the MAC address problem seen mostly on
353 * Alpha machines.
354 */
355 static const u64 fix_mac[] = {
356 0x0060000000000000ULL, 0x0060600000000000ULL,
357 0x0040600000000000ULL, 0x0000600000000000ULL,
358 0x0020600000000000ULL, 0x0060600000000000ULL,
359 0x0020600000000000ULL, 0x0060600000000000ULL,
360 0x0020600000000000ULL, 0x0060600000000000ULL,
361 0x0020600000000000ULL, 0x0060600000000000ULL,
362 0x0020600000000000ULL, 0x0060600000000000ULL,
363 0x0020600000000000ULL, 0x0060600000000000ULL,
364 0x0020600000000000ULL, 0x0060600000000000ULL,
365 0x0020600000000000ULL, 0x0060600000000000ULL,
366 0x0020600000000000ULL, 0x0060600000000000ULL,
367 0x0020600000000000ULL, 0x0060600000000000ULL,
368 0x0020600000000000ULL, 0x0000600000000000ULL,
369 0x0040600000000000ULL, 0x0060600000000000ULL,
370 END_SIGN
371 };
372
373 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
374 MODULE_LICENSE("GPL");
375 MODULE_VERSION(DRV_VERSION);
376
377
378 /* Module Loadable parameters. */
379 S2IO_PARM_INT(tx_fifo_num, 1);
380 S2IO_PARM_INT(rx_ring_num, 1);
381
382
383 S2IO_PARM_INT(rx_ring_mode, 1);
384 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
385 S2IO_PARM_INT(rmac_pause_time, 0x100);
386 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
387 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
388 S2IO_PARM_INT(shared_splits, 0);
389 S2IO_PARM_INT(tmac_util_period, 5);
390 S2IO_PARM_INT(rmac_util_period, 5);
391 S2IO_PARM_INT(bimodal, 0);
392 S2IO_PARM_INT(l3l4hdr_size, 128);
393 /* Frequency of Rx desc syncs expressed as power of 2 */
394 S2IO_PARM_INT(rxsync_frequency, 3);
395 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
396 S2IO_PARM_INT(intr_type, 0);
397 /* Large receive offload feature */
398 S2IO_PARM_INT(lro, 0);
399 /* Max pkts to be aggregated by LRO at one time. If not specified,
400 * aggregation happens until we hit max IP pkt size(64K)
401 */
402 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
403 #ifndef CONFIG_S2IO_NAPI
404 S2IO_PARM_INT(indicate_max_pkts, 0);
405 #endif
406
407 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
408 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
409 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
410 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
411 static unsigned int rts_frm_len[MAX_RX_RINGS] =
412 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
413
414 module_param_array(tx_fifo_len, uint, NULL, 0);
415 module_param_array(rx_ring_sz, uint, NULL, 0);
416 module_param_array(rts_frm_len, uint, NULL, 0);
417
418 /*
419 * S2IO device table.
420 * This table lists all the devices that this driver supports.
421 */
422 static struct pci_device_id s2io_tbl[] __devinitdata = {
423 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
424 PCI_ANY_ID, PCI_ANY_ID},
425 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
426 PCI_ANY_ID, PCI_ANY_ID},
427 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
428 PCI_ANY_ID, PCI_ANY_ID},
429 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
430 PCI_ANY_ID, PCI_ANY_ID},
431 {0,}
432 };
433
434 MODULE_DEVICE_TABLE(pci, s2io_tbl);
435
436 static struct pci_driver s2io_driver = {
437 .name = "S2IO",
438 .id_table = s2io_tbl,
439 .probe = s2io_init_nic,
440 .remove = __devexit_p(s2io_rem_nic),
441 };
442
443 /* A helper macro used by both the init and free shared_mem functions. */
444 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
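/* Ceiling division: e.g. TXD_MEM_PAGE_CNT(100, 32) == 4, i.e. a FIFO of
 * 100 TxDLs at 32 lists per page needs 4 pages.
 */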
445
446 /**
447 * init_shared_mem - Allocation and Initialization of Memory
448 * @nic: Device private variable.
449 * Description: The function allocates all the memory areas shared
450 * between the NIC and the driver. This includes Tx descriptors,
451 * Rx descriptors and the statistics block.
452 */
453
454 static int init_shared_mem(struct s2io_nic *nic)
455 {
456 u32 size;
457 void *tmp_v_addr, *tmp_v_addr_next;
458 dma_addr_t tmp_p_addr, tmp_p_addr_next;
459 RxD_block_t *pre_rxd_blk = NULL;
460 int i, j, blk_cnt, rx_sz, tx_sz;
461 int lst_size, lst_per_page;
462 struct net_device *dev = nic->dev;
463 unsigned long tmp;
464 buffAdd_t *ba;
465
466 mac_info_t *mac_control;
467 struct config_param *config;
468
469 mac_control = &nic->mac_control;
470 config = &nic->config;
471
472
473 /* Allocation and initialization of TXDLs in FIFOs */
474 size = 0;
475 for (i = 0; i < config->tx_fifo_num; i++) {
476 size += config->tx_cfg[i].fifo_len;
477 }
478 if (size > MAX_AVAILABLE_TXDS) {
479 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
480 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
481 return -EINVAL;
482 }
483
484 lst_size = (sizeof(TxD_t) * config->max_txds);
485 tx_sz = lst_size * size;
486 lst_per_page = PAGE_SIZE / lst_size;
487
488 for (i = 0; i < config->tx_fifo_num; i++) {
489 int fifo_len = config->tx_cfg[i].fifo_len;
490 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
491 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
492 GFP_KERNEL);
493 if (!mac_control->fifos[i].list_info) {
494 DBG_PRINT(ERR_DBG,
495 "Malloc failed for list_info\n");
496 return -ENOMEM;
497 }
498 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
499 }
500 for (i = 0; i < config->tx_fifo_num; i++) {
501 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
502 lst_per_page);
503 mac_control->fifos[i].tx_curr_put_info.offset = 0;
504 mac_control->fifos[i].tx_curr_put_info.fifo_len =
505 config->tx_cfg[i].fifo_len - 1;
506 mac_control->fifos[i].tx_curr_get_info.offset = 0;
507 mac_control->fifos[i].tx_curr_get_info.fifo_len =
508 config->tx_cfg[i].fifo_len - 1;
509 mac_control->fifos[i].fifo_no = i;
510 mac_control->fifos[i].nic = nic;
511 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
512
513 for (j = 0; j < page_num; j++) {
514 int k = 0;
515 dma_addr_t tmp_p;
516 void *tmp_v;
517 tmp_v = pci_alloc_consistent(nic->pdev,
518 PAGE_SIZE, &tmp_p);
519 if (!tmp_v) {
520 DBG_PRINT(ERR_DBG,
521 "pci_alloc_consistent ");
522 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
523 return -ENOMEM;
524 }
525 /* If we got a zero DMA address (can happen on
526 * certain platforms like PPC), reallocate.
527 * Store the virtual address of the page we don't want,
528 * to be freed later.
529 */
530 if (!tmp_p) {
531 mac_control->zerodma_virt_addr = tmp_v;
532 DBG_PRINT(INIT_DBG,
533 "%s: Zero DMA address for TxDL. ", dev->name);
534 DBG_PRINT(INIT_DBG,
535 "Virtual address %p\n", tmp_v);
536 tmp_v = pci_alloc_consistent(nic->pdev,
537 PAGE_SIZE, &tmp_p);
538 if (!tmp_v) {
539 DBG_PRINT(ERR_DBG,
540 "pci_alloc_consistent ");
541 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
542 return -ENOMEM;
543 }
544 }
545 while (k < lst_per_page) {
546 int l = (j * lst_per_page) + k;
547 if (l == config->tx_cfg[i].fifo_len)
548 break;
549 mac_control->fifos[i].list_info[l].list_virt_addr =
550 tmp_v + (k * lst_size);
551 mac_control->fifos[i].list_info[l].list_phy_addr =
552 tmp_p + (k * lst_size);
553 k++;
554 }
555 }
556 }
557
558 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
559 if (!nic->ufo_in_band_v)
560 return -ENOMEM;
561 memset(nic->ufo_in_band_v, 0, size * sizeof(u64));
562
563 /* Allocation and initialization of RXDs in Rings */
564 size = 0;
565 for (i = 0; i < config->rx_ring_num; i++) {
566 if (config->rx_cfg[i].num_rxd %
567 (rxd_count[nic->rxd_mode] + 1)) {
568 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
569 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
570 i);
571 DBG_PRINT(ERR_DBG, "RxDs per Block");
572 return FAILURE;
573 }
574 size += config->rx_cfg[i].num_rxd;
575 mac_control->rings[i].block_count =
576 config->rx_cfg[i].num_rxd /
577 (rxd_count[nic->rxd_mode] + 1 );
578 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
579 mac_control->rings[i].block_count;
580 }
581 if (nic->rxd_mode == RXD_MODE_1)
582 size = (size * (sizeof(RxD1_t)));
583 else
584 size = (size * (sizeof(RxD3_t)));
585 rx_sz = size;
586
587 for (i = 0; i < config->rx_ring_num; i++) {
588 mac_control->rings[i].rx_curr_get_info.block_index = 0;
589 mac_control->rings[i].rx_curr_get_info.offset = 0;
590 mac_control->rings[i].rx_curr_get_info.ring_len =
591 config->rx_cfg[i].num_rxd - 1;
592 mac_control->rings[i].rx_curr_put_info.block_index = 0;
593 mac_control->rings[i].rx_curr_put_info.offset = 0;
594 mac_control->rings[i].rx_curr_put_info.ring_len =
595 config->rx_cfg[i].num_rxd - 1;
596 mac_control->rings[i].nic = nic;
597 mac_control->rings[i].ring_no = i;
598
599 blk_cnt = config->rx_cfg[i].num_rxd /
600 (rxd_count[nic->rxd_mode] + 1);
601 /* Allocating all the Rx blocks */
602 for (j = 0; j < blk_cnt; j++) {
603 rx_block_info_t *rx_blocks;
604 int l;
605
606 rx_blocks = &mac_control->rings[i].rx_blocks[j];
607 size = SIZE_OF_BLOCK; /* size is always page size */
608 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
609 &tmp_p_addr);
610 if (tmp_v_addr == NULL) {
611 /*
612 * In case of failure, free_shared_mem()
613 * is called, which should free any
614 * memory that was alloced till the
615 * failure happened.
616 */
617 rx_blocks->block_virt_addr = tmp_v_addr;
618 return -ENOMEM;
619 }
620 memset(tmp_v_addr, 0, size);
621 rx_blocks->block_virt_addr = tmp_v_addr;
622 rx_blocks->block_dma_addr = tmp_p_addr;
623 rx_blocks->rxds = kmalloc(sizeof(rxd_info_t) *
624 rxd_count[nic->rxd_mode],
625 GFP_KERNEL);
if (!rx_blocks->rxds)
	return -ENOMEM;
626 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
627 rx_blocks->rxds[l].virt_addr =
628 rx_blocks->block_virt_addr +
629 (rxd_size[nic->rxd_mode] * l);
630 rx_blocks->rxds[l].dma_addr =
631 rx_blocks->block_dma_addr +
632 (rxd_size[nic->rxd_mode] * l);
633 }
634 }
635 /* Interlinking all Rx Blocks */
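/* Each block's trailing RxD_block_t fields record the virtual and DMA
 * addresses of the next block; the (j + 1) % blk_cnt wrap below makes
 * the chain circular so the hardware can traverse the ring endlessly.
 */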
636 for (j = 0; j < blk_cnt; j++) {
637 tmp_v_addr =
638 mac_control->rings[i].rx_blocks[j].block_virt_addr;
639 tmp_v_addr_next =
640 mac_control->rings[i].rx_blocks[(j + 1) %
641 blk_cnt].block_virt_addr;
642 tmp_p_addr =
643 mac_control->rings[i].rx_blocks[j].block_dma_addr;
644 tmp_p_addr_next =
645 mac_control->rings[i].rx_blocks[(j + 1) %
646 blk_cnt].block_dma_addr;
647
648 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
649 pre_rxd_blk->reserved_2_pNext_RxD_block =
650 (unsigned long) tmp_v_addr_next;
651 pre_rxd_blk->pNext_RxD_Blk_physical =
652 (u64) tmp_p_addr_next;
653 }
654 }
655 if (nic->rxd_mode >= RXD_MODE_3A) {
656 /*
657 * Allocation of storage for buffer addresses in 2BUFF mode,
658 * and of the buffers themselves.
659 */
660 for (i = 0; i < config->rx_ring_num; i++) {
661 blk_cnt = config->rx_cfg[i].num_rxd /
662 (rxd_count[nic->rxd_mode]+ 1);
663 mac_control->rings[i].ba =
664 kmalloc((sizeof(buffAdd_t *) * blk_cnt),
665 GFP_KERNEL);
666 if (!mac_control->rings[i].ba)
667 return -ENOMEM;
668 for (j = 0; j < blk_cnt; j++) {
669 int k = 0;
670 mac_control->rings[i].ba[j] =
671 kmalloc((sizeof(buffAdd_t) *
672 (rxd_count[nic->rxd_mode] + 1)),
673 GFP_KERNEL);
674 if (!mac_control->rings[i].ba[j])
675 return -ENOMEM;
676 while (k != rxd_count[nic->rxd_mode]) {
677 ba = &mac_control->rings[i].ba[j][k];
678
679 ba->ba_0_org = (void *) kmalloc
680 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
681 if (!ba->ba_0_org)
682 return -ENOMEM;
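/* Round the buffer up to the next (ALIGN_SIZE + 1)-byte boundary.
 * This assumes ALIGN_SIZE is one less than a power of two, making
 * the add-then-mask sequence below the usual alignment idiom.
 */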
683 tmp = (unsigned long)ba->ba_0_org;
684 tmp += ALIGN_SIZE;
685 tmp &= ~((unsigned long) ALIGN_SIZE);
686 ba->ba_0 = (void *) tmp;
687
688 ba->ba_1_org = (void *) kmalloc
689 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
690 if (!ba->ba_1_org)
691 return -ENOMEM;
692 tmp = (unsigned long) ba->ba_1_org;
693 tmp += ALIGN_SIZE;
694 tmp &= ~((unsigned long) ALIGN_SIZE);
695 ba->ba_1 = (void *) tmp;
696 k++;
697 }
698 }
699 }
700 }
701
702 /* Allocation and initialization of Statistics block */
703 size = sizeof(StatInfo_t);
704 mac_control->stats_mem = pci_alloc_consistent
705 (nic->pdev, size, &mac_control->stats_mem_phy);
706
707 if (!mac_control->stats_mem) {
708 /*
709 * In case of failure, free_shared_mem() is called, which
710 * should free any memory that was alloced till the
711 * failure happened.
712 */
713 return -ENOMEM;
714 }
715 mac_control->stats_mem_sz = size;
716
717 tmp_v_addr = mac_control->stats_mem;
718 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
719 memset(tmp_v_addr, 0, size);
720 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
721 (unsigned long long) tmp_p_addr);
722
723 return SUCCESS;
724 }
725
726 /**
727 * free_shared_mem - Free the allocated Memory
728 * @nic: Device private variable.
729 * Description: This function frees all memory locations allocated by
730 * the init_shared_mem() function and returns it to the kernel.
731 */
732
733 static void free_shared_mem(struct s2io_nic *nic)
734 {
735 int i, j, blk_cnt, size;
736 void *tmp_v_addr;
737 dma_addr_t tmp_p_addr;
738 mac_info_t *mac_control;
739 struct config_param *config;
740 int lst_size, lst_per_page;
741 struct net_device *dev = nic->dev;
742
743 if (!nic)
744 return;
745
746 mac_control = &nic->mac_control;
747 config = &nic->config;
748
749 lst_size = (sizeof(TxD_t) * config->max_txds);
750 lst_per_page = PAGE_SIZE / lst_size;
751
752 for (i = 0; i < config->tx_fifo_num; i++) {
753 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
754 lst_per_page);
755 for (j = 0; j < page_num; j++) {
756 int mem_blks = (j * lst_per_page);
757 if (!mac_control->fifos[i].list_info)
758 return;
759 if (!mac_control->fifos[i].list_info[mem_blks].
760 list_virt_addr)
761 break;
762 pci_free_consistent(nic->pdev, PAGE_SIZE,
763 mac_control->fifos[i].
764 list_info[mem_blks].
765 list_virt_addr,
766 mac_control->fifos[i].
767 list_info[mem_blks].
768 list_phy_addr);
769 }
770 /* If we got a zero DMA address during allocation,
771 * free the page now
772 */
773 if (mac_control->zerodma_virt_addr) {
774 pci_free_consistent(nic->pdev, PAGE_SIZE,
775 mac_control->zerodma_virt_addr,
776 (dma_addr_t)0);
777 DBG_PRINT(INIT_DBG,
778 "%s: Freeing TxDL with zero DMA addr. ",
779 dev->name);
780 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
781 mac_control->zerodma_virt_addr);
782 }
783 kfree(mac_control->fifos[i].list_info);
784 }
785
786 size = SIZE_OF_BLOCK;
787 for (i = 0; i < config->rx_ring_num; i++) {
788 blk_cnt = mac_control->rings[i].block_count;
789 for (j = 0; j < blk_cnt; j++) {
790 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
791 block_virt_addr;
792 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
793 block_dma_addr;
794 if (tmp_v_addr == NULL)
795 break;
796 pci_free_consistent(nic->pdev, size,
797 tmp_v_addr, tmp_p_addr);
798 kfree(mac_control->rings[i].rx_blocks[j].rxds);
799 }
800 }
801
802 if (nic->rxd_mode >= RXD_MODE_3A) {
803 /* Freeing buffer storage addresses in 2BUFF mode. */
804 for (i = 0; i < config->rx_ring_num; i++) {
805 blk_cnt = config->rx_cfg[i].num_rxd /
806 (rxd_count[nic->rxd_mode] + 1);
807 for (j = 0; j < blk_cnt; j++) {
808 int k = 0;
809 if (!mac_control->rings[i].ba[j])
810 continue;
811 while (k != rxd_count[nic->rxd_mode]) {
812 buffAdd_t *ba =
813 &mac_control->rings[i].ba[j][k];
814 kfree(ba->ba_0_org);
815 kfree(ba->ba_1_org);
816 k++;
817 }
818 kfree(mac_control->rings[i].ba[j]);
819 }
820 kfree(mac_control->rings[i].ba);
821 }
822 }
823
824 if (mac_control->stats_mem) {
825 pci_free_consistent(nic->pdev,
826 mac_control->stats_mem_sz,
827 mac_control->stats_mem,
828 mac_control->stats_mem_phy);
829 }
830 /* kfree() is a no-op on NULL, so no check is needed */
831 kfree(nic->ufo_in_band_v);
832 }
833
834 /**
835 * s2io_verify_pci_mode - Read and validate the adapter's PCI/PCI-X bus mode.
836 */
837
838 static int s2io_verify_pci_mode(nic_t *nic)
839 {
840 XENA_dev_config_t __iomem *bar0 = nic->bar0;
841 register u64 val64 = 0;
842 int mode;
843
844 val64 = readq(&bar0->pci_mode);
845 mode = (u8)GET_PCI_MODE(val64);
846
847 if (val64 & PCI_MODE_UNKNOWN_MODE)
848 return -1; /* Unknown PCI mode */
849 return mode;
850 }
851
852 #define NEC_VENID 0x1033
853 #define NEC_DEVID 0x0125
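/* Returns 1 if the adapter sits behind an NEC bridge (vendor 0x1033,
 * device 0x0125), in which case the card is treated as being on a
 * PCI-E bus; returns 0 otherwise.
 */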
854 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
855 {
856 struct pci_dev *tdev = NULL;
857 while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
858 if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)){
859 if (tdev->bus == s2io_pdev->bus->parent)
860 return 1;
861 }
862 }
863 return 0;
864 }
865
866 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
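/* Bus speed in MHz indexed by the mode value from GET_PCI_MODE(); the
 * PCI-X mode-2 entries appear to reflect the doubled effective data
 * rate (66/100/133 MHz clocks reported as 133/200/266).
 */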
867 /**
868 * s2io_print_pci_mode - Log the PCI/PCI-X bus width and speed in use.
869 */
870 static int s2io_print_pci_mode(nic_t *nic)
871 {
872 XENA_dev_config_t __iomem *bar0 = nic->bar0;
873 register u64 val64 = 0;
874 int mode;
875 struct config_param *config = &nic->config;
876
877 val64 = readq(&bar0->pci_mode);
878 mode = (u8)GET_PCI_MODE(val64);
879
880 if (val64 & PCI_MODE_UNKNOWN_MODE)
881 return -1; /* Unknown PCI mode */
882
883 config->bus_speed = bus_speed[mode];
884
885 if (s2io_on_nec_bridge(nic->pdev)) {
886 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
887 nic->dev->name);
888 return mode;
889 }
890
891 if (val64 & PCI_MODE_32_BITS) {
892 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
893 } else {
894 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
895 }
896
897 switch(mode) {
898 case PCI_MODE_PCI_33:
899 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
900 break;
901 case PCI_MODE_PCI_66:
902 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
903 break;
904 case PCI_MODE_PCIX_M1_66:
905 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
906 break;
907 case PCI_MODE_PCIX_M1_100:
908 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
909 break;
910 case PCI_MODE_PCIX_M1_133:
911 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
912 break;
913 case PCI_MODE_PCIX_M2_66:
914 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
915 break;
916 case PCI_MODE_PCIX_M2_100:
917 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
918 break;
919 case PCI_MODE_PCIX_M2_133:
920 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
921 break;
922 default:
923 return -1; /* Unsupported bus speed */
924 }
925
926 return mode;
927 }
928
929 /**
930 * init_nic - Initialization of hardware
931 * @nic: device private variable
932 * Description: The function sequentially configures every block
933 * of the H/W from their reset values.
934 * Return Value: SUCCESS on success and
935 * '-1' on failure (endian settings incorrect).
936 */
937
938 static int init_nic(struct s2io_nic *nic)
939 {
940 XENA_dev_config_t __iomem *bar0 = nic->bar0;
941 struct net_device *dev = nic->dev;
942 register u64 val64 = 0;
943 void __iomem *add;
944 u32 time;
945 int i, j;
946 mac_info_t *mac_control;
947 struct config_param *config;
948 int dtx_cnt = 0;
949 unsigned long long mem_share;
950 int mem_size;
951
952 mac_control = &nic->mac_control;
953 config = &nic->config;
954
955 /* Set the swapper control on the card */
956 if (s2io_set_swapper(nic)) {
957 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
958 return -1;
959 }
960
961 /*
962 * Herc requires EOI to be removed from reset before XGXS, so do that first.
963 */
964 if (nic->device_type & XFRAME_II_DEVICE) {
965 val64 = 0xA500000000ULL;
966 writeq(val64, &bar0->sw_reset);
967 msleep(500);
968 val64 = readq(&bar0->sw_reset);
969 }
970
971 /* Remove XGXS from reset state */
972 val64 = 0;
973 writeq(val64, &bar0->sw_reset);
974 msleep(500);
975 val64 = readq(&bar0->sw_reset);
976
977 /* Enable Receiving broadcasts */
978 add = &bar0->mac_cfg;
979 val64 = readq(&bar0->mac_cfg);
980 val64 |= MAC_RMAC_BCAST_ENABLE;
981 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
982 writel((u32) val64, add);
983 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
984 writel((u32) (val64 >> 32), (add + 4));
985
986 /* Read registers in all blocks */
987 val64 = readq(&bar0->mac_int_mask);
988 val64 = readq(&bar0->mc_int_mask);
989 val64 = readq(&bar0->xgxs_int_mask);
990
991 /* Set MTU */
992 val64 = dev->mtu;
993 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
994
995 if (nic->device_type & XFRAME_II_DEVICE) {
996 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
997 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
998 &bar0->dtx_control, UF);
999 if (dtx_cnt & 0x1)
1000 msleep(1); /* Necessary!! */
1001 dtx_cnt++;
1002 }
1003 } else {
1004 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1005 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1006 &bar0->dtx_control, UF);
1007 val64 = readq(&bar0->dtx_control);
1008 dtx_cnt++;
1009 }
1010 }
1011
1012 /* Tx DMA Initialization */
1013 val64 = 0;
1014 writeq(val64, &bar0->tx_fifo_partition_0);
1015 writeq(val64, &bar0->tx_fifo_partition_1);
1016 writeq(val64, &bar0->tx_fifo_partition_2);
1017 writeq(val64, &bar0->tx_fifo_partition_3);
1018
1019
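/* Each 64-bit tx_fifo_partition register describes two FIFOs (one per
 * 32-bit half: length and priority fields), so a register is flushed
 * after every odd-numbered FIFO; when tx_fifo_num is odd, i is bumped
 * to the next odd value so the final half-filled register still gets
 * written.
 */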
1020 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1021 val64 |=
1022 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1023 13) | vBIT(config->tx_cfg[i].fifo_priority,
1024 ((i * 32) + 5), 3);
1025
1026 if (i == (config->tx_fifo_num - 1)) {
1027 if (i % 2 == 0)
1028 i++;
1029 }
1030
1031 switch (i) {
1032 case 1:
1033 writeq(val64, &bar0->tx_fifo_partition_0);
1034 val64 = 0;
1035 break;
1036 case 3:
1037 writeq(val64, &bar0->tx_fifo_partition_1);
1038 val64 = 0;
1039 break;
1040 case 5:
1041 writeq(val64, &bar0->tx_fifo_partition_2);
1042 val64 = 0;
1043 break;
1044 case 7:
1045 writeq(val64, &bar0->tx_fifo_partition_3);
1046 break;
1047 }
1048 }
1049
1050 /*
1051 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1052 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1053 */
1054 if ((nic->device_type == XFRAME_I_DEVICE) &&
1055 (get_xena_rev_id(nic->pdev) < 4))
1056 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1057
1058 val64 = readq(&bar0->tx_fifo_partition_0);
1059 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1060 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1061
1062 /*
1063 * Initialization of Tx_PA_CONFIG register to ignore packet
1064 * integrity checking.
1065 */
1066 val64 = readq(&bar0->tx_pa_cfg);
1067 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1068 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1069 writeq(val64, &bar0->tx_pa_cfg);
1070
1071 /* Rx DMA initialization. */
1072 val64 = 0;
1073 for (i = 0; i < config->rx_ring_num; i++) {
1074 val64 |=
1075 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1076 3);
1077 }
1078 writeq(val64, &bar0->rx_queue_priority);
1079
1080 /*
1081 * Allocating equal share of memory to all the
1082 * configured Rings.
1083 */
1084 val64 = 0;
1085 if (nic->device_type & XFRAME_II_DEVICE)
1086 mem_size = 32;
1087 else
1088 mem_size = 64;
1089
1090 for (i = 0; i < config->rx_ring_num; i++) {
1091 switch (i) {
1092 case 0:
1093 mem_share = (mem_size / config->rx_ring_num +
1094 mem_size % config->rx_ring_num);
1095 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1096 continue;
1097 case 1:
1098 mem_share = (mem_size / config->rx_ring_num);
1099 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1100 continue;
1101 case 2:
1102 mem_share = (mem_size / config->rx_ring_num);
1103 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1104 continue;
1105 case 3:
1106 mem_share = (mem_size / config->rx_ring_num);
1107 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1108 continue;
1109 case 4:
1110 mem_share = (mem_size / config->rx_ring_num);
1111 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1112 continue;
1113 case 5:
1114 mem_share = (mem_size / config->rx_ring_num);
1115 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1116 continue;
1117 case 6:
1118 mem_share = (mem_size / config->rx_ring_num);
1119 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1120 continue;
1121 case 7:
1122 mem_share = (mem_size / config->rx_ring_num);
1123 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1124 continue;
1125 }
1126 }
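/* Every RX_QUEUE_CFG_Qn_SZ field receives an equal share of mem_size
 * (64 units on Xena, 32 on Herc); ring 0 also absorbs the remainder
 * of the division, so the shares always sum to the whole.
 */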
1127 writeq(val64, &bar0->rx_queue_cfg);
1128
1129 /*
1130 * Filling Tx round robin registers
1131 * as per the number of FIFOs
1132 */
1133 switch (config->tx_fifo_num) {
1134 case 1:
1135 val64 = 0x0000000000000000ULL;
1136 writeq(val64, &bar0->tx_w_round_robin_0);
1137 writeq(val64, &bar0->tx_w_round_robin_1);
1138 writeq(val64, &bar0->tx_w_round_robin_2);
1139 writeq(val64, &bar0->tx_w_round_robin_3);
1140 writeq(val64, &bar0->tx_w_round_robin_4);
1141 break;
1142 case 2:
1143 val64 = 0x0000010000010000ULL;
1144 writeq(val64, &bar0->tx_w_round_robin_0);
1145 val64 = 0x0100000100000100ULL;
1146 writeq(val64, &bar0->tx_w_round_robin_1);
1147 val64 = 0x0001000001000001ULL;
1148 writeq(val64, &bar0->tx_w_round_robin_2);
1149 val64 = 0x0000010000010000ULL;
1150 writeq(val64, &bar0->tx_w_round_robin_3);
1151 val64 = 0x0100000000000000ULL;
1152 writeq(val64, &bar0->tx_w_round_robin_4);
1153 break;
1154 case 3:
1155 val64 = 0x0001000102000001ULL;
1156 writeq(val64, &bar0->tx_w_round_robin_0);
1157 val64 = 0x0001020000010001ULL;
1158 writeq(val64, &bar0->tx_w_round_robin_1);
1159 val64 = 0x0200000100010200ULL;
1160 writeq(val64, &bar0->tx_w_round_robin_2);
1161 val64 = 0x0001000102000001ULL;
1162 writeq(val64, &bar0->tx_w_round_robin_3);
1163 val64 = 0x0001020000000000ULL;
1164 writeq(val64, &bar0->tx_w_round_robin_4);
1165 break;
1166 case 4:
1167 val64 = 0x0001020300010200ULL;
1168 writeq(val64, &bar0->tx_w_round_robin_0);
1169 val64 = 0x0100000102030001ULL;
1170 writeq(val64, &bar0->tx_w_round_robin_1);
1171 val64 = 0x0200010000010203ULL;
1172 writeq(val64, &bar0->tx_w_round_robin_2);
1173 val64 = 0x0001020001000001ULL;
1174 writeq(val64, &bar0->tx_w_round_robin_3);
1175 val64 = 0x0203000100000000ULL;
1176 writeq(val64, &bar0->tx_w_round_robin_4);
1177 break;
1178 case 5:
1179 val64 = 0x0001000203000102ULL;
1180 writeq(val64, &bar0->tx_w_round_robin_0);
1181 val64 = 0x0001020001030004ULL;
1182 writeq(val64, &bar0->tx_w_round_robin_1);
1183 val64 = 0x0001000203000102ULL;
1184 writeq(val64, &bar0->tx_w_round_robin_2);
1185 val64 = 0x0001020001030004ULL;
1186 writeq(val64, &bar0->tx_w_round_robin_3);
1187 val64 = 0x0001000000000000ULL;
1188 writeq(val64, &bar0->tx_w_round_robin_4);
1189 break;
1190 case 6:
1191 val64 = 0x0001020304000102ULL;
1192 writeq(val64, &bar0->tx_w_round_robin_0);
1193 val64 = 0x0304050001020001ULL;
1194 writeq(val64, &bar0->tx_w_round_robin_1);
1195 val64 = 0x0203000100000102ULL;
1196 writeq(val64, &bar0->tx_w_round_robin_2);
1197 val64 = 0x0304000102030405ULL;
1198 writeq(val64, &bar0->tx_w_round_robin_3);
1199 val64 = 0x0001000200000000ULL;
1200 writeq(val64, &bar0->tx_w_round_robin_4);
1201 break;
1202 case 7:
1203 val64 = 0x0001020001020300ULL;
1204 writeq(val64, &bar0->tx_w_round_robin_0);
1205 val64 = 0x0102030400010203ULL;
1206 writeq(val64, &bar0->tx_w_round_robin_1);
1207 val64 = 0x0405060001020001ULL;
1208 writeq(val64, &bar0->tx_w_round_robin_2);
1209 val64 = 0x0304050000010200ULL;
1210 writeq(val64, &bar0->tx_w_round_robin_3);
1211 val64 = 0x0102030000000000ULL;
1212 writeq(val64, &bar0->tx_w_round_robin_4);
1213 break;
1214 case 8:
1215 val64 = 0x0001020300040105ULL;
1216 writeq(val64, &bar0->tx_w_round_robin_0);
1217 val64 = 0x0200030106000204ULL;
1218 writeq(val64, &bar0->tx_w_round_robin_1);
1219 val64 = 0x0103000502010007ULL;
1220 writeq(val64, &bar0->tx_w_round_robin_2);
1221 val64 = 0x0304010002060500ULL;
1222 writeq(val64, &bar0->tx_w_round_robin_3);
1223 val64 = 0x0103020400000000ULL;
1224 writeq(val64, &bar0->tx_w_round_robin_4);
1225 break;
1226 }
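/* Each tx_w_round_robin register holds eight one-byte slots naming the
 * FIFO to be serviced next; the five registers together form the
 * round-robin calendar that spreads transmit service across the
 * configured FIFOs.
 */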
1227
1228 /* Enable all configured Tx FIFO partitions */
1229 val64 = readq(&bar0->tx_fifo_partition_0);
1230 val64 |= (TX_FIFO_PARTITION_EN);
1231 writeq(val64, &bar0->tx_fifo_partition_0);
1232
1233 /* Filling the Rx round robin registers as per the
1234 * number of Rings and steering based on QoS.
1235 */
1236 switch (config->rx_ring_num) {
1237 case 1:
1238 val64 = 0x8080808080808080ULL;
1239 writeq(val64, &bar0->rts_qos_steering);
1240 break;
1241 case 2:
1242 val64 = 0x0000010000010000ULL;
1243 writeq(val64, &bar0->rx_w_round_robin_0);
1244 val64 = 0x0100000100000100ULL;
1245 writeq(val64, &bar0->rx_w_round_robin_1);
1246 val64 = 0x0001000001000001ULL;
1247 writeq(val64, &bar0->rx_w_round_robin_2);
1248 val64 = 0x0000010000010000ULL;
1249 writeq(val64, &bar0->rx_w_round_robin_3);
1250 val64 = 0x0100000000000000ULL;
1251 writeq(val64, &bar0->rx_w_round_robin_4);
1252
1253 val64 = 0x8080808040404040ULL;
1254 writeq(val64, &bar0->rts_qos_steering);
1255 break;
1256 case 3:
1257 val64 = 0x0001000102000001ULL;
1258 writeq(val64, &bar0->rx_w_round_robin_0);
1259 val64 = 0x0001020000010001ULL;
1260 writeq(val64, &bar0->rx_w_round_robin_1);
1261 val64 = 0x0200000100010200ULL;
1262 writeq(val64, &bar0->rx_w_round_robin_2);
1263 val64 = 0x0001000102000001ULL;
1264 writeq(val64, &bar0->rx_w_round_robin_3);
1265 val64 = 0x0001020000000000ULL;
1266 writeq(val64, &bar0->rx_w_round_robin_4);
1267
1268 val64 = 0x8080804040402020ULL;
1269 writeq(val64, &bar0->rts_qos_steering);
1270 break;
1271 case 4:
1272 val64 = 0x0001020300010200ULL;
1273 writeq(val64, &bar0->rx_w_round_robin_0);
1274 val64 = 0x0100000102030001ULL;
1275 writeq(val64, &bar0->rx_w_round_robin_1);
1276 val64 = 0x0200010000010203ULL;
1277 writeq(val64, &bar0->rx_w_round_robin_2);
1278 val64 = 0x0001020001000001ULL;
1279 writeq(val64, &bar0->rx_w_round_robin_3);
1280 val64 = 0x0203000100000000ULL;
1281 writeq(val64, &bar0->rx_w_round_robin_4);
1282
1283 val64 = 0x8080404020201010ULL;
1284 writeq(val64, &bar0->rts_qos_steering);
1285 break;
1286 case 5:
1287 val64 = 0x0001000203000102ULL;
1288 writeq(val64, &bar0->rx_w_round_robin_0);
1289 val64 = 0x0001020001030004ULL;
1290 writeq(val64, &bar0->rx_w_round_robin_1);
1291 val64 = 0x0001000203000102ULL;
1292 writeq(val64, &bar0->rx_w_round_robin_2);
1293 val64 = 0x0001020001030004ULL;
1294 writeq(val64, &bar0->rx_w_round_robin_3);
1295 val64 = 0x0001000000000000ULL;
1296 writeq(val64, &bar0->rx_w_round_robin_4);
1297
1298 val64 = 0x8080404020201008ULL;
1299 writeq(val64, &bar0->rts_qos_steering);
1300 break;
1301 case 6:
1302 val64 = 0x0001020304000102ULL;
1303 writeq(val64, &bar0->rx_w_round_robin_0);
1304 val64 = 0x0304050001020001ULL;
1305 writeq(val64, &bar0->rx_w_round_robin_1);
1306 val64 = 0x0203000100000102ULL;
1307 writeq(val64, &bar0->rx_w_round_robin_2);
1308 val64 = 0x0304000102030405ULL;
1309 writeq(val64, &bar0->rx_w_round_robin_3);
1310 val64 = 0x0001000200000000ULL;
1311 writeq(val64, &bar0->rx_w_round_robin_4);
1312
1313 val64 = 0x8080404020100804ULL;
1314 writeq(val64, &bar0->rts_qos_steering);
1315 break;
1316 case 7:
1317 val64 = 0x0001020001020300ULL;
1318 writeq(val64, &bar0->rx_w_round_robin_0);
1319 val64 = 0x0102030400010203ULL;
1320 writeq(val64, &bar0->rx_w_round_robin_1);
1321 val64 = 0x0405060001020001ULL;
1322 writeq(val64, &bar0->rx_w_round_robin_2);
1323 val64 = 0x0304050000010200ULL;
1324 writeq(val64, &bar0->rx_w_round_robin_3);
1325 val64 = 0x0102030000000000ULL;
1326 writeq(val64, &bar0->rx_w_round_robin_4);
1327
1328 val64 = 0x8080402010080402ULL;
1329 writeq(val64, &bar0->rts_qos_steering);
1330 break;
1331 case 8:
1332 val64 = 0x0001020300040105ULL;
1333 writeq(val64, &bar0->rx_w_round_robin_0);
1334 val64 = 0x0200030106000204ULL;
1335 writeq(val64, &bar0->rx_w_round_robin_1);
1336 val64 = 0x0103000502010007ULL;
1337 writeq(val64, &bar0->rx_w_round_robin_2);
1338 val64 = 0x0304010002060500ULL;
1339 writeq(val64, &bar0->rx_w_round_robin_3);
1340 val64 = 0x0103020400000000ULL;
1341 writeq(val64, &bar0->rx_w_round_robin_4);
1342
1343 val64 = 0x8040201008040201ULL;
1344 writeq(val64, &bar0->rts_qos_steering);
1345 break;
1346 }
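/* rx_w_round_robin mirrors its Tx counterpart (one ring number per
 * byte).  rts_qos_steering appears to map the eight priority levels to
 * rings as one-hot byte masks: 0x80 selects ring 0, 0x40 ring 1, and
 * so on, so with eight rings each priority gets a ring of its own.
 */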
1347
1348 /* UDP Fix */
1349 val64 = 0;
1350 for (i = 0; i < 8; i++)
1351 writeq(val64, &bar0->rts_frm_len_n[i]);
1352
1353 /* Set the default rts frame length for the rings configured */
1354 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1355 for (i = 0 ; i < config->rx_ring_num ; i++)
1356 writeq(val64, &bar0->rts_frm_len_n[i]);
1357
1358 /* Set the frame length for the configured rings
1359 * desired by the user
1360 */
1361 for (i = 0; i < config->rx_ring_num; i++) {
1362 /* If rts_frm_len[i] == 0, it is assumed that the user has
1363 * not requested frame length steering for this ring.
1364 * If the user provides a frame length, program the
1365 * rts_frm_len register with that value; otherwise
1366 * leave it as it is.
1367 */
1368 if (rts_frm_len[i] != 0) {
1369 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1370 &bar0->rts_frm_len_n[i]);
1371 }
1372 }
1373
1374 /* Program statistics memory */
1375 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1376
1377 if (nic->device_type == XFRAME_II_DEVICE) {
1378 val64 = STAT_BC(0x320);
1379 writeq(val64, &bar0->stat_byte_cnt);
1380 }
1381
1382 /*
1383 * Initializing the sampling rate for the device to calculate the
1384 * bandwidth utilization.
1385 */
1386 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1387 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1388 writeq(val64, &bar0->mac_link_util);
1389
1390
1391 /*
1392 * Initializing the Transmit and Receive Traffic Interrupt
1393 * Scheme.
1394 */
1395 /*
1396 * TTI Initialization. Default Tx timer gets us about
1397 * 250 interrupts per sec. Continuous interrupts are enabled
1398 * by default.
1399 */
1400 if (nic->device_type == XFRAME_II_DEVICE) {
1401 int count = (nic->config.bus_speed * 125)/2;
1402 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1403 } else {
1404
1405 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1406 }
1407 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1408 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1409 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1410 if (use_continuous_tx_intrs)
1411 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1412 writeq(val64, &bar0->tti_data1_mem);
1413
1414 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1415 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1416 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1417 writeq(val64, &bar0->tti_data2_mem);
1418
1419 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1420 writeq(val64, &bar0->tti_command_mem);
1421
1422 /*
1423 * Once the operation completes, the Strobe bit of the command
1424 * register will be reset. We poll for this particular condition.
1425 * We wait for a maximum of 500ms for the operation to complete;
1426 * if it's not complete by then we return an error.
1427 */
1428 time = 0;
1429 while (TRUE) {
1430 val64 = readq(&bar0->tti_command_mem);
1431 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1432 break;
1433 }
1434 if (time > 10) {
1435 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1436 dev->name);
1437 return -1;
1438 }
1439 msleep(50);
1440 time++;
1441 }
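/* 10 polls with a 50ms sleep each give the ~500ms budget noted above. */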
1442
1443 if (nic->config.bimodal) {
1444 int k = 0;
1445 for (k = 0; k < config->rx_ring_num; k++) {
1446 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1447 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1448 writeq(val64, &bar0->tti_command_mem);
1449
1450 /*
1451 * Once the operation completes, the Strobe bit of the command
1452 * register will be reset. We poll for this particular condition.
1453 * We wait for a maximum of 500ms for the operation to complete;
1454 * if it's not complete by then we return an error.
1455 */
1456 time = 0;
1457 while (TRUE) {
1458 val64 = readq(&bar0->tti_command_mem);
1459 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1460 break;
1461 }
1462 if (time > 10) {
1463 DBG_PRINT(ERR_DBG,
1464 "%s: TTI init Failed\n",
1465 dev->name);
1466 return -1;
1467 }
1468 time++;
1469 msleep(50);
1470 }
1471 }
1472 } else {
1473
1474 /* RTI Initialization */
1475 if (nic->device_type == XFRAME_II_DEVICE) {
1476 /*
1477 * Programmed to generate approximately 500
1478 * interrupts per second
1479 */
1480 int count = (nic->config.bus_speed * 125)/4;
1481 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1482 } else {
1483 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1484 }
1485 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1486 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1487 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1488
1489 writeq(val64, &bar0->rti_data1_mem);
1490
1491 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1492 RTI_DATA2_MEM_RX_UFC_B(0x2);
1493 if (nic->intr_type == MSI_X)
1494 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1495 RTI_DATA2_MEM_RX_UFC_D(0x40));
1496 else
1497 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1498 RTI_DATA2_MEM_RX_UFC_D(0x80));
1499 writeq(val64, &bar0->rti_data2_mem);
1500
1501 for (i = 0; i < config->rx_ring_num; i++) {
1502 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1503 | RTI_CMD_MEM_OFFSET(i);
1504 writeq(val64, &bar0->rti_command_mem);
1505
1506 /*
1507 * Once the operation completes, the Strobe bit of the
1508 * command register will be reset. We poll for this
1509 * particular condition. We wait for a maximum of 500ms
1510 * for the operation to complete, if it's not complete
1511 * by then we return error.
1512 */
1513 time = 0;
1514 while (TRUE) {
1515 val64 = readq(&bar0->rti_command_mem);
1516 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1517 break;
1518 }
1519 if (time > 10) {
1520 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1521 dev->name);
1522 return -1;
1523 }
1524 time++;
1525 msleep(50);
1526 }
1527 }
1528 }
1529
1530 /*
1531 * Initializing proper pause threshold values for all
1532 * 8 queues on the Rx side.
1533 */
1534 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1535 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1536
1537 /* Disable RMAC PAD STRIPPING */
1538 add = &bar0->mac_cfg;
1539 val64 = readq(&bar0->mac_cfg);
1540 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1541 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1542 writel((u32) (val64), add);
1543 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1544 writel((u32) (val64 >> 32), (add + 4));
1545 val64 = readq(&bar0->mac_cfg);
1546
1547 /* Enable FCS stripping by adapter */
1548 add = &bar0->mac_cfg;
1549 val64 = readq(&bar0->mac_cfg);
1550 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1551 if (nic->device_type == XFRAME_II_DEVICE)
1552 writeq(val64, &bar0->mac_cfg);
1553 else {
1554 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1555 writel((u32) (val64), add);
1556 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1557 writel((u32) (val64 >> 32), (add + 4));
1558 }
1559
1560 /*
1561 * Set the time value to be inserted in the pause frame
1562 * generated by xena.
1563 */
1564 val64 = readq(&bar0->rmac_pause_cfg);
1565 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1566 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1567 writeq(val64, &bar0->rmac_pause_cfg);
1568
1569 /*
1570 * Set the threshold limit for generating a pause frame.
1571 * If the amount of data in any queue exceeds the ratio
1572 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1573 * a pause frame is generated.
1574 */
1575 val64 = 0;
1576 for (i = 0; i < 4; i++) {
1577 val64 |=
1578 (((u64) 0xFF00 | nic->mac_control.
1579 mc_pause_threshold_q0q3)
1580 << (i * 2 * 8));
1581 }
1582 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1583
1584 val64 = 0;
1585 for (i = 0; i < 4; i++) {
1586 val64 |=
1587 (((u64) 0xFF00 | nic->mac_control.
1588 mc_pause_threshold_q4q7)
1589 << (i * 2 * 8));
1590 }
1591 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1592
1593 /*
1594 * TxDMA will stop Read request if the number of read split has
1595 * exceeded the limit pointed by shared_splits
1596 */
1597 val64 = readq(&bar0->pic_control);
1598 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1599 writeq(val64, &bar0->pic_control);
1600
1601 if (nic->config.bus_speed == 266) {
1602 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1603 writeq(0x0, &bar0->read_retry_delay);
1604 writeq(0x0, &bar0->write_retry_delay);
1605 }
1606
1607 /*
1608 * Programming the Herc to split every write transaction
1609 * that does not start on an ADB to reduce disconnects.
1610 */
1611 if (nic->device_type == XFRAME_II_DEVICE) {
1612 val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
1613 writeq(val64, &bar0->misc_control);
1614 val64 = readq(&bar0->pic_control2);
1615 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1616 writeq(val64, &bar0->pic_control2);
1617 }
1618 if (strstr(nic->product_name, "CX4")) {
1619 val64 = TMAC_AVG_IPG(0x17);
1620 writeq(val64, &bar0->tmac_avg_ipg);
1621 }
1622
1623 return SUCCESS;
1624 }
1625 #define LINK_UP_DOWN_INTERRUPT 1
1626 #define MAC_RMAC_ERR_TIMER 2
1627
1628 static int s2io_link_fault_indication(nic_t *nic)
1629 {
1630 if (nic->intr_type != INTA)
1631 return MAC_RMAC_ERR_TIMER;
1632 if (nic->device_type == XFRAME_II_DEVICE)
1633 return LINK_UP_DOWN_INTERRUPT;
1634 else
1635 return MAC_RMAC_ERR_TIMER;
1636 }
1637
1638 /**
1639 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1640 * @nic: device private variable,
1641 * @mask: A mask indicating which Intr block must be modified.
1642 * @flag: A flag indicating whether to enable or disable the Intrs.
1643 * Description: This function will either disable or enable the interrupts
1644 * depending on the flag argument. The mask argument can be used to
1645 * enable/disable any Intr block.
1646 * Return Value: NONE.
1647 */
1648
1649 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1650 {
1651 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1652 register u64 val64 = 0, temp64 = 0;
1653
1654 /* Top level interrupt classification */
1655 /* PIC Interrupts */
1656 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1657 /* Enable PIC Intrs in the general intr mask register */
1658 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1659 if (flag == ENABLE_INTRS) {
1660 temp64 = readq(&bar0->general_int_mask);
1661 temp64 &= ~((u64) val64);
1662 writeq(temp64, &bar0->general_int_mask);
1663 /*
1664 * If Hercules adapter enable GPIO otherwise
1665 * disable all PCIX, Flash, MDIO, IIC and GPIO
1666 * interrupts for now.
1667 * TODO
1668 */
1669 if (s2io_link_fault_indication(nic) ==
1670 LINK_UP_DOWN_INTERRUPT ) {
1671 temp64 = readq(&bar0->pic_int_mask);
1672 temp64 &= ~((u64) PIC_INT_GPIO);
1673 writeq(temp64, &bar0->pic_int_mask);
1674 temp64 = readq(&bar0->gpio_int_mask);
1675 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1676 writeq(temp64, &bar0->gpio_int_mask);
1677 } else {
1678 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1679 }
1680 /*
1681 * No MSI Support is available presently, so TTI and
1682 * RTI interrupts are also disabled.
1683 */
1684 } else if (flag == DISABLE_INTRS) {
1685 /*
1686 * Disable PIC Intrs in the general
1687 * intr mask register
1688 */
1689 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1690 temp64 = readq(&bar0->general_int_mask);
1691 val64 |= temp64;
1692 writeq(val64, &bar0->general_int_mask);
1693 }
1694 }
1695
1696 /* DMA Interrupts */
1697 /* Enabling/Disabling Tx DMA interrupts */
1698 if (mask & TX_DMA_INTR) {
1699 /* Enable TxDMA Intrs in the general intr mask register */
1700 val64 = TXDMA_INT_M;
1701 if (flag == ENABLE_INTRS) {
1702 temp64 = readq(&bar0->general_int_mask);
1703 temp64 &= ~((u64) val64);
1704 writeq(temp64, &bar0->general_int_mask);
1705 /*
1706 * Keep all interrupts other than PFC interrupt
1707 * and PCC interrupt disabled in DMA level.
1708 */
1709 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1710 TXDMA_PCC_INT_M);
1711 writeq(val64, &bar0->txdma_int_mask);
1712 /*
1713 * Enable only the MISC error 1 interrupt in PFC block
1714 */
1715 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1716 writeq(val64, &bar0->pfc_err_mask);
1717 /*
1718 * Enable only the FB_ECC error interrupt in PCC block
1719 */
1720 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1721 writeq(val64, &bar0->pcc_err_mask);
1722 } else if (flag == DISABLE_INTRS) {
1723 /*
1724 * Disable TxDMA Intrs in the general intr mask
1725 * register
1726 */
1727 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1728 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1729 temp64 = readq(&bar0->general_int_mask);
1730 val64 |= temp64;
1731 writeq(val64, &bar0->general_int_mask);
1732 }
1733 }
1734
1735 /* Enabling/Disabling Rx DMA interrupts */
1736 if (mask & RX_DMA_INTR) {
1737 /* Enable RxDMA Intrs in the general intr mask register */
1738 val64 = RXDMA_INT_M;
1739 if (flag == ENABLE_INTRS) {
1740 temp64 = readq(&bar0->general_int_mask);
1741 temp64 &= ~((u64) val64);
1742 writeq(temp64, &bar0->general_int_mask);
1743 /*
1744 * All RxDMA block interrupts are disabled for now
1745 * TODO
1746 */
1747 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1748 } else if (flag == DISABLE_INTRS) {
1749 /*
1750 * Disable RxDMA Intrs in the general intr mask
1751 * register
1752 */
1753 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1754 temp64 = readq(&bar0->general_int_mask);
1755 val64 |= temp64;
1756 writeq(val64, &bar0->general_int_mask);
1757 }
1758 }
1759
1760 /* MAC Interrupts */
1761 /* Enabling/Disabling MAC interrupts */
1762 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1763 val64 = TXMAC_INT_M | RXMAC_INT_M;
1764 if (flag == ENABLE_INTRS) {
1765 temp64 = readq(&bar0->general_int_mask);
1766 temp64 &= ~((u64) val64);
1767 writeq(temp64, &bar0->general_int_mask);
1768 /*
1769 * All MAC block error interrupts are disabled for now
1770 * TODO
1771 */
1772 } else if (flag == DISABLE_INTRS) {
1773 /*
1774 * Disable MAC Intrs in the general intr mask register
1775 */
1776 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1777 writeq(DISABLE_ALL_INTRS,
1778 &bar0->mac_rmac_err_mask);
1779
1780 temp64 = readq(&bar0->general_int_mask);
1781 val64 |= temp64;
1782 writeq(val64, &bar0->general_int_mask);
1783 }
1784 }
1785
1786 /* XGXS Interrupts */
1787 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1788 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1789 if (flag == ENABLE_INTRS) {
1790 temp64 = readq(&bar0->general_int_mask);
1791 temp64 &= ~((u64) val64);
1792 writeq(temp64, &bar0->general_int_mask);
1793 /*
1794 * All XGXS block error interrupts are disabled for now
1795 * TODO
1796 */
1797 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1798 } else if (flag == DISABLE_INTRS) {
1799 /*
1800 * Disable MC Intrs in the general intr mask register
1801 */
1802 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1803 temp64 = readq(&bar0->general_int_mask);
1804 val64 |= temp64;
1805 writeq(val64, &bar0->general_int_mask);
1806 }
1807 }
1808
1809 /* Memory Controller(MC) interrupts */
1810 if (mask & MC_INTR) {
1811 val64 = MC_INT_M;
1812 if (flag == ENABLE_INTRS) {
1813 temp64 = readq(&bar0->general_int_mask);
1814 temp64 &= ~((u64) val64);
1815 writeq(temp64, &bar0->general_int_mask);
1816 /*
1817 * Enable all MC Intrs.
1818 */
1819 writeq(0x0, &bar0->mc_int_mask);
1820 writeq(0x0, &bar0->mc_err_mask);
1821 } else if (flag == DISABLE_INTRS) {
1822 /*
1823 * Disable MC Intrs in the general intr mask register
1824 */
1825 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1826 temp64 = readq(&bar0->general_int_mask);
1827 val64 |= temp64;
1828 writeq(val64, &bar0->general_int_mask);
1829 }
1830 }
1831
1832
1833 /* Tx traffic interrupts */
1834 if (mask & TX_TRAFFIC_INTR) {
1835 val64 = TXTRAFFIC_INT_M;
1836 if (flag == ENABLE_INTRS) {
1837 temp64 = readq(&bar0->general_int_mask);
1838 temp64 &= ~((u64) val64);
1839 writeq(temp64, &bar0->general_int_mask);
1840 /*
1841 * Enable all the Tx side interrupts
1842 * writing 0 Enables all 64 TX interrupt levels
1843 */
1844 writeq(0x0, &bar0->tx_traffic_mask);
1845 } else if (flag == DISABLE_INTRS) {
1846 /*
1847 * Disable Tx Traffic Intrs in the general intr mask
1848 * register.
1849 */
1850 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1851 temp64 = readq(&bar0->general_int_mask);
1852 val64 |= temp64;
1853 writeq(val64, &bar0->general_int_mask);
1854 }
1855 }
1856
1857 /* Rx traffic interrupts */
1858 if (mask & RX_TRAFFIC_INTR) {
1859 val64 = RXTRAFFIC_INT_M;
1860 if (flag == ENABLE_INTRS) {
1861 temp64 = readq(&bar0->general_int_mask);
1862 temp64 &= ~((u64) val64);
1863 writeq(temp64, &bar0->general_int_mask);
1864 /* writing 0 Enables all 8 RX interrupt levels */
1865 writeq(0x0, &bar0->rx_traffic_mask);
1866 } else if (flag == DISABLE_INTRS) {
1867 /*
1868 * Disable Rx Traffic Intrs in the general intr mask
1869 * register.
1870 */
1871 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1872 temp64 = readq(&bar0->general_int_mask);
1873 val64 |= temp64;
1874 writeq(val64, &bar0->general_int_mask);
1875 }
1876 }
1877 }
1878
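/*
 * Note on the convention used throughout en_dis_able_nic_intrs(): the
 * interrupt mask registers are active-high masks, so a 1 in a mask bit
 * blocks that interrupt and a 0 lets it through.  Enabling a block is
 * therefore a read-modify-write that clears bits, a minimal sketch:
 *
 *	temp64 = readq(&bar0->general_int_mask);
 *	temp64 &= ~((u64) val64);	(unmask, i.e. enable)
 *	writeq(temp64, &bar0->general_int_mask);
 *
 * while disabling ORs the same bits back in; DISABLE_ALL_INTRS is
 * effectively the all-ones pattern for these registers.
 */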
1879 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1880 {
1881 int ret = 0;
1882
1883 if (flag == FALSE) {
1884 if ((!herc && (rev_id >= 4)) || herc) {
1885 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1886 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1887 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1888 ret = 1;
1889 }
1890 } else {
1891 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1892 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1893 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1894 ret = 1;
1895 }
1896 }
1897 } else {
1898 if ((!herc && (rev_id >= 4)) || herc) {
1899 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1900 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1901 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1902 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1903 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1904 ret = 1;
1905 }
1906 } else {
1907 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1908 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1909 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1910 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1911 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1912 ret = 1;
1913 }
1914 }
1915 }
1916
1917 return ret;
1918 }
1919 /**
1920 * verify_xena_quiescence - Checks whether the H/W is ready
1921 * @val64 : Value read from adapter status register.
1922 * @flag : indicates if the adapter enable bit was ever written once
1923 * before.
1924 * Description: Returns whether the H/W is ready to go or not. The
1925 * comparison differs depending on whether the adapter enable bit has
1926 * been written before, and the calling function passes the flag
1927 * argument to indicate this.
1928 * Return: 1 if Xena is quiescent
1929 * 0 if Xena is not quiescent
1930 */
1931
1932 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1933 {
1934 int ret = 0, herc;
1935 u64 tmp64 = ~((u64) val64);
1936 int rev_id = get_xena_rev_id(sp->pdev);
1937
1938 herc = (sp->device_type == XFRAME_II_DEVICE);
1939 if (!(tmp64 &
1940 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1941 ADAPTER_STATUS_PFC_READY |
1942 ADAPTER_STATUS_TMAC_BUF_EMPTY |
1943 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1944 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1945 ADAPTER_STATUS_P_PLL_LOCK))) {
1946 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1947 }
1948
1949 return ret;
1950 }
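/*
 * The check above relies on !(~val64 & MASK) being equivalent to
 * (val64 & MASK) == MASK: inverting the status word turns "all READY
 * bits set" into "no READY bits set in the complement".  A worked
 * example with a hypothetical 4-bit mask 0xB: val64 = 0xF gives
 * ~val64 & 0xB = 0, i.e. ready; val64 = 0xD gives ~val64 & 0xB = 0x2,
 * so at least one block is not ready yet.
 */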
1951
1952 /**
1953 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1954 * @sp: Pointer to device specific structure
1955 * Description :
1956 * New procedure to clear mac address reading problems on Alpha platforms
1957 *
1958 */
1959
1960 static void fix_mac_address(nic_t * sp)
1961 {
1962 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1963 u64 val64;
1964 int i = 0;
1965
1966 while (fix_mac[i] != END_SIGN) {
1967 writeq(fix_mac[i++], &bar0->gpio_control);
1968 udelay(10);
1969 val64 = readq(&bar0->gpio_control);
1970 }
1971 }
1972
1973 /**
1974 * start_nic - Turns the device on
1975 * @nic : device private variable.
1976 * Description:
1977 * This function actually turns the device on. Before this function is
1978 * called, all registers are configured from their reset states
1979 * and shared memory is allocated but the NIC is still quiescent. On
1980 * calling this function, the device interrupts are cleared and the NIC is
1981 * literally switched on by writing into the adapter control register.
1982 * Return Value:
1983 * SUCCESS on success and -1 on failure.
1984 */
1985
1986 static int start_nic(struct s2io_nic *nic)
1987 {
1988 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1989 struct net_device *dev = nic->dev;
1990 register u64 val64 = 0;
1991 u16 subid, i;
1992 mac_info_t *mac_control;
1993 struct config_param *config;
1994
1995 mac_control = &nic->mac_control;
1996 config = &nic->config;
1997
1998 /* PRC Initialization and configuration */
1999 for (i = 0; i < config->rx_ring_num; i++) {
2000 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2001 &bar0->prc_rxd0_n[i]);
2002
2003 val64 = readq(&bar0->prc_ctrl_n[i]);
2004 if (nic->config.bimodal)
2005 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
2006 if (nic->rxd_mode == RXD_MODE_1)
2007 val64 |= PRC_CTRL_RC_ENABLED;
2008 else
2009 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2010 if (nic->device_type == XFRAME_II_DEVICE)
2011 val64 |= PRC_CTRL_GROUP_READS;
2012 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2013 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2014 writeq(val64, &bar0->prc_ctrl_n[i]);
2015 }
2016
2017 if (nic->rxd_mode == RXD_MODE_3B) {
2018 /* For 2 buffer mode, set IGNORE_L2_ERR in the Rx_pa_cfg reg. */
2019 val64 = readq(&bar0->rx_pa_cfg);
2020 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2021 writeq(val64, &bar0->rx_pa_cfg);
2022 }
2023
2024 /*
2025 * Enabling MC-RLDRAM. After enabling the device, we wait
2026 * for around 100ms, which is approximately the time required
2027 * for the device to be ready for operation.
2028 */
2029 val64 = readq(&bar0->mc_rldram_mrs);
2030 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2031 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2032 val64 = readq(&bar0->mc_rldram_mrs);
2033
2034 msleep(100); /* Delay by around 100 ms. */
2035
2036 /* Enabling ECC Protection. */
2037 val64 = readq(&bar0->adapter_control);
2038 val64 &= ~ADAPTER_ECC_EN;
2039 writeq(val64, &bar0->adapter_control);
2040
2041 /*
2042 * Clearing any possible Link state change interrupts that
2043 * could have popped up just before Enabling the card.
2044 */
2045 val64 = readq(&bar0->mac_rmac_err_reg);
2046 if (val64)
2047 writeq(val64, &bar0->mac_rmac_err_reg);
2048
2049 /*
2050 * Verify if the device is ready to be enabled, if so enable
2051 * it.
2052 */
2053 val64 = readq(&bar0->adapter_status);
2054 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
2055 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2056 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2057 (unsigned long long) val64);
2058 return FAILURE;
2059 }
2060
2061 /*
2062 * With some switches, link might be already up at this point.
2063 * Because of this weird behavior, when we enable laser,
2064 * we may not get link. We need to handle this. We cannot
2065 * figure out which switch is misbehaving. So we are forced to
2066 * make a global change.
2067 */
2068
2069 /* Enabling Laser. */
2070 val64 = readq(&bar0->adapter_control);
2071 val64 |= ADAPTER_EOI_TX_ON;
2072 writeq(val64, &bar0->adapter_control);
2073
2074 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2075 /*
2076 * Don't see link state interrupts initially on some switches,
2077 * so directly scheduling the link state task here.
2078 */
2079 schedule_work(&nic->set_link_task);
2080 }
2081 /* SXE-002: Initialize link and activity LED */
2082 subid = nic->pdev->subsystem_device;
2083 if (((subid & 0xFF) >= 0x07) &&
2084 (nic->device_type == XFRAME_I_DEVICE)) {
2085 val64 = readq(&bar0->gpio_control);
2086 val64 |= 0x0000800000000000ULL;
2087 writeq(val64, &bar0->gpio_control);
2088 val64 = 0x0411040400000000ULL;
2089 writeq(val64, (void __iomem *)bar0 + 0x2700);
2090 }
2091
2092 return SUCCESS;
2093 }
2094 /**
2095 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2096 */
2097 static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2098 {
2099 nic_t *nic = fifo_data->nic;
2100 struct sk_buff *skb;
2101 TxD_t *txds;
2102 u16 j, frg_cnt;
2103
2104 txds = txdlp;
2105 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2106 pci_unmap_single(nic->pdev, (dma_addr_t)
2107 txds->Buffer_Pointer, sizeof(u64),
2108 PCI_DMA_TODEVICE);
2109 txds++;
2110 }
2111
2112 skb = (struct sk_buff *) ((unsigned long)
2113 txds->Host_Control);
2114 if (!skb) {
2115 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2116 return NULL;
2117 }
2118 pci_unmap_single(nic->pdev, (dma_addr_t)
2119 txds->Buffer_Pointer,
2120 skb->len - skb->data_len,
2121 PCI_DMA_TODEVICE);
2122 frg_cnt = skb_shinfo(skb)->nr_frags;
2123 if (frg_cnt) {
2124 txds++;
2125 for (j = 0; j < frg_cnt; j++, txds++) {
2126 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2127 if (!txds->Buffer_Pointer)
2128 break;
2129 pci_unmap_page(nic->pdev, (dma_addr_t)
2130 txds->Buffer_Pointer,
2131 frag->size, PCI_DMA_TODEVICE);
2132 }
2133 }
2134 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2135 return skb;
2136 }
2137
2138 /**
2139 * free_tx_buffers - Free all queued Tx buffers
2140 * @nic : device private variable.
2141 * Description:
2142 * Free all queued Tx buffers.
2143 * Return Value: void
2144 */
2145
2146 static void free_tx_buffers(struct s2io_nic *nic)
2147 {
2148 struct net_device *dev = nic->dev;
2149 struct sk_buff *skb;
2150 TxD_t *txdp;
2151 int i, j;
2152 mac_info_t *mac_control;
2153 struct config_param *config;
2154 int cnt = 0;
2155
2156 mac_control = &nic->mac_control;
2157 config = &nic->config;
2158
2159 for (i = 0; i < config->tx_fifo_num; i++) {
2160 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2161 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2162 list_virt_addr;
2163 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2164 if (skb) {
2165 dev_kfree_skb(skb);
2166 cnt++;
2167 }
2168 }
2169 DBG_PRINT(INTR_DBG,
2170 "%s:forcibly freeing %d skbs on FIFO%d\n",
2171 dev->name, cnt, i);
2172 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2173 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2174 }
2175 }
2176
2177 /**
2178 * stop_nic - To stop the nic
2179 * @nic : device private variable.
2180 * Description:
2181 * This function does exactly the opposite of what the start_nic()
2182 * function does. This function is called to stop the device.
2183 * Return Value:
2184 * void.
2185 */
2186
2187 static void stop_nic(struct s2io_nic *nic)
2188 {
2189 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2190 register u64 val64 = 0;
2191 u16 interruptible;
2192 mac_info_t *mac_control;
2193 struct config_param *config;
2194
2195 mac_control = &nic->mac_control;
2196 config = &nic->config;
2197
2198 /* Disable all interrupts */
2199 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2200 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2201 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2202 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2203
2204 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2205 val64 = readq(&bar0->adapter_control);
2206 val64 &= ~(ADAPTER_CNTL_EN);
2207 writeq(val64, &bar0->adapter_control);
2208 }
2209
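/**
 * fill_rxd_3buf - Populate an RxD for 3 buffer mode
 * @nic: device private variable
 * @rxdp: descriptor to be filled
 * @skb: skb whose head will receive the L3/L4 headers
 * Description: (summary inferred from the function body below)
 * Buffer-1 of the descriptor is mapped to skb->data and sized for the
 * L3/L4 headers, while a second skb hung off skb_shinfo(skb)->frag_list
 * receives up to dev->mtu bytes of L4 payload through Buffer-2.
 * Return Value:
 * SUCCESS, or -ENOMEM if the frag_list skb cannot be allocated.
 */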
2210 static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2211 {
2212 struct net_device *dev = nic->dev;
2213 struct sk_buff *frag_list;
2214 void *tmp;
2215
2216 /* Buffer-1 receives L3/L4 headers */
2217 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2218 (nic->pdev, skb->data, l3l4hdr_size + 4,
2219 PCI_DMA_FROMDEVICE);
2220
2221 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2222 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2223 if (skb_shinfo(skb)->frag_list == NULL) {
2224 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2225 return -ENOMEM;
2226 }
2227 frag_list = skb_shinfo(skb)->frag_list;
2228 frag_list->next = NULL;
2229 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2230 frag_list->data = tmp;
2231 frag_list->tail = tmp;
2232
2233 /* Buffer-2 receives L4 data payload */
2234 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2235 frag_list->data, dev->mtu,
2236 PCI_DMA_FROMDEVICE);
2237 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2238 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2239
2240 return SUCCESS;
2241 }
2242
2243 /**
2244 * fill_rx_buffers - Allocates the Rx side skbs
2245 * @nic: device private variable
2246 * @ring_no: ring number
2247 * Description:
2248 * The function allocates Rx side skbs and puts the physical
2249 * address of these buffers into the RxD buffer pointers, so that the NIC
2250 * can DMA the received frame into these locations.
2251 * The NIC supports 3 receive modes, viz
2252 * 1. single buffer,
2253 * 2. three buffer and
2254 * 3. five buffer modes.
2255 * Each mode defines how many fragments the received frame will be split
2256 * up into by the NIC. The frame is split into L3 header, L4 header and
2257 * L4 payload in three buffer mode, and in 5 buffer mode the L4 payload
2258 * itself is split into 3 fragments. As of now the single, two and
2259 * three buffer modes are supported by the driver.
2260 * Return Value:
2261 * SUCCESS on success or an appropriate -ve value on failure.
2262 */
2263
2264 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2265 {
2266 struct net_device *dev = nic->dev;
2267 struct sk_buff *skb;
2268 RxD_t *rxdp;
2269 int off, off1, size, block_no, block_no1;
2270 u32 alloc_tab = 0;
2271 u32 alloc_cnt;
2272 mac_info_t *mac_control;
2273 struct config_param *config;
2274 u64 tmp;
2275 buffAdd_t *ba;
2276 #ifndef CONFIG_S2IO_NAPI
2277 unsigned long flags;
2278 #endif
2279 RxD_t *first_rxdp = NULL;
2280
2281 mac_control = &nic->mac_control;
2282 config = &nic->config;
2283 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2284 atomic_read(&nic->rx_bufs_left[ring_no]);
2285
2286 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2287 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2288 while (alloc_tab < alloc_cnt) {
2289 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2290 block_index;
2291 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2292
2293 rxdp = mac_control->rings[ring_no].
2294 rx_blocks[block_no].rxds[off].virt_addr;
2295
2296 if ((block_no == block_no1) && (off == off1) &&
2297 (rxdp->Host_Control)) {
2298 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2299 dev->name);
2300 DBG_PRINT(INTR_DBG, " info equated\n");
2301 goto end;
2302 }
2303 if (off && (off == rxd_count[nic->rxd_mode])) {
2304 mac_control->rings[ring_no].rx_curr_put_info.
2305 block_index++;
2306 if (mac_control->rings[ring_no].rx_curr_put_info.
2307 block_index == mac_control->rings[ring_no].
2308 block_count)
2309 mac_control->rings[ring_no].rx_curr_put_info.
2310 block_index = 0;
2311 block_no = mac_control->rings[ring_no].
2312 rx_curr_put_info.block_index;
2313 if (off == rxd_count[nic->rxd_mode])
2314 off = 0;
2315 mac_control->rings[ring_no].rx_curr_put_info.
2316 offset = off;
2317 rxdp = mac_control->rings[ring_no].
2318 rx_blocks[block_no].block_virt_addr;
2319 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2320 dev->name, rxdp);
2321 }
2322 #ifndef CONFIG_S2IO_NAPI
2323 spin_lock_irqsave(&nic->put_lock, flags);
2324 mac_control->rings[ring_no].put_pos =
2325 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2326 spin_unlock_irqrestore(&nic->put_lock, flags);
2327 #endif
2328 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2329 ((nic->rxd_mode >= RXD_MODE_3A) &&
2330 (rxdp->Control_2 & BIT(0)))) {
2331 mac_control->rings[ring_no].rx_curr_put_info.
2332 offset = off;
2333 goto end;
2334 }
2335 /* calculate size of skb based on ring mode */
2336 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2337 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2338 if (nic->rxd_mode == RXD_MODE_1)
2339 size += NET_IP_ALIGN;
2340 else if (nic->rxd_mode == RXD_MODE_3B)
2341 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2342 else
2343 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2344
2345 /* allocate skb */
2346 skb = dev_alloc_skb(size);
2347 if (!skb) {
2348 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2349 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2350 if (first_rxdp) {
2351 wmb();
2352 first_rxdp->Control_1 |= RXD_OWN_XENA;
2353 }
2354 return -ENOMEM;
2355 }
2356 if (nic->rxd_mode == RXD_MODE_1) {
2357 /* 1 buffer mode - normal operation mode */
2358 memset(rxdp, 0, sizeof(RxD1_t));
2359 skb_reserve(skb, NET_IP_ALIGN);
2360 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2361 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2362 PCI_DMA_FROMDEVICE);
2363 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2364
2365 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2366 /*
2367 * 2 or 3 buffer mode -
2368 * Both 2 buffer mode and 3 buffer mode provides 128
2369 * byte aligned receive buffers.
2370 *
2371 * 3 buffer mode provides header separation where in
2372 * skb->data will have L3/L4 headers where as
2373 * skb_shinfo(skb)->frag_list will have the L4 data
2374 * payload
2375 */
2376
2377 memset(rxdp, 0, sizeof(RxD3_t));
2378 ba = &mac_control->rings[ring_no].ba[block_no][off];
2379 skb_reserve(skb, BUF0_LEN);
2380 tmp = (u64)(unsigned long) skb->data;
2381 tmp += ALIGN_SIZE;
2382 tmp &= ~ALIGN_SIZE;
2383 skb->data = (void *) (unsigned long)tmp;
2384 skb->tail = (void *) (unsigned long)tmp;
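/*
 * The two statements above round skb->data up to the next
 * (ALIGN_SIZE + 1)-byte boundary, which works provided ALIGN_SIZE is
 * of the form 2^n - 1 (127 for the 128-byte alignment promised in the
 * comment above).  Worked example with ALIGN_SIZE = 127:
 * data = 0x1005 -> 0x1005 + 127 = 0x1084 -> 0x1084 & ~127 = 0x1080.
 */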
2385
2386 ((RxD3_t*)rxdp)->Buffer0_ptr =
2387 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2388 PCI_DMA_FROMDEVICE);
2389 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2390 if (nic->rxd_mode == RXD_MODE_3B) {
2391 /* Two buffer mode */
2392
2393 /*
2394 * Buffer2 will have L3/L4 header plus
2395 * L4 payload
2396 */
2397 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2398 (nic->pdev, skb->data, dev->mtu + 4,
2399 PCI_DMA_FROMDEVICE);
2400
2401 /* Buffer-1 will be dummy buffer not used */
2402 ((RxD3_t*)rxdp)->Buffer1_ptr =
2403 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2404 PCI_DMA_FROMDEVICE);
2405 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2406 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2407 (dev->mtu + 4);
2408 } else {
2409 /* 3 buffer mode */
2410 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2411 dev_kfree_skb_irq(skb);
2412 if (first_rxdp) {
2413 wmb();
2414 first_rxdp->Control_1 |=
2415 RXD_OWN_XENA;
2416 }
2417 return -ENOMEM;
2418 }
2419 }
2420 rxdp->Control_2 |= BIT(0);
2421 }
2422 rxdp->Host_Control = (unsigned long) (skb);
2423 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2424 rxdp->Control_1 |= RXD_OWN_XENA;
2425 off++;
2426 if (off == (rxd_count[nic->rxd_mode] + 1))
2427 off = 0;
2428 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2429
2430 rxdp->Control_2 |= SET_RXD_MARKER;
2431 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2432 if (first_rxdp) {
2433 wmb();
2434 first_rxdp->Control_1 |= RXD_OWN_XENA;
2435 }
2436 first_rxdp = rxdp;
2437 }
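/*
 * Ownership is handed to the NIC in batches of 2^rxsync_frequency
 * descriptors: every descriptor except the first of its batch gets
 * RXD_OWN_XENA immediately (above), while the batch leader is parked
 * in first_rxdp and released -- only after a wmb() -- once the next
 * batch starts or the function exits, so the NIC never sees a
 * partially initialized batch.
 */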
2438 atomic_inc(&nic->rx_bufs_left[ring_no]);
2439 alloc_tab++;
2440 }
2441
2442 end:
2443 /* Transfer ownership of first descriptor to adapter just before
2444 * exiting. Before that, use memory barrier so that ownership
2445 * and other fields are seen by adapter correctly.
2446 */
2447 if (first_rxdp) {
2448 wmb();
2449 first_rxdp->Control_1 |= RXD_OWN_XENA;
2450 }
2451
2452 return SUCCESS;
2453 }
2454
2455 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2456 {
2457 struct net_device *dev = sp->dev;
2458 int j;
2459 struct sk_buff *skb;
2460 RxD_t *rxdp;
2461 mac_info_t *mac_control;
2462 buffAdd_t *ba;
2463
2464 mac_control = &sp->mac_control;
2465 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2466 rxdp = mac_control->rings[ring_no].
2467 rx_blocks[blk].rxds[j].virt_addr;
2468 skb = (struct sk_buff *)
2469 ((unsigned long) rxdp->Host_Control);
2470 if (!skb) {
2471 continue;
2472 }
2473 if (sp->rxd_mode == RXD_MODE_1) {
2474 pci_unmap_single(sp->pdev, (dma_addr_t)
2475 ((RxD1_t*)rxdp)->Buffer0_ptr,
2476 dev->mtu +
2477 HEADER_ETHERNET_II_802_3_SIZE
2478 + HEADER_802_2_SIZE +
2479 HEADER_SNAP_SIZE,
2480 PCI_DMA_FROMDEVICE);
2481 memset(rxdp, 0, sizeof(RxD1_t));
2482 } else if(sp->rxd_mode == RXD_MODE_3B) {
2483 ba = &mac_control->rings[ring_no].
2484 ba[blk][j];
2485 pci_unmap_single(sp->pdev, (dma_addr_t)
2486 ((RxD3_t*)rxdp)->Buffer0_ptr,
2487 BUF0_LEN,
2488 PCI_DMA_FROMDEVICE);
2489 pci_unmap_single(sp->pdev, (dma_addr_t)
2490 ((RxD3_t*)rxdp)->Buffer1_ptr,
2491 BUF1_LEN,
2492 PCI_DMA_FROMDEVICE);
2493 pci_unmap_single(sp->pdev, (dma_addr_t)
2494 ((RxD3_t*)rxdp)->Buffer2_ptr,
2495 dev->mtu + 4,
2496 PCI_DMA_FROMDEVICE);
2497 memset(rxdp, 0, sizeof(RxD3_t));
2498 } else {
2499 pci_unmap_single(sp->pdev, (dma_addr_t)
2500 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2501 PCI_DMA_FROMDEVICE);
2502 pci_unmap_single(sp->pdev, (dma_addr_t)
2503 ((RxD3_t*)rxdp)->Buffer1_ptr,
2504 l3l4hdr_size + 4,
2505 PCI_DMA_FROMDEVICE);
2506 pci_unmap_single(sp->pdev, (dma_addr_t)
2507 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2508 PCI_DMA_FROMDEVICE);
2509 memset(rxdp, 0, sizeof(RxD3_t));
2510 }
2511 dev_kfree_skb(skb);
2512 atomic_dec(&sp->rx_bufs_left[ring_no]);
2513 }
2514 }
2515
2516 /**
2517 * free_rx_buffers - Frees all Rx buffers
2518 * @sp: device private variable.
2519 * Description:
2520 * This function will free all Rx buffers allocated by host.
2521 * Return Value:
2522 * NONE.
2523 */
2524
2525 static void free_rx_buffers(struct s2io_nic *sp)
2526 {
2527 struct net_device *dev = sp->dev;
2528 int i, blk = 0, buf_cnt = 0;
2529 mac_info_t *mac_control;
2530 struct config_param *config;
2531
2532 mac_control = &sp->mac_control;
2533 config = &sp->config;
2534
2535 for (i = 0; i < config->rx_ring_num; i++) {
2536 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2537 free_rxd_blk(sp,i,blk);
2538
2539 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2540 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2541 mac_control->rings[i].rx_curr_put_info.offset = 0;
2542 mac_control->rings[i].rx_curr_get_info.offset = 0;
2543 atomic_set(&sp->rx_bufs_left[i], 0);
2544 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2545 dev->name, buf_cnt, i);
2546 }
2547 }
2548
2549 /**
2550 * s2io_poll - Rx interrupt handler for NAPI support
2551 * @dev : pointer to the device structure.
2552 * @budget : The number of packets that were budgeted to be processed
2553 * during one pass through the 'Poll" function.
2554 * Description:
2555 * Comes into picture only if NAPI support has been incorporated. It does
2556 * the same thing that rx_intr_handler does, but not in a interrupt context
2557 * also It will process only a given number of packets.
2558 * Return value:
2559 * 0 on success and 1 if there are No Rx packets to be processed.
2560 */
2561
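/*
 * A minimal sketch of the 2.6-era NAPI contract that s2io_poll()
 * below follows (dev->quota, *budget and netif_rx_complete() are the
 * standard kernel interfaces of the time):
 *
 *	processed = receive work done, capped at min(*budget, dev->quota);
 *	dev->quota -= processed;
 *	*budget -= processed;
 *	if (no packets left) {
 *		netif_rx_complete(dev);	and re-enable Rx interrupts
 *		return 0;
 *	}
 *	return 1;	quota met, stay on the poll list
 */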
2562 #if defined(CONFIG_S2IO_NAPI)
2563 static int s2io_poll(struct net_device *dev, int *budget)
2564 {
2565 nic_t *nic = dev->priv;
2566 int pkt_cnt = 0, org_pkts_to_process;
2567 mac_info_t *mac_control;
2568 struct config_param *config;
2569 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2570 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2571 int i;
2572
2573 atomic_inc(&nic->isr_cnt);
2574 mac_control = &nic->mac_control;
2575 config = &nic->config;
2576
2577 nic->pkts_to_process = *budget;
2578 if (nic->pkts_to_process > dev->quota)
2579 nic->pkts_to_process = dev->quota;
2580 org_pkts_to_process = nic->pkts_to_process;
2581
2582 writeq(val64, &bar0->rx_traffic_int);
2583 val64 = readl(&bar0->rx_traffic_int);
2584
2585 for (i = 0; i < config->rx_ring_num; i++) {
2586 rx_intr_handler(&mac_control->rings[i]);
2587 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2588 if (!nic->pkts_to_process) {
2589 /* Quota for the current iteration has been met */
2590 goto no_rx;
2591 }
2592 }
2593 if (!pkt_cnt)
2594 pkt_cnt = 1;
2595
2596 dev->quota -= pkt_cnt;
2597 *budget -= pkt_cnt;
2598 netif_rx_complete(dev);
2599
2600 for (i = 0; i < config->rx_ring_num; i++) {
2601 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2602 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2603 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2604 break;
2605 }
2606 }
2607 /* Re enable the Rx interrupts. */
2608 writeq(0x0, &bar0->rx_traffic_mask);
2609 val64 = readl(&bar0->rx_traffic_mask);
2610 atomic_dec(&nic->isr_cnt);
2611 return 0;
2612
2613 no_rx:
2614 dev->quota -= pkt_cnt;
2615 *budget -= pkt_cnt;
2616
2617 for (i = 0; i < config->rx_ring_num; i++) {
2618 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2619 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2620 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2621 break;
2622 }
2623 }
2624 atomic_dec(&nic->isr_cnt);
2625 return 1;
2626 }
2627 #endif
2628
2629 #ifdef CONFIG_NET_POLL_CONTROLLER
2630 /**
2631 * s2io_netpoll - netpoll event handler entry point
2632 * @dev : pointer to the device structure.
2633 * Description:
2634 * This function will be called by upper layer to check for events on the
2635 * interface in situations where interrupts are disabled. It is used for
2636 * specific in-kernel networking tasks, such as remote consoles and kernel
2637 * debugging over the network (example netdump in RedHat).
2638 */
2639 static void s2io_netpoll(struct net_device *dev)
2640 {
2641 nic_t *nic = dev->priv;
2642 mac_info_t *mac_control;
2643 struct config_param *config;
2644 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2645 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2646 int i;
2647
2648 disable_irq(dev->irq);
2649
2650 atomic_inc(&nic->isr_cnt);
2651 mac_control = &nic->mac_control;
2652 config = &nic->config;
2653
2654 writeq(val64, &bar0->rx_traffic_int);
2655 writeq(val64, &bar0->tx_traffic_int);
2656
2657 /* We need to free up the transmitted skbufs, or else netpoll will
2658 * run out of skbs and fail, and eventually netpoll applications
2659 * such as netdump will fail.
2660 */
2661 for (i = 0; i < config->tx_fifo_num; i++)
2662 tx_intr_handler(&mac_control->fifos[i]);
2663
2664 /* check for received packet and indicate up to network */
2665 for (i = 0; i < config->rx_ring_num; i++)
2666 rx_intr_handler(&mac_control->rings[i]);
2667
2668 for (i = 0; i < config->rx_ring_num; i++) {
2669 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2670 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2671 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2672 break;
2673 }
2674 }
2675 atomic_dec(&nic->isr_cnt);
2676 enable_irq(dev->irq);
2677 return;
2678 }
2679 #endif
2680
2681 /**
2682 * rx_intr_handler - Rx interrupt handler
2683 * @nic: device private variable.
2684 * Description:
2685 * If the interrupt is because of a received frame or if the
2686 * receive ring contains fresh, as yet unprocessed frames, this function
2687 * is called. It picks up the RxD at which the last Rx processing had
2688 * stopped, sends the skb to the OSM's Rx handler and then increments
2689 * the offset.
2690 * Return Value:
2691 * NONE.
2692 */
2693 static void rx_intr_handler(ring_info_t *ring_data)
2694 {
2695 nic_t *nic = ring_data->nic;
2696 struct net_device *dev = (struct net_device *) nic->dev;
2697 int get_block, put_block, put_offset;
2698 rx_curr_get_info_t get_info, put_info;
2699 RxD_t *rxdp;
2700 struct sk_buff *skb;
2701 #ifndef CONFIG_S2IO_NAPI
2702 int pkt_cnt = 0;
2703 #endif
2704 int i;
2705
2706 spin_lock(&nic->rx_lock);
2707 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2708 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2709 __FUNCTION__, dev->name);
2710 spin_unlock(&nic->rx_lock);
2711 return;
2712 }
2713
2714 get_info = ring_data->rx_curr_get_info;
2715 get_block = get_info.block_index;
2716 put_info = ring_data->rx_curr_put_info;
2717 put_block = put_info.block_index;
2718 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2719 #ifndef CONFIG_S2IO_NAPI
2720 spin_lock(&nic->put_lock);
2721 put_offset = ring_data->put_pos;
2722 spin_unlock(&nic->put_lock);
2723 #else
2724 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
2725 put_info.offset;
2726 #endif
2727 while (RXD_IS_UP2DT(rxdp)) {
2728 /* If we are right behind the put index, the ring is full */
2729 if ((get_block == put_block) &&
2730 (get_info.offset + 1) == put_info.offset) {
2731 DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
2732 break;
2733 }
2734 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2735 if (skb == NULL) {
2736 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2737 dev->name);
2738 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2739 spin_unlock(&nic->rx_lock);
2740 return;
2741 }
2742 if (nic->rxd_mode == RXD_MODE_1) {
2743 pci_unmap_single(nic->pdev, (dma_addr_t)
2744 ((RxD1_t*)rxdp)->Buffer0_ptr,
2745 dev->mtu +
2746 HEADER_ETHERNET_II_802_3_SIZE +
2747 HEADER_802_2_SIZE +
2748 HEADER_SNAP_SIZE,
2749 PCI_DMA_FROMDEVICE);
2750 } else if (nic->rxd_mode == RXD_MODE_3B) {
2751 pci_unmap_single(nic->pdev, (dma_addr_t)
2752 ((RxD3_t*)rxdp)->Buffer0_ptr,
2753 BUF0_LEN, PCI_DMA_FROMDEVICE);
2754 pci_unmap_single(nic->pdev, (dma_addr_t)
2755 ((RxD3_t*)rxdp)->Buffer1_ptr,
2756 BUF1_LEN, PCI_DMA_FROMDEVICE);
2757 pci_unmap_single(nic->pdev, (dma_addr_t)
2758 ((RxD3_t*)rxdp)->Buffer2_ptr,
2759 dev->mtu + 4,
2760 PCI_DMA_FROMDEVICE);
2761 } else {
2762 pci_unmap_single(nic->pdev, (dma_addr_t)
2763 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2764 PCI_DMA_FROMDEVICE);
2765 pci_unmap_single(nic->pdev, (dma_addr_t)
2766 ((RxD3_t*)rxdp)->Buffer1_ptr,
2767 l3l4hdr_size + 4,
2768 PCI_DMA_FROMDEVICE);
2769 pci_unmap_single(nic->pdev, (dma_addr_t)
2770 ((RxD3_t*)rxdp)->Buffer2_ptr,
2771 dev->mtu, PCI_DMA_FROMDEVICE);
2772 }
2773 prefetch(skb->data);
2774 rx_osm_handler(ring_data, rxdp);
2775 get_info.offset++;
2776 ring_data->rx_curr_get_info.offset = get_info.offset;
2777 rxdp = ring_data->rx_blocks[get_block].
2778 rxds[get_info.offset].virt_addr;
2779 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2780 get_info.offset = 0;
2781 ring_data->rx_curr_get_info.offset = get_info.offset;
2782 get_block++;
2783 if (get_block == ring_data->block_count)
2784 get_block = 0;
2785 ring_data->rx_curr_get_info.block_index = get_block;
2786 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2787 }
2788
2789 #ifdef CONFIG_S2IO_NAPI
2790 nic->pkts_to_process -= 1;
2791 if (!nic->pkts_to_process)
2792 break;
2793 #else
2794 pkt_cnt++;
2795 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2796 break;
2797 #endif
2798 }
2799 if (nic->lro) {
2800 /* Clear all LRO sessions before exiting */
2801 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2802 lro_t *lro = &nic->lro0_n[i];
2803 if (lro->in_use) {
2804 update_L3L4_header(nic, lro);
2805 queue_rx_frame(lro->parent);
2806 clear_lro_session(lro);
2807 }
2808 }
2809 }
2810
2811 spin_unlock(&nic->rx_lock);
2812 }
2813
2814 /**
2815 * tx_intr_handler - Transmit interrupt handler
2816 * @nic : device private variable
2817 * Description:
2818 * If an interrupt was raised to indicate DMA complete of the
2819 * Tx packet, this function is called. It identifies the last TxD
2820 * whose buffer was freed and frees all skbs whose data have already
2821 * been DMA'ed into the NIC's internal memory.
2822 * Return Value:
2823 * NONE
2824 */
2825
2826 static void tx_intr_handler(fifo_info_t *fifo_data)
2827 {
2828 nic_t *nic = fifo_data->nic;
2829 struct net_device *dev = (struct net_device *) nic->dev;
2830 tx_curr_get_info_t get_info, put_info;
2831 struct sk_buff *skb;
2832 TxD_t *txdlp;
2833
2834 get_info = fifo_data->tx_curr_get_info;
2835 put_info = fifo_data->tx_curr_put_info;
2836 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2837 list_virt_addr;
2838 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2839 (get_info.offset != put_info.offset) &&
2840 (txdlp->Host_Control)) {
2841 /* Check for TxD errors */
2842 if (txdlp->Control_1 & TXD_T_CODE) {
2843 unsigned long long err;
2844 err = txdlp->Control_1 & TXD_T_CODE;
2845 if (err & 0x1) {
2846 nic->mac_control.stats_info->sw_stat.
2847 parity_err_cnt++;
2848 }
2849 if ((err >> 48) == 0xA) {
2850 DBG_PRINT(TX_DBG,
2851 "TxD returned due to loss of link\n");
2852 } else {
2853 DBG_PRINT(ERR_DBG,
2854 "***TxD error %llx\n",
2855 err);
2856 }
2857 }
2858
2859 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2860 if (skb == NULL) {
2861 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2862 __FUNCTION__);
2863 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2864 return;
2865 }
2866
2867 /* Updating the statistics block */
2868 nic->stats.tx_bytes += skb->len;
2869 dev_kfree_skb_irq(skb);
2870
2871 get_info.offset++;
2872 if (get_info.offset == get_info.fifo_len + 1)
2873 get_info.offset = 0;
2874 txdlp = (TxD_t *) fifo_data->list_info
2875 [get_info.offset].list_virt_addr;
2876 fifo_data->tx_curr_get_info.offset =
2877 get_info.offset;
2878 }
2879
2880 spin_lock(&nic->tx_lock);
2881 if (netif_queue_stopped(dev))
2882 netif_wake_queue(dev);
2883 spin_unlock(&nic->tx_lock);
2884 }
2885
2886 /**
2887 * s2io_mdio_write - Function to write in to MDIO registers
2888 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2889 * @addr : address value
2890 * @value : data value
2891 * @dev : pointer to net_device structure
2892 * Description:
2893 * This function is used to write values to the MDIO registers.
2894 * Return Value: NONE
2895 */
2896 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2897 {
2898 u64 val64 = 0x0;
2899 nic_t *sp = dev->priv;
2900 XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2901
2902 /* address transaction */
2903 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2904 | MDIO_MMD_DEV_ADDR(mmd_type)
2905 | MDIO_MMS_PRT_ADDR(0x0);
2906 writeq(val64, &bar0->mdio_control);
2907 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2908 writeq(val64, &bar0->mdio_control);
2909 udelay(100);
2910
2911 /* Data transaction */
2912 val64 = 0x0;
2913 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2914 | MDIO_MMD_DEV_ADDR(mmd_type)
2915 | MDIO_MMS_PRT_ADDR(0x0)
2916 | MDIO_MDIO_DATA(value)
2917 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2918 writeq(val64, &bar0->mdio_control);
2919 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2920 writeq(val64, &bar0->mdio_control);
2921 udelay(100);
2922
2923 val64 = 0x0;
2924 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2925 | MDIO_MMD_DEV_ADDR(mmd_type)
2926 | MDIO_MMS_PRT_ADDR(0x0)
2927 | MDIO_OP(MDIO_OP_READ_TRANS);
2928 writeq(val64, &bar0->mdio_control);
2929 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2930 writeq(val64, &bar0->mdio_control);
2931 udelay(100);
2932
2933 }
2934
2935 /**
2936 * s2io_mdio_read - Function to read from the MDIO registers
2937 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2938 * @addr : address value
2939 * @dev : pointer to net_device structure
2940 * Description:
2941 * This function is used to read values from the MDIO registers.
2942 * Return Value: the 16-bit value read, in the low bits of a u64.
2943 */
2944 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2945 {
2946 u64 val64 = 0x0;
2947 u64 rval64 = 0x0;
2948 nic_t *sp = dev->priv;
2949 XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2950
2951 /* address transaction */
2952 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2953 | MDIO_MMD_DEV_ADDR(mmd_type)
2954 | MDIO_MMS_PRT_ADDR(0x0);
2955 writeq(val64, &bar0->mdio_control);
2956 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2957 writeq(val64, &bar0->mdio_control);
2958 udelay(100);
2959
2960 /* Data transaction */
2961 val64 = 0x0;
2962 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2963 | MDIO_MMD_DEV_ADDR(mmd_type)
2964 | MDIO_MMS_PRT_ADDR(0x0)
2965 | MDIO_OP(MDIO_OP_READ_TRANS);
2966 writeq(val64, &bar0->mdio_control);
2967 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2968 writeq(val64, &bar0->mdio_control);
2969 udelay(100);
2970
2971 /* Read the value from regs */
2972 rval64 = readq(&bar0->mdio_control);
2973 rval64 = rval64 & 0xFFFF0000;
2974 rval64 = rval64 >> 16;
2975 return rval64;
2976 }
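/*
 * Usage sketch (mirroring s2io_updt_xpak_counter() below): reading the
 * XPAK alarm flag register at DOM address 0xA070 of the PMA/PMD MMD
 * comes down to
 *
 *	u64 alarms = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0xA070, dev);
 *	if (CHECKBIT(alarms, 0x7))
 *		... transceiver temperature alarm is set ...
 *
 * Each helper above issues an address transaction followed by a data
 * transaction on the MDIO bus, with a 100us settle time after each.
 */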
2977 /**
2978 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2979 * @counter : counter value to be updated
2980 * @flag : flag to indicate the status
2981 * @type : counter type
2982 * Description:
2983 * This function checks and updates the status of the xpak counters.
2984 * Return Value: NONE
2985 */
2986
2987 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2988 {
2989 u64 mask = 0x3;
2990 u64 val64;
2991 int i;
2992 for (i = 0; i < index; i++)
2993 mask = mask << 0x2;
2994
2995 if (flag > 0)
2996 {
2997 *counter = *counter + 1;
2998 val64 = *regs_stat & mask;
2999 val64 = val64 >> (index * 0x2);
3000 val64 = val64 + 1;
3001 if (val64 == 3)
3002 {
3003 switch (type)
3004 {
3005 case 1:
3006 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3007 "service. Excessive temperatures may "
3008 "result in premature transceiver "
3009 "failure \n");
3010 break;
3011 case 2:
3012 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3013 "service Excessive bias currents may "
3014 "indicate imminent laser diode "
3015 "failure \n");
3016 break;
3017 case 3:
3018 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3019 "service Excessive laser output "
3020 "power may saturate far-end "
3021 "receiver\n");
3022 break;
3023 default:
3024 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3025 "type \n");
3026 }
3027 val64 = 0x0;
3028 }
3029 val64 = val64 << (index * 0x2);
3030 *regs_stat = (*regs_stat & (~mask)) | (val64);
3031
3032 } else {
3033 *regs_stat = *regs_stat & (~mask);
3034 }
3035 }
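/*
 * Worked example of the packing used above: each alarm source owns a
 * 2-bit saturating counter inside *regs_stat.  For index = 2 the mask
 * is 0x3 << 4 = 0x30, so the field lives in bits 4-5; three
 * consecutive calls with flag set step the field 0 -> 1 -> 2 -> 3,
 * and on reaching 3 the warning is printed and the field is reset to
 * 0.  A call with flag clear zeroes the field outright.
 */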
3036
3037 /**
3038 * s2io_updt_xpak_counter - Function to update the xpak counters
3039 * @dev : pointer to net_device struct
3040 * Description:
3041 * This function updates the status of the xpak counters.
3042 * Return Value: NONE
3043 */
3044 static void s2io_updt_xpak_counter(struct net_device *dev)
3045 {
3046 u16 flag = 0x0;
3047 u16 type = 0x0;
3048 u16 val16 = 0x0;
3049 u64 val64 = 0x0;
3050 u64 addr = 0x0;
3051
3052 nic_t *sp = dev->priv;
3053 StatInfo_t *stat_info = sp->mac_control.stats_info;
3054
3055 /* Check the communication with the MDIO slave */
3056 addr = 0x0000;
3057 val64 = 0x0;
3058 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3059 if ((val64 == 0xFFFF) || (val64 == 0x0000))
3060 {
3061 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3062 "Returned %llx\n", (unsigned long long)val64);
3063 return;
3064 }
3065
3066 /* Check for the expected value of 0x2040 at PMA address 0x0000 */
3067 if (val64 != 0x2040)
3068 {
3069 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3070 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3071 (unsigned long long)val64);
3072 return;
3073 }
3074
3075 /* Loading the DOM register to MDIO register */
3076 addr = 0xA100;
3077 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3078 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3079
3080 /* Reading the Alarm flags */
3081 addr = 0xA070;
3082 val64 = 0x0;
3083 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3084
3085 flag = CHECKBIT(val64, 0x7);
3086 type = 1;
3087 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3088 &stat_info->xpak_stat.xpak_regs_stat,
3089 0x0, flag, type);
3090
3091 if (CHECKBIT(val64, 0x6))
3092 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3093
3094 flag = CHECKBIT(val64, 0x3);
3095 type = 2;
3096 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3097 &stat_info->xpak_stat.xpak_regs_stat,
3098 0x2, flag, type);
3099
3100 if (CHECKBIT(val64, 0x2))
3101 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3102
3103 flag = CHECKBIT(val64, 0x1);
3104 type = 3;
3105 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3106 &stat_info->xpak_stat.xpak_regs_stat,
3107 0x4, flag, type);
3108
3109 if (CHECKBIT(val64, 0x0))
3110 stat_info->xpak_stat.alarm_laser_output_power_low++;
3111
3112 /* Reading the Warning flags */
3113 addr = 0xA074;
3114 val64 = 0x0;
3115 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3116
3117 if (CHECKBIT(val64, 0x7))
3118 stat_info->xpak_stat.warn_transceiver_temp_high++;
3119
3120 if (CHECKBIT(val64, 0x6))
3121 stat_info->xpak_stat.warn_transceiver_temp_low++;
3122
3123 if (CHECKBIT(val64, 0x3))
3124 stat_info->xpak_stat.warn_laser_bias_current_high++;
3125
3126 if (CHECKBIT(val64, 0x2))
3127 stat_info->xpak_stat.warn_laser_bias_current_low++;
3128
3129 if (CHECKBIT(val64, 0x1))
3130 stat_info->xpak_stat.warn_laser_output_power_high++;
3131
3132 if (CHECKBIT(val64, 0x0))
3133 stat_info->xpak_stat.warn_laser_output_power_low++;
3134 }
3135
3136 /**
3137 * alarm_intr_handler - Alarm Interrupt handler
3138 * @nic: device private variable
3139 * Description: If the interrupt was neither because of an Rx packet nor
3140 * a Tx complete, this function is called. If the interrupt was to
3141 * indicate a loss of link, the OSM link status handler is invoked. For
3142 * any other alarm interrupt, the block that raised the interrupt is
3143 * displayed and a H/W reset is issued.
3144 * Return Value:
3145 * NONE
3146 */
3147
3148 static void alarm_intr_handler(struct s2io_nic *nic)
3149 {
3150 struct net_device *dev = (struct net_device *) nic->dev;
3151 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3152 register u64 val64 = 0, err_reg = 0;
3153 u64 cnt;
3154 int i;
3155 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3156 /* Handling the XPAK counters update */
3157 if (nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3158 /* waiting for an hour */
3159 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3160 } else {
3161 s2io_updt_xpak_counter(dev);
3162 /* reset the count to zero */
3163 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3164 }
3165
3166 /* Handling link status change error Intr */
3167 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3168 err_reg = readq(&bar0->mac_rmac_err_reg);
3169 writeq(err_reg, &bar0->mac_rmac_err_reg);
3170 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3171 schedule_work(&nic->set_link_task);
3172 }
3173 }
3174
3175 /* Handling Ecc errors */
3176 val64 = readq(&bar0->mc_err_reg);
3177 writeq(val64, &bar0->mc_err_reg);
3178 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3179 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3180 nic->mac_control.stats_info->sw_stat.
3181 double_ecc_errs++;
3182 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3183 dev->name);
3184 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3185 if (nic->device_type != XFRAME_II_DEVICE) {
3186 /* Reset XframeI only if critical error */
3187 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3188 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3189 netif_stop_queue(dev);
3190 schedule_work(&nic->rst_timer_task);
3191 nic->mac_control.stats_info->sw_stat.
3192 soft_reset_cnt++;
3193 }
3194 }
3195 } else {
3196 nic->mac_control.stats_info->sw_stat.
3197 single_ecc_errs++;
3198 }
3199 }
3200
3201 /* In case of a serious error, the device will be Reset. */
3202 val64 = readq(&bar0->serr_source);
3203 if (val64 & SERR_SOURCE_ANY) {
3204 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3205 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3206 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3207 (unsigned long long)val64);
3208 netif_stop_queue(dev);
3209 schedule_work(&nic->rst_timer_task);
3210 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3211 }
3212
3213 /*
3214 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3215 * Error occurs, the adapter will be recycled by disabling the
3216 * adapter enable bit and enabling it again after the device
3217 * becomes Quiescent.
3218 */
3219 val64 = readq(&bar0->pcc_err_reg);
3220 writeq(val64, &bar0->pcc_err_reg);
3221 if (val64 & PCC_FB_ECC_DB_ERR) {
3222 u64 ac = readq(&bar0->adapter_control);
3223 ac &= ~(ADAPTER_CNTL_EN);
3224 writeq(ac, &bar0->adapter_control);
3225 ac = readq(&bar0->adapter_control);
3226 schedule_work(&nic->set_link_task);
3227 }
3228 /* Check for data parity error */
3229 val64 = readq(&bar0->pic_int_status);
3230 if (val64 & PIC_INT_GPIO) {
3231 val64 = readq(&bar0->gpio_int_reg);
3232 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3233 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3234 schedule_work(&nic->rst_timer_task);
3235 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3236 }
3237 }
3238
3239 /* Check for ring full counter */
3240 if (nic->device_type & XFRAME_II_DEVICE) {
3241 val64 = readq(&bar0->ring_bump_counter1);
3242 for (i=0; i<4; i++) {
3243 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3244 cnt >>= 64 - ((i+1)*16);
3245 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3246 += cnt;
3247 }
3248
3249 val64 = readq(&bar0->ring_bump_counter2);
3250 for (i=0; i<4; i++) {
3251 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3252 cnt >>= 64 - ((i+1)*16);
3253 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3254 += cnt;
3255 }
3256 }
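/*
 * The extraction above assumes the Xframe register convention that
 * vBIT(val, loc, sz) places val in a sz-bit field starting at bit
 * 'loc' counted from the most significant bit.  For i = 1,
 * vBIT(0xFFFF, 16, 16) therefore masks bits 16-31 from the MSB and
 * the shift by 64 - 32 = 32 brings that 16-bit ring bump counter down
 * into the low bits of cnt.
 */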
3257
3258 /* Other type of interrupts are not being handled now, TODO */
3259 }
3260
3261 /**
3262 * wait_for_cmd_complete - waits for a command to complete.
3263 * @addr : address of the register to be polled.
3264 * @busy_bit : bit which indicates that the command is still busy.
3265 * Description: Function that waits for a command to Write into RMAC
3266 * ADDR DATA registers to be completed and returns either success or
3267 * error depending on whether the command was complete or not.
3268 * Return value:
3269 * SUCCESS on success and FAILURE on failure.
3270 */
3271
3272 static int wait_for_cmd_complete(void *addr, u64 busy_bit)
3273 {
3274 int ret = FAILURE, cnt = 0;
3275 u64 val64;
3276
3277 while (TRUE) {
3278 val64 = readq(addr);
3279 if (!(val64 & busy_bit)) {
3280 ret = SUCCESS;
3281 break;
3282 }
3283
3284 if(in_interrupt())
3285 mdelay(50);
3286 else
3287 msleep(50);
3288
3289 if (cnt++ > 10)
3290 break;
3291 }
3292 return ret;
3293 }
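/*
 * Worst case the loop above makes 12 passes with a 50ms sleep (or
 * mdelay in interrupt context) per pass, so a stuck command is given
 * roughly 600ms before FAILURE is returned.
 */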
3294
3295 /**
3296 * s2io_reset - Resets the card.
3297 * @sp : private member of the device structure.
3298 * Description: Function to Reset the card. This function then also
3299 * restores the previously saved PCI configuration space registers as
3300 * the card reset also resets the configuration space.
3301 * Return value:
3302 * void.
3303 */
3304
3305 static void s2io_reset(nic_t * sp)
3306 {
3307 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3308 u64 val64;
3309 u16 subid, pci_cmd;
3310
3311 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3312 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3313
3314 val64 = SW_RESET_ALL;
3315 writeq(val64, &bar0->sw_reset);
3316
3317 /*
3318 * At this stage, if the PCI write is indeed completed, the
3319 * card is reset and so is the PCI Config space of the device.
3320 * So a read cannot be issued at this stage on any of the
3321 * registers to ensure the write into "sw_reset" register
3322 * has gone through.
3323 * Question: Is there any system call that will explicitly force
3324 * all the write commands still pending on the bus to be pushed
3325 * through?
3326 * As of now I am just giving a 250ms delay and hoping that the
3327 * PCI write to sw_reset register is done by this time.
3328 */
3329 msleep(250);
3330 if (strstr(sp->product_name, "CX4")) {
3331 msleep(750);
3332 }
3333
3334 /* Restore the PCI state saved during initialization. */
3335 pci_restore_state(sp->pdev);
3336 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
3337 pci_cmd);
3338 s2io_init_pci(sp);
3339
3340 msleep(250);
3341
3342 /* Set swapper to enable I/O register access */
3343 s2io_set_swapper(sp);
3344
3345 /* Restore the MSIX table entries from local variables */
3346 restore_xmsi_data(sp);
3347
3348 /* Clear certain PCI/PCI-X fields after reset */
3349 if (sp->device_type == XFRAME_II_DEVICE) {
3350 /* Clear "detected parity error" bit */
3351 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3352
3353 /* Clearing PCIX Ecc status register */
3354 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3355
3356 /* Clearing PCI_STATUS error reflected here */
3357 writeq(BIT(62), &bar0->txpic_int_reg);
3358 }
3359
3360 /* Reset device statistics maintained by OS */
3361 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3362
3363 /* SXE-002: Configure link and activity LED to turn it off */
3364 subid = sp->pdev->subsystem_device;
3365 if (((subid & 0xFF) >= 0x07) &&
3366 (sp->device_type == XFRAME_I_DEVICE)) {
3367 val64 = readq(&bar0->gpio_control);
3368 val64 |= 0x0000800000000000ULL;
3369 writeq(val64, &bar0->gpio_control);
3370 val64 = 0x0411040400000000ULL;
3371 writeq(val64, (void __iomem *)bar0 + 0x2700);
3372 }
3373
3374 /*
3375 * Clear spurious ECC interrupts that would have occured on
3376 * XFRAME II cards after reset.
3377 */
3378 if (sp->device_type == XFRAME_II_DEVICE) {
3379 val64 = readq(&bar0->pcc_err_reg);
3380 writeq(val64, &bar0->pcc_err_reg);
3381 }
3382
3383 sp->device_enabled_once = FALSE;
3384 }
3385
3386 /**
3387 * s2io_set_swapper - to set the swapper control on the card
3388 * @sp : private member of the device structure,
3389 * pointer to the s2io_nic structure.
3390 * Description: Function to set the swapper control on the card
3391 * correctly depending on the 'endianness' of the system.
3392 * Return value:
3393 * SUCCESS on success and FAILURE on failure.
3394 */
3395
3396 static int s2io_set_swapper(nic_t * sp)
3397 {
3398 struct net_device *dev = sp->dev;
3399 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3400 u64 val64, valt, valr;
3401
3402 /*
3403 * Set proper endian settings and verify the same by reading
3404 * the PIF Feed-back register.
3405 */
3406
3407 val64 = readq(&bar0->pif_rd_swapper_fb);
3408 if (val64 != 0x0123456789ABCDEFULL) {
3409 int i = 0;
3410 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3411 0x8100008181000081ULL, /* FE=1, SE=0 */
3412 0x4200004242000042ULL, /* FE=0, SE=1 */
3413 0}; /* FE=0, SE=0 */
3414
3415 while (i < 4) {
3416 writeq(value[i], &bar0->swapper_ctrl);
3417 val64 = readq(&bar0->pif_rd_swapper_fb);
3418 if (val64 == 0x0123456789ABCDEFULL)
3419 break;
3420 i++;
3421 }
3422 if (i == 4) {
3423 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3424 dev->name);
3425 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3426 (unsigned long long) val64);
3427 return FAILURE;
3428 }
3429 valr = value[i];
3430 } else {
3431 valr = readq(&bar0->swapper_ctrl);
3432 }
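/*
 * When host and card disagree on byte order, the 64-bit feedback
 * pattern is seen byte-swapped: a readq of 0x0123456789ABCDEF comes
 * back as 0xEFCDAB8967452301.  The loop above simply tries each
 * swapper setting in turn until the canonical pattern appears on
 * pif_rd_swapper_fb.
 */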
3433
3434 valt = 0x0123456789ABCDEFULL;
3435 writeq(valt, &bar0->xmsi_address);
3436 val64 = readq(&bar0->xmsi_address);
3437
3438 if (val64 != valt) {
3439 int i = 0;
3440 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3441 0x0081810000818100ULL, /* FE=1, SE=0 */
3442 0x0042420000424200ULL, /* FE=0, SE=1 */
3443 0}; /* FE=0, SE=0 */
3444
3445 while (i < 4) {
3446 writeq((value[i] | valr), &bar0->swapper_ctrl);
3447 writeq(valt, &bar0->xmsi_address);
3448 val64 = readq(&bar0->xmsi_address);
3449 if (val64 == valt)
3450 break;
3451 i++;
3452 }
3453 if (i == 4) {
3454 unsigned long long x = val64;
3455 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3456 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3457 return FAILURE;
3458 }
3459 }
3460 val64 = readq(&bar0->swapper_ctrl);
3461 val64 &= 0xFFFF000000000000ULL;
3462
3463 #ifdef __BIG_ENDIAN
3464 /*
3465 * The device is by default set to a big endian format, so a
3466 * big endian driver need not set anything.
3467 */
3468 val64 |= (SWAPPER_CTRL_TXP_FE |
3469 SWAPPER_CTRL_TXP_SE |
3470 SWAPPER_CTRL_TXD_R_FE |
3471 SWAPPER_CTRL_TXD_W_FE |
3472 SWAPPER_CTRL_TXF_R_FE |
3473 SWAPPER_CTRL_RXD_R_FE |
3474 SWAPPER_CTRL_RXD_W_FE |
3475 SWAPPER_CTRL_RXF_W_FE |
3476 SWAPPER_CTRL_XMSI_FE |
3477 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3478 if (sp->intr_type == INTA)
3479 val64 |= SWAPPER_CTRL_XMSI_SE;
3480 writeq(val64, &bar0->swapper_ctrl);
3481 #else
3482 /*
3483 * Initially we enable all bits to make it accessible by the
3484 * driver, then we selectively enable only those bits that
3485 * we want to set.
3486 */
3487 val64 |= (SWAPPER_CTRL_TXP_FE |
3488 SWAPPER_CTRL_TXP_SE |
3489 SWAPPER_CTRL_TXD_R_FE |
3490 SWAPPER_CTRL_TXD_R_SE |
3491 SWAPPER_CTRL_TXD_W_FE |
3492 SWAPPER_CTRL_TXD_W_SE |
3493 SWAPPER_CTRL_TXF_R_FE |
3494 SWAPPER_CTRL_RXD_R_FE |
3495 SWAPPER_CTRL_RXD_R_SE |
3496 SWAPPER_CTRL_RXD_W_FE |
3497 SWAPPER_CTRL_RXD_W_SE |
3498 SWAPPER_CTRL_RXF_W_FE |
3499 SWAPPER_CTRL_XMSI_FE |
3500 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3501 if (sp->intr_type == INTA)
3502 val64 |= SWAPPER_CTRL_XMSI_SE;
3503 writeq(val64, &bar0->swapper_ctrl);
3504 #endif
3505 val64 = readq(&bar0->swapper_ctrl);
3506
3507 /*
3508 * Verifying if endian settings are accurate by reading a
3509 * feedback register.
3510 */
3511 val64 = readq(&bar0->pif_rd_swapper_fb);
3512 if (val64 != 0x0123456789ABCDEFULL) {
3513 /* Endian settings are incorrect, calls for another dekko. */
3514 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3515 dev->name);
3516 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3517 (unsigned long long) val64);
3518 return FAILURE;
3519 }
3520
3521 return SUCCESS;
3522 }
3523
3524 static int wait_for_msix_trans(nic_t *nic, int i)
3525 {
3526 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3527 u64 val64;
3528 int ret = 0, cnt = 0;
3529
3530 do {
3531 val64 = readq(&bar0->xmsi_access);
3532 if (!(val64 & BIT(15)))
3533 break;
3534 mdelay(1);
3535 cnt++;
3536 } while (cnt < 5);
3537 if (cnt == 5) {
3538 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3539 ret = 1;
3540 }
3541
3542 return ret;
3543 }
3544
3545 static void restore_xmsi_data(nic_t *nic)
3546 {
3547 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3548 u64 val64;
3549 int i;
3550
3551 for (i=0; i< nic->avail_msix_vectors; i++) {
3552 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3553 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3554 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3555 writeq(val64, &bar0->xmsi_access);
3556 if (wait_for_msix_trans(nic, i)) {
3557 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3558 continue;
3559 }
3560 }
3561 }
3562
3563 static void store_xmsi_data(nic_t *nic)
3564 {
3565 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3566 u64 val64, addr, data;
3567 int i;
3568
3569 /* Store and display */
3570 for (i=0; i< nic->avail_msix_vectors; i++) {
3571 val64 = (BIT(15) | vBIT(i, 26, 6));
3572 writeq(val64, &bar0->xmsi_access);
3573 if (wait_for_msix_trans(nic, i)) {
3574 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3575 continue;
3576 }
3577 addr = readq(&bar0->xmsi_address);
3578 data = readq(&bar0->xmsi_data);
3579 if (addr && data) {
3580 nic->msix_info[i].addr = addr;
3581 nic->msix_info[i].data = data;
3582 }
3583 }
3584 }
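/*
 * The xmsi_access encoding used by the two helpers above, as inferred
 * from their usage: BIT(15) is the busy/strobe flag that
 * wait_for_msix_trans() polls until the hardware clears it, BIT(7)
 * selects a write (set in restore_xmsi_data(), clear for the read in
 * store_xmsi_data()), and vBIT(i, 26, 6) selects which MSI-X vector
 * the address/data pair belongs to.
 */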
3585
3586 int s2io_enable_msi(nic_t *nic)
3587 {
3588 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3589 u16 msi_ctrl, msg_val;
3590 struct config_param *config = &nic->config;
3591 struct net_device *dev = nic->dev;
3592 u64 val64, tx_mat, rx_mat;
3593 int i, err;
3594
3595 val64 = readq(&bar0->pic_control);
3596 val64 &= ~BIT(1);
3597 writeq(val64, &bar0->pic_control);
3598
3599 err = pci_enable_msi(nic->pdev);
3600 if (err) {
3601 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3602 nic->dev->name);
3603 return err;
3604 }
3605
3606 /*
3607 * Enable MSI and use MSI-1 instead of the standard MSI-0
3608 * for interrupt handling.
3609 */
3610 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3611 msg_val ^= 0x1;
3612 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3613 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3614
3615 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3616 msi_ctrl |= 0x10;
3617 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3618
3619 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3620 tx_mat = readq(&bar0->tx_mat0_n[0]);
3621 for (i=0; i<config->tx_fifo_num; i++) {
3622 tx_mat |= TX_MAT_SET(i, 1);
3623 }
3624 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3625
3626 rx_mat = readq(&bar0->rx_mat);
3627 for (i=0; i<config->rx_ring_num; i++) {
3628 rx_mat |= RX_MAT_SET(i, 1);
3629 }
3630 writeq(rx_mat, &bar0->rx_mat);
3631
3632 dev->irq = nic->pdev->irq;
3633 return 0;
3634 }
3635
3636 static int s2io_enable_msi_x(nic_t *nic)
3637 {
3638 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3639 u64 tx_mat, rx_mat;
3640 u16 msi_control; /* Temp variable */
3641 int ret, i, j, msix_indx = 1;
3642
3643 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3644 GFP_KERNEL);
3645 if (nic->entries == NULL) {
3646 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3647 return -ENOMEM;
3648 }
3649 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3650
3651 nic->s2io_entries =
3652 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3653 GFP_KERNEL);
3654 if (nic->s2io_entries == NULL) {
3655 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3656 kfree(nic->entries);
3657 return -ENOMEM;
3658 }
3659 memset(nic->s2io_entries, 0,
3660 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3661
3662 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3663 nic->entries[i].entry = i;
3664 nic->s2io_entries[i].entry = i;
3665 nic->s2io_entries[i].arg = NULL;
3666 nic->s2io_entries[i].in_use = 0;
3667 }
3668
3669 tx_mat = readq(&bar0->tx_mat0_n[0]);
3670 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3671 tx_mat |= TX_MAT_SET(i, msix_indx);
3672 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3673 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3674 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3675 }
3676 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3677
3678 if (!nic->config.bimodal) {
3679 rx_mat = readq(&bar0->rx_mat);
3680 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3681 rx_mat |= RX_MAT_SET(j, msix_indx);
3682 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3683 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3684 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3685 }
3686 writeq(rx_mat, &bar0->rx_mat);
3687 } else {
3688 tx_mat = readq(&bar0->tx_mat0_n[7]);
3689 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3690 tx_mat |= TX_MAT_SET(j, msix_indx);
3691 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3692 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3693 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3694 }
3695 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3696 }
3697
3698 nic->avail_msix_vectors = 0;
3699 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3700 /* We fail init if error or we get fewer vectors than the minimum required */
3701 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3702 nic->avail_msix_vectors = ret;
3703 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3704 }
3705 if (ret) {
3706 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3707 kfree(nic->entries);
3708 kfree(nic->s2io_entries);
3709 nic->entries = NULL;
3710 nic->s2io_entries = NULL;
3711 nic->avail_msix_vectors = 0;
3712 return -ENOMEM;
3713 }
3714 if (!nic->avail_msix_vectors)
3715 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3716
3717 /*
3718 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3719 * in the herc NIC. (Temp change, needs to be removed later)
3720 */
3721 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3722 msi_control |= 0x1; /* Enable MSI */
3723 pci_write_config_word(nic->pdev, 0x42, msi_control);
3724
3725 return 0;
3726 }
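
/*
 * Usage sketch (illustrative only, not built) of the pci_enable_msix()
 * contract relied upon above: a return of 0 means success, a negative
 * value means failure, and a positive value means only that many vectors
 * are available, in which case the caller may retry with the smaller
 * count. The helper name below is hypothetical.
 */
#if 0
static int s2io_msix_negotiate_sketch(struct pci_dev *pdev,
				      struct msix_entry *entries,
				      int nreq, int min_needed)
{
	int ret = pci_enable_msix(pdev, entries, nreq);

	/* Positive return: only 'ret' vectors available; retry with that. */
	if (ret > 0 && ret >= min_needed)
		ret = pci_enable_msix(pdev, entries, ret);
	return ret;	/* 0 on success; else fall back to MSI or INTA */
}
#endif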
3727
3728 /* ********************************************************* *
3729 * Functions defined below concern the OS part of the driver *
3730 * ********************************************************* */
3731
3732 /**
3733 * s2io_open - open entry point of the driver
3734 * @dev : pointer to the device structure.
3735 * Description:
3736 * This function is the open entry point of the driver. It mainly calls a
3737 * function to allocate Rx buffers and inserts them into the buffer
3738 * descriptors and then enables the Rx part of the NIC.
3739 * Return value:
3740 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3741 * file on failure.
3742 */
3743
3744 static int s2io_open(struct net_device *dev)
3745 {
3746 nic_t *sp = dev->priv;
3747 int err = 0;
3748
3749 /*
3750 * Make sure the link is off by default every time the
3751 * NIC is initialized
3752 */
3753 netif_carrier_off(dev);
3754 sp->last_link_state = 0;
3755
3756 /* Initialize H/W and enable interrupts */
3757 err = s2io_card_up(sp);
3758 if (err) {
3759 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3760 dev->name);
3761 goto hw_init_failed;
3762 }
3763
3764 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3765 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3766 s2io_card_down(sp);
3767 err = -ENODEV;
3768 goto hw_init_failed;
3769 }
3770
3771 netif_start_queue(dev);
3772 return 0;
3773
3774 hw_init_failed:
3775 if (sp->intr_type == MSI_X) {
3776 if (sp->entries)
3777 kfree(sp->entries);
3778 if (sp->s2io_entries)
3779 kfree(sp->s2io_entries);
3780 }
3781 return err;
3782 }
3783
3784 /**
3785 * s2io_close -close entry point of the driver
3786 * @dev : device pointer.
3787 * Description:
3788 * This is the stop entry point of the driver. It needs to undo exactly
3789 * whatever was done by the open entry point, thus it's usually referred to
3790 * as the close function. Among other things this function mainly stops the
3791 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3792 * Return value:
3793 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3794 * file on failure.
3795 */
3796
3797 static int s2io_close(struct net_device *dev)
3798 {
3799 nic_t *sp = dev->priv;
3800
3801 flush_scheduled_work();
3802 netif_stop_queue(dev);
3803 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3804 s2io_card_down(sp);
3805
3806 sp->device_close_flag = TRUE; /* Device is shut down. */
3807 return 0;
3808 }
3809
3810 /**
3811 * s2io_xmit - Tx entry point of the driver
3812 * @skb : the socket buffer containing the Tx data.
3813 * @dev : device pointer.
3814 * Description :
3815 * This function is the Tx entry point of the driver. S2IO NIC supports
3816 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3817 * NOTE: when the device can't queue the packet, just the trans_start
3818 * variable will not be updated.
3819 * Return value:
3820 * 0 on success & 1 on failure.
3821 */
3822
3823 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3824 {
3825 nic_t *sp = dev->priv;
3826 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3827 register u64 val64;
3828 TxD_t *txdp;
3829 TxFIFO_element_t __iomem *tx_fifo;
3830 unsigned long flags;
3831 #ifdef NETIF_F_TSO
3832 int mss;
3833 #endif
3834 u16 vlan_tag = 0;
3835 int vlan_priority = 0;
3836 mac_info_t *mac_control;
3837 struct config_param *config;
3838
3839 mac_control = &sp->mac_control;
3840 config = &sp->config;
3841
3842 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3843 spin_lock_irqsave(&sp->tx_lock, flags);
3844 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3845 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3846 dev->name);
3847 spin_unlock_irqrestore(&sp->tx_lock, flags);
3848 dev_kfree_skb(skb);
3849 return 0;
3850 }
3851
3852 queue = 0;
3853
3854 /* Get Fifo number to Transmit based on vlan priority */
3855 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3856 vlan_tag = vlan_tx_tag_get(skb);
3857 vlan_priority = vlan_tag >> 13;
3858 queue = config->fifo_mapping[vlan_priority];
3859 }
3860
3861 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3862 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3863 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3864 list_virt_addr;
3865
3866 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3867 /* Avoid "put" pointer going beyond "get" pointer */
3868 if (txdp->Host_Control ||
3869 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3870 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3871 netif_stop_queue(dev);
3872 dev_kfree_skb(skb);
3873 spin_unlock_irqrestore(&sp->tx_lock, flags);
3874 return 0;
3875 }
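/*
 * Worked example of the wrap check above: with fifo_len == 511
 * (queue_len == 512), put_off == 511 and get_off == 0 is treated as
 * full, i.e. one slot is deliberately left unused so a full FIFO can
 * be told apart from an empty one.
 */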
3876
3877 /* A buffer with no data will be dropped */
3878 if (!skb->len) {
3879 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3880 dev_kfree_skb(skb);
3881 spin_unlock_irqrestore(&sp->tx_lock, flags);
3882 return 0;
3883 }
3884
3885 txdp->Control_1 = 0;
3886 txdp->Control_2 = 0;
3887 #ifdef NETIF_F_TSO
3888 mss = skb_shinfo(skb)->gso_size;
3889 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3890 txdp->Control_1 |= TXD_TCP_LSO_EN;
3891 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3892 }
3893 #endif
3894 if (skb->ip_summed == CHECKSUM_HW) {
3895 txdp->Control_2 |=
3896 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3897 TXD_TX_CKO_UDP_EN);
3898 }
3899 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3900 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3901 txdp->Control_2 |= config->tx_intr_type;
3902
3903 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3904 txdp->Control_2 |= TXD_VLAN_ENABLE;
3905 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3906 }
3907
3908 frg_len = skb->len - skb->data_len;
3909 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) {
3910 int ufo_size;
3911
3912 ufo_size = skb_shinfo(skb)->gso_size;
3913 ufo_size &= ~7;
3914 txdp->Control_1 |= TXD_UFO_EN;
3915 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3916 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3917 #ifdef __BIG_ENDIAN
3918 sp->ufo_in_band_v[put_off] =
3919 (u64)skb_shinfo(skb)->ip6_frag_id;
3920 #else
3921 sp->ufo_in_band_v[put_off] =
3922 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3923 #endif
3924 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3925 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3926 sp->ufo_in_band_v,
3927 sizeof(u64), PCI_DMA_TODEVICE);
3928 txdp++;
3929 txdp->Control_1 = 0;
3930 txdp->Control_2 = 0;
3931 }
3932
3933 txdp->Buffer_Pointer = pci_map_single
3934 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3935 txdp->Host_Control = (unsigned long) skb;
3936 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3937
3938 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
3939 txdp->Control_1 |= TXD_UFO_EN;
3940
3941 frg_cnt = skb_shinfo(skb)->nr_frags;
3942 /* For fragmented SKB. */
3943 for (i = 0; i < frg_cnt; i++) {
3944 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3945 /* A '0' length fragment will be ignored */
3946 if (!frag->size)
3947 continue;
3948 txdp++;
3949 txdp->Buffer_Pointer = (u64) pci_map_page
3950 (sp->pdev, frag->page, frag->page_offset,
3951 frag->size, PCI_DMA_TODEVICE);
3952 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
3953 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
3954 txdp->Control_1 |= TXD_UFO_EN;
3955 }
3956 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3957
3958 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
3959 frg_cnt++; /* as Txd0 was used for inband header */
3960
3961 tx_fifo = mac_control->tx_FIFO_start[queue];
3962 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3963 writeq(val64, &tx_fifo->TxDL_Pointer);
3964
3965 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3966 TX_FIFO_LAST_LIST);
3967
3968 #ifdef NETIF_F_TSO
3969 if (mss)
3970 val64 |= TX_FIFO_SPECIAL_FUNC;
3971 #endif
3972 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
3973 val64 |= TX_FIFO_SPECIAL_FUNC;
3974 writeq(val64, &tx_fifo->List_Control);
3975
3976 mmiowb();
3977
3978 put_off++;
3979 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
3980 put_off = 0;
3981 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3982
3983 /* Avoid "put" pointer going beyond "get" pointer */
3984 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3985 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
3986 DBG_PRINT(TX_DBG,
3987 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3988 put_off, get_off);
3989 netif_stop_queue(dev);
3990 }
3991
3992 dev->trans_start = jiffies;
3993 spin_unlock_irqrestore(&sp->tx_lock, flags);
3994
3995 return 0;
3996 }
3997
3998 static void
3999 s2io_alarm_handle(unsigned long data)
4000 {
4001 nic_t *sp = (nic_t *)data;
4002
4003 alarm_intr_handler(sp);
4004 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4005 }
4006
4007 static irqreturn_t
4008 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
4009 {
4010 struct net_device *dev = (struct net_device *) dev_id;
4011 nic_t *sp = dev->priv;
4012 int i;
4013 int ret;
4014 mac_info_t *mac_control;
4015 struct config_param *config;
4016
4017 atomic_inc(&sp->isr_cnt);
4018 mac_control = &sp->mac_control;
4019 config = &sp->config;
4020 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4021
4022 /* If Intr is because of Rx Traffic */
4023 for (i = 0; i < config->rx_ring_num; i++)
4024 rx_intr_handler(&mac_control->rings[i]);
4025
4026 /* If Intr is because of Tx Traffic */
4027 for (i = 0; i < config->tx_fifo_num; i++)
4028 tx_intr_handler(&mac_control->fifos[i]);
4029
4030 /*
4031 * If the Rx buffer count is below the panic threshold then
4032 * reallocate the buffers from the interrupt handler itself,
4033 * else schedule a tasklet to reallocate the buffers.
4034 */
4035 for (i = 0; i < config->rx_ring_num; i++) {
4036 if (!sp->lro) {
4037 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4038 int level = rx_buffer_level(sp, rxb_size, i);
4039
4040 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4041 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4042 dev->name);
4043 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4044 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4045 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4046 dev->name);
4047 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4048 clear_bit(0, (&sp->tasklet_status));
4049 atomic_dec(&sp->isr_cnt);
4050 return IRQ_HANDLED;
4051 }
4052 clear_bit(0, (&sp->tasklet_status));
4053 } else if (level == LOW) {
4054 tasklet_schedule(&sp->task);
4055 }
4056 } else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4058 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4059 dev->name);
4060 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4061 break;
4062 }
4063 }
4064
4065 atomic_dec(&sp->isr_cnt);
4066 return IRQ_HANDLED;
4067 }
4068
4069 static irqreturn_t
4070 s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
4071 {
4072 ring_info_t *ring = (ring_info_t *)dev_id;
4073 nic_t *sp = ring->nic;
4074 struct net_device *dev = sp->dev; /* dev_id is the ring, not the netdev */
4075 int rxb_size, level, rng_n;
4076
4077 atomic_inc(&sp->isr_cnt);
4078 rx_intr_handler(ring);
4079
4080 rng_n = ring->ring_no;
4081 if (!sp->lro) {
4082 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4083 level = rx_buffer_level(sp, rxb_size, rng_n);
4084
4085 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4086 int ret;
4087 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4088 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4089 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4090 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4091 __FUNCTION__);
4092 clear_bit(0, (&sp->tasklet_status));
4093 return IRQ_HANDLED;
4094 }
4095 clear_bit(0, (&sp->tasklet_status));
4096 } else if (level == LOW) {
4097 tasklet_schedule(&sp->task);
4098 }
4099 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4101 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
4102 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4103 }
4104
4105 atomic_dec(&sp->isr_cnt);
4106
4107 return IRQ_HANDLED;
4108 }
4109
4110 static irqreturn_t
4111 s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
4112 {
4113 fifo_info_t *fifo = (fifo_info_t *)dev_id;
4114 nic_t *sp = fifo->nic;
4115
4116 atomic_inc(&sp->isr_cnt);
4117 tx_intr_handler(fifo);
4118 atomic_dec(&sp->isr_cnt);
4119 return IRQ_HANDLED;
4120 }

4121 static void s2io_txpic_intr_handle(nic_t *sp)
4122 {
4123 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4124 u64 val64;
4125
4126 val64 = readq(&bar0->pic_int_status);
4127 if (val64 & PIC_INT_GPIO) {
4128 val64 = readq(&bar0->gpio_int_reg);
4129 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4130 (val64 & GPIO_INT_REG_LINK_UP)) {
4131 /*
4132 * This is an unstable state, so clear both the up and down
4133 * interrupts and let the adapter re-evaluate the link state.
4134 */
4135 val64 |= GPIO_INT_REG_LINK_DOWN;
4136 val64 |= GPIO_INT_REG_LINK_UP;
4137 writeq(val64, &bar0->gpio_int_reg);
4138 val64 = readq(&bar0->gpio_int_mask);
4139 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4140 GPIO_INT_MASK_LINK_DOWN);
4141 writeq(val64, &bar0->gpio_int_mask);
4142 } else if (val64 & GPIO_INT_REG_LINK_UP) {
4144 val64 = readq(&bar0->adapter_status);
4145 if (verify_xena_quiescence(sp, val64,
4146 sp->device_enabled_once)) {
4147 /* Enable Adapter */
4148 val64 = readq(&bar0->adapter_control);
4149 val64 |= ADAPTER_CNTL_EN;
4150 writeq(val64, &bar0->adapter_control);
4151 val64 |= ADAPTER_LED_ON;
4152 writeq(val64, &bar0->adapter_control);
4153 if (!sp->device_enabled_once)
4154 sp->device_enabled_once = 1;
4155
4156 s2io_link(sp, LINK_UP);
4157 /*
4158 * unmask link down interrupt and mask link-up
4159 * intr
4160 */
4161 val64 = readq(&bar0->gpio_int_mask);
4162 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4163 val64 |= GPIO_INT_MASK_LINK_UP;
4164 writeq(val64, &bar0->gpio_int_mask);
4165
4166 }
4167 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4168 val64 = readq(&bar0->adapter_status);
4169 if (verify_xena_quiescence(sp, val64,
4170 sp->device_enabled_once)) {
4171 s2io_link(sp, LINK_DOWN);
4172 /* Link is down so unmask the link-up interrupt */
4173 val64 = readq(&bar0->gpio_int_mask);
4174 val64 &= ~GPIO_INT_MASK_LINK_UP;
4175 val64 |= GPIO_INT_MASK_LINK_DOWN;
4176 writeq(val64, &bar0->gpio_int_mask);
4177 }
4178 }
4179 }
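/*
 * The read below discards its result; presumably it serves as a dummy
 * read to flush the preceding gpio_int_mask writes.
 */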
4180 val64 = readq(&bar0->gpio_int_mask);
4181 }
4182
4183 /**
4184 * s2io_isr - ISR handler of the device .
4185 * @irq: the irq of the device.
4186 * @dev_id: a void pointer to the dev structure of the NIC.
4187 * @pt_regs: pointer to the registers pushed on the stack.
4188 * Description: This function is the ISR handler of the device. It
4189 * identifies the reason for the interrupt and calls the relevant
4190 * service routines. As a contingency measure, this ISR allocates
4191 * receive buffers if their number is below the panic value, which is
4192 * presently set to 25% of the original number of receive buffers allocated.
4193 * Return value:
4194 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4195 * IRQ_NONE: will be returned if interrupt is not from our device
4196 */
4197 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
4198 {
4199 struct net_device *dev = (struct net_device *) dev_id;
4200 nic_t *sp = dev->priv;
4201 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4202 int i;
4203 u64 reason = 0, val64, org_mask;
4204 mac_info_t *mac_control;
4205 struct config_param *config;
4206
4207 atomic_inc(&sp->isr_cnt);
4208 mac_control = &sp->mac_control;
4209 config = &sp->config;
4210
4211 /*
4212 * Identify the cause for interrupt and call the appropriate
4213 * interrupt handler. Causes for the interrupt could be:
4214 * 1. Rx of packet.
4215 * 2. Tx complete.
4216 * 3. Link down.
4217 * 4. Error in any functional blocks of the NIC.
4218 */
4219 reason = readq(&bar0->general_int_status);
4220
4221 if (!reason) {
4222 /* The interrupt was not raised by Xena. */
4223 atomic_dec(&sp->isr_cnt);
4224 return IRQ_NONE;
4225 }
4226
4227 val64 = 0xFFFFFFFFFFFFFFFFULL;
4228 /* Store current mask before masking all interrupts */
4229 org_mask = readq(&bar0->general_int_mask);
4230 writeq(val64, &bar0->general_int_mask);
4231
4232 #ifdef CONFIG_S2IO_NAPI
4233 if (reason & GEN_INTR_RXTRAFFIC) {
4234 if (netif_rx_schedule_prep(dev)) {
4235 writeq(val64, &bar0->rx_traffic_mask);
4236 __netif_rx_schedule(dev);
4237 }
4238 }
4239 #else
4240 /*
4241 * Rx handler is called by default, without checking for the
4242 * cause of interrupt.
4243 * rx_traffic_int reg is an R1 register, writing all 1's
4244 * will ensure that the actual interrupt-causing bit gets
4245 * cleared and hence a read can be avoided.
4246 */
4247 writeq(val64, &bar0->rx_traffic_int);
4248 for (i = 0; i < config->rx_ring_num; i++) {
4249 rx_intr_handler(&mac_control->rings[i]);
4250 }
4251 #endif
4252
4253 /*
4254 * tx_traffic_int reg is an R1 register, writing all 1's
4255 * will ensure that the actual interrupt-causing bit gets
4256 * cleared and hence a read can be avoided.
4257 */
4258 writeq(val64, &bar0->tx_traffic_int);
4259
4260 for (i = 0; i < config->tx_fifo_num; i++)
4261 tx_intr_handler(&mac_control->fifos[i]);
4262
4263 if (reason & GEN_INTR_TXPIC)
4264 s2io_txpic_intr_handle(sp);
4265 /*
4266 * If the Rx buffer count is below the panic threshold then
4267 * reallocate the buffers from the interrupt handler itself,
4268 * else schedule a tasklet to reallocate the buffers.
4269 */
4270 #ifndef CONFIG_S2IO_NAPI
4271 for (i = 0; i < config->rx_ring_num; i++) {
4272 if (!sp->lro) {
4273 int ret;
4274 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4275 int level = rx_buffer_level(sp, rxb_size, i);
4276
4277 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4278 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4279 dev->name);
4280 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4281 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4282 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4283 dev->name);
4284 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4285 clear_bit(0, (&sp->tasklet_status));
4286 atomic_dec(&sp->isr_cnt);
4287 writeq(org_mask, &bar0->general_int_mask);
4288 return IRQ_HANDLED;
4289 }
4290 clear_bit(0, (&sp->tasklet_status));
4291 } else if (level == LOW) {
4292 tasklet_schedule(&sp->task);
4293 }
4294 } else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4296 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4297 dev->name);
4298 DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
4299 break;
4300 }
4301 }
4302 #endif
4303 writeq(org_mask, &bar0->general_int_mask);
4304 atomic_dec(&sp->isr_cnt);
4305 return IRQ_HANDLED;
4306 }
4307
4308 /**
4309 * s2io_updt_stats - triggers a one-shot update of the hardware statistics block.
4310 */
4311 static void s2io_updt_stats(nic_t *sp)
4312 {
4313 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4314 u64 val64;
4315 int cnt = 0;
4316
4317 if (atomic_read(&sp->card_state) == CARD_UP) {
4318 /* Apprx 30us on a 133 MHz bus */
4319 val64 = SET_UPDT_CLICKS(10) |
4320 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4321 writeq(val64, &bar0->stat_cfg);
4322 do {
4323 udelay(100);
4324 val64 = readq(&bar0->stat_cfg);
4325 if (!(val64 & BIT(0)))
4326 break;
4327 cnt++;
4328 if (cnt == 5)
4329 break; /* Updt failed */
4330 } while(1);
4331 }
4332 }
4333
4334 /**
4335 * s2io_get_stats - Updates the device statistics structure.
4336 * @dev : pointer to the device structure.
4337 * Description:
4338 * This function updates the device statistics structure in the s2io_nic
4339 * structure and returns a pointer to the same.
4340 * Return value:
4341 * pointer to the updated net_device_stats structure.
4342 */
4343
4344 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4345 {
4346 nic_t *sp = dev->priv;
4347 mac_info_t *mac_control;
4348 struct config_param *config;
4349
4350
4351 mac_control = &sp->mac_control;
4352 config = &sp->config;
4353
4354 /* Configure Stats for immediate updt */
4355 s2io_updt_stats(sp);
4356
4357 sp->stats.tx_packets =
4358 le32_to_cpu(mac_control->stats_info->tmac_frms);
4359 sp->stats.tx_errors =
4360 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4361 sp->stats.rx_errors =
4362 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
4363 sp->stats.multicast =
4364 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4365 sp->stats.rx_length_errors =
4366 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
4367
4368 return (&sp->stats);
4369 }
4370
4371 /**
4372 * s2io_set_multicast - entry point for multicast address enable/disable.
4373 * @dev : pointer to the device structure
4374 * Description:
4375 * This function is a driver entry point which gets called by the kernel
4376 * whenever multicast addresses must be enabled/disabled. This also gets
4377 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4378 * determine, if multicast address must be enabled or if promiscuous mode
4379 * called to set/reset promiscuous mode. Depending on the device flags, we
4380 * determine whether multicast addresses must be enabled or promiscuous mode
4381 * is to be disabled, etc.
4382 */
4383
4384 static void s2io_set_multicast(struct net_device *dev)
4385 {
4386 int i, j, prev_cnt;
4387 struct dev_mc_list *mclist;
4388 nic_t *sp = dev->priv;
4389 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4390 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4391 0xfeffffffffffULL;
4392 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4393 void __iomem *add;
4394
4395 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4396 /* Enable all Multicast addresses */
4397 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4398 &bar0->rmac_addr_data0_mem);
4399 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4400 &bar0->rmac_addr_data1_mem);
4401 val64 = RMAC_ADDR_CMD_MEM_WE |
4402 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4403 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4404 writeq(val64, &bar0->rmac_addr_cmd_mem);
4405 /* Wait till command completes */
4406 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4407 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
4408
4409 sp->m_cast_flg = 1;
4410 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4411 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4412 /* Disable all Multicast addresses */
4413 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4414 &bar0->rmac_addr_data0_mem);
4415 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4416 &bar0->rmac_addr_data1_mem);
4417 val64 = RMAC_ADDR_CMD_MEM_WE |
4418 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4419 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4420 writeq(val64, &bar0->rmac_addr_cmd_mem);
4421 /* Wait till command completes */
4422 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4423 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
4424
4425 sp->m_cast_flg = 0;
4426 sp->all_multi_pos = 0;
4427 }
4428
4429 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4430 /* Put the NIC into promiscuous mode */
4431 add = &bar0->mac_cfg;
4432 val64 = readq(&bar0->mac_cfg);
4433 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4434
4435 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4436 writel((u32) val64, add);
4437 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4438 writel((u32) (val64 >> 32), (add + 4));
4439
4440 val64 = readq(&bar0->mac_cfg);
4441 sp->promisc_flg = 1;
4442 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4443 dev->name);
4444 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4445 /* Remove the NIC from promiscuous mode */
4446 add = &bar0->mac_cfg;
4447 val64 = readq(&bar0->mac_cfg);
4448 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4449
4450 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4451 writel((u32) val64, add);
4452 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4453 writel((u32) (val64 >> 32), (add + 4));
4454
4455 val64 = readq(&bar0->mac_cfg);
4456 sp->promisc_flg = 0;
4457 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4458 dev->name);
4459 }
4460
4461 /* Update individual M_CAST address list */
4462 if ((!sp->m_cast_flg) && dev->mc_count) {
4463 if (dev->mc_count >
4464 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4465 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4466 dev->name);
4467 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4468 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4469 return;
4470 }
4471
4472 prev_cnt = sp->mc_addr_count;
4473 sp->mc_addr_count = dev->mc_count;
4474
4475 /* Clear out the previous list of Mc in the H/W. */
4476 for (i = 0; i < prev_cnt; i++) {
4477 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4478 &bar0->rmac_addr_data0_mem);
4479 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4480 &bar0->rmac_addr_data1_mem);
4481 val64 = RMAC_ADDR_CMD_MEM_WE |
4482 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4483 RMAC_ADDR_CMD_MEM_OFFSET
4484 (MAC_MC_ADDR_START_OFFSET + i);
4485 writeq(val64, &bar0->rmac_addr_cmd_mem);
4486
4487 /* Wait till command completes */
4488 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4489 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4490 DBG_PRINT(ERR_DBG, "%s: Adding ",
4491 dev->name);
4492 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4493 return;
4494 }
4495 }
4496
4497 /* Create the new Rx filter list and update the same in H/W. */
4498 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4499 i++, mclist = mclist->next) {
4500 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4501 ETH_ALEN);
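/*
 * Pack the six address bytes MSB-first into the low 48 bits of
 * mac_addr; e.g. 01:00:5e:00:00:01 becomes 0x01005e000001ULL.
 */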
4502 mac_addr = 0;
4503 for (j = 0; j < ETH_ALEN; j++) {
4504 mac_addr |= mclist->dmi_addr[j];
4505 mac_addr <<= 8;
4506 }
4507 mac_addr >>= 8;
4508 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4509 &bar0->rmac_addr_data0_mem);
4510 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4511 &bar0->rmac_addr_data1_mem);
4512 val64 = RMAC_ADDR_CMD_MEM_WE |
4513 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4514 RMAC_ADDR_CMD_MEM_OFFSET
4515 (i + MAC_MC_ADDR_START_OFFSET);
4516 writeq(val64, &bar0->rmac_addr_cmd_mem);
4517
4518 /* Wait till command completes */
4519 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4520 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4521 DBG_PRINT(ERR_DBG, "%s: Adding ",
4522 dev->name);
4523 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4524 return;
4525 }
4526 }
4527 }
4528 }
4529
4530 /**
4531 * s2io_set_mac_addr - Programs the Xframe mac address
4532 * @dev : pointer to the device structure.
4533 * @addr: a uchar pointer to the new mac address which is to be set.
4534 * Description : This procedure will program the Xframe to receive
4535 * frames with the new MAC address
4536 * Return value: SUCCESS on success and an appropriate (-)ve integer
4537 * as defined in errno.h file on failure.
4538 */
4539
4540 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4541 {
4542 nic_t *sp = dev->priv;
4543 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4544 register u64 val64, mac_addr = 0;
4545 int i;
4546
4547 /*
4548 * Set the new MAC address as the new unicast filter and reflect this
4549 * change on the device address registered with the OS. It will be
4550 * at offset 0.
4551 */
4552 for (i = 0; i < ETH_ALEN; i++) {
4553 mac_addr <<= 8;
4554 mac_addr |= addr[i];
4555 }
4556
4557 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4558 &bar0->rmac_addr_data0_mem);
4559
4560 val64 =
4561 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4562 RMAC_ADDR_CMD_MEM_OFFSET(0);
4563 writeq(val64, &bar0->rmac_addr_cmd_mem);
4564 /* Wait till command completes */
4565 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4566 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4567 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4568 return FAILURE;
4569 }
4570
4571 return SUCCESS;
4572 }
4573
4574 /**
4575 * s2io_ethtool_sset - Sets different link parameters.
4576 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4577 * @info: pointer to the structure with parameters given by ethtool to set
4578 * link information.
4579 * Description:
4580 * The function sets different link parameters provided by the user onto
4581 * the NIC.
4582 * Return value:
4583 * 0 on success.
4584 */
4585
4586 static int s2io_ethtool_sset(struct net_device *dev,
4587 struct ethtool_cmd *info)
4588 {
4589 nic_t *sp = dev->priv;
4590 if ((info->autoneg == AUTONEG_ENABLE) ||
4591 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4592 return -EINVAL;
4593 else {
4594 s2io_close(sp->dev);
4595 s2io_open(sp->dev);
4596 }
4597
4598 return 0;
4599 }
4600
4601 /**
4602 * s2io_ethtool_gset - Return link specific information.
4603 * @sp : private member of the device structure, pointer to the
4604 * s2io_nic structure.
4605 * @info : pointer to the structure with parameters given by ethtool
4606 * to return link information.
4607 * Description:
4608 * Returns link specific information such as speed and duplex to ethtool.
4609 * Return value :
4610 * return 0 on success.
4611 */
4612
4613 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4614 {
4615 nic_t *sp = dev->priv;
4616 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4617 info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
4618 info->port = PORT_FIBRE;
4619 /* info->transceiver?? TODO */
4620
4621 if (netif_carrier_ok(sp->dev)) {
4622 info->speed = 10000;
4623 info->duplex = DUPLEX_FULL;
4624 } else {
4625 info->speed = -1;
4626 info->duplex = -1;
4627 }
4628
4629 info->autoneg = AUTONEG_DISABLE;
4630 return 0;
4631 }
4632
4633 /**
4634 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4635 * @sp : private member of the device structure, which is a pointer to the
4636 * s2io_nic structure.
4637 * @info : pointer to the structure with parameters given by ethtool to
4638 * return driver information.
4639 * Description:
4640 * Returns driver specific information such as name and version to ethtool.
4641 * Return value:
4642 * void
4643 */
4644
4645 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4646 struct ethtool_drvinfo *info)
4647 {
4648 nic_t *sp = dev->priv;
4649
4650 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4651 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4652 strncpy(info->fw_version, "", sizeof(info->fw_version));
4653 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4654 info->regdump_len = XENA_REG_SPACE;
4655 info->eedump_len = XENA_EEPROM_SPACE;
4656 info->testinfo_len = S2IO_TEST_LEN;
4657 info->n_stats = S2IO_STAT_LEN;
4658 }
4659
4660 /**
4661 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
4662 * @sp: private member of the device structure, which is a pointer to the
4663 * s2io_nic structure.
4664 * @regs : pointer to the structure with parameters given by ethtool for
4665 * dumping the registers.
4666 * @reg_space: The input argument into which all the registers are dumped.
4667 * Description:
4668 * Dumps the entire register space of xFrame NIC into the user given
4669 * buffer area.
4670 * Return value :
4671 * void .
4672 */
4673
4674 static void s2io_ethtool_gregs(struct net_device *dev,
4675 struct ethtool_regs *regs, void *space)
4676 {
4677 int i;
4678 u64 reg;
4679 u8 *reg_space = (u8 *) space;
4680 nic_t *sp = dev->priv;
4681
4682 regs->len = XENA_REG_SPACE;
4683 regs->version = sp->pdev->subsystem_device;
4684
4685 for (i = 0; i < regs->len; i += 8) {
4686 reg = readq(sp->bar0 + i);
4687 memcpy((reg_space + i), &reg, 8);
4688 }
4689 }
4690
4691 /**
4692 * s2io_phy_id - timer function that alternates adapter LED.
4693 * @data : address of the private member of the device structure, which
4694 * is a pointer to the s2io_nic structure, provided as an unsigned long.
4695 * Description: This is actually the timer function that toggles the
4696 * adapter LED bit in the adapter control register on every
4697 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4698 * once every second.
4699 */
4700 static void s2io_phy_id(unsigned long data)
4701 {
4702 nic_t *sp = (nic_t *) data;
4703 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4704 u64 val64 = 0;
4705 u16 subid;
4706
4707 subid = sp->pdev->subsystem_device;
4708 if ((sp->device_type == XFRAME_II_DEVICE) ||
4709 ((subid & 0xFF) >= 0x07)) {
4710 val64 = readq(&bar0->gpio_control);
4711 val64 ^= GPIO_CTRL_GPIO_0;
4712 writeq(val64, &bar0->gpio_control);
4713 } else {
4714 val64 = readq(&bar0->adapter_control);
4715 val64 ^= ADAPTER_LED_ON;
4716 writeq(val64, &bar0->adapter_control);
4717 }
4718
4719 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4720 }
4721
4722 /**
4723 * s2io_ethtool_idnic - To physically identify the nic on the system.
4724 * @sp : private member of the device structure, which is a pointer to the
4725 * s2io_nic structure.
4726 * @id : pointer to the structure with identification parameters given by
4727 * ethtool.
4728 * Description: Used to physically identify the NIC on the system.
4729 * The Link LED will blink for a time specified by the user for
4730 * identification.
4731 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4732 * identification is possible only if its link is up.
4733 * Return value:
4734 * int, returns 0 on success
4735 */
4736
4737 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4738 {
4739 u64 val64 = 0, last_gpio_ctrl_val;
4740 nic_t *sp = dev->priv;
4741 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4742 u16 subid;
4743
4744 subid = sp->pdev->subsystem_device;
4745 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4746 if ((sp->device_type == XFRAME_I_DEVICE) &&
4747 ((subid & 0xFF) < 0x07)) {
4748 val64 = readq(&bar0->adapter_control);
4749 if (!(val64 & ADAPTER_CNTL_EN)) {
4750 printk(KERN_ERR
4751 "Adapter Link down, cannot blink LED\n");
4752 return -EFAULT;
4753 }
4754 }
4755 if (sp->id_timer.function == NULL) {
4756 init_timer(&sp->id_timer);
4757 sp->id_timer.function = s2io_phy_id;
4758 sp->id_timer.data = (unsigned long) sp;
4759 }
4760 mod_timer(&sp->id_timer, jiffies);
4761 if (data)
4762 msleep_interruptible(data * HZ);
4763 else
4764 msleep_interruptible(MAX_FLICKER_TIME);
4765 del_timer_sync(&sp->id_timer);
4766
4767 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4768 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4769 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4770 }
4771
4772 return 0;
4773 }
4774
4775 /**
4776 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4777 * @sp : private member of the device structure, which is a pointer to the
4778 * s2io_nic structure.
4779 * @ep : pointer to the structure with pause parameters given by ethtool.
4780 * Description:
4781 * Returns the Pause frame generation and reception capability of the NIC.
4782 * Return value:
4783 * void
4784 */
4785 static void s2io_ethtool_getpause_data(struct net_device *dev,
4786 struct ethtool_pauseparam *ep)
4787 {
4788 u64 val64;
4789 nic_t *sp = dev->priv;
4790 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4791
4792 val64 = readq(&bar0->rmac_pause_cfg);
4793 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4794 ep->tx_pause = TRUE;
4795 if (val64 & RMAC_PAUSE_RX_ENABLE)
4796 ep->rx_pause = TRUE;
4797 ep->autoneg = FALSE;
4798 }
4799
4800 /**
4801 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4802 * @sp : private member of the device structure, which is a pointer to the
4803 * s2io_nic structure.
4804 * @ep : pointer to the structure with pause parameters given by ethtool.
4805 * Description:
4806 * It can be used to set or reset Pause frame generation or reception
4807 * support of the NIC.
4808 * Return value:
4809 * int, returns 0 on Success
4810 */
4811
4812 static int s2io_ethtool_setpause_data(struct net_device *dev,
4813 struct ethtool_pauseparam *ep)
4814 {
4815 u64 val64;
4816 nic_t *sp = dev->priv;
4817 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4818
4819 val64 = readq(&bar0->rmac_pause_cfg);
4820 if (ep->tx_pause)
4821 val64 |= RMAC_PAUSE_GEN_ENABLE;
4822 else
4823 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4824 if (ep->rx_pause)
4825 val64 |= RMAC_PAUSE_RX_ENABLE;
4826 else
4827 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4828 writeq(val64, &bar0->rmac_pause_cfg);
4829 return 0;
4830 }
4831
4832 /**
4833 * read_eeprom - reads 4 bytes of data from user given offset.
4834 * @sp : private member of the device structure, which is a pointer to the
4835 * s2io_nic structure.
4836 * @off : offset from which the data is to be read
4837 * @data : It's an output parameter where the data read at the given
4838 * offset is stored.
4839 * Description:
4840 * Will read 4 bytes of data from the user given offset and return the
4841 * read data.
4842 * NOTE: Will allow reading only that part of the EEPROM visible through the
4843 * I2C bus.
4844 * Return value:
4845 * -1 on failure and 0 on success.
4846 */
4847
4848 #define S2IO_DEV_ID 5
4849 static int read_eeprom(nic_t * sp, int off, u64 * data)
4850 {
4851 int ret = -1;
4852 u32 exit_cnt = 0;
4853 u64 val64;
4854 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4855
4856 if (sp->device_type == XFRAME_I_DEVICE) {
4857 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4858 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4859 I2C_CONTROL_CNTL_START;
4860 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4861
4862 while (exit_cnt < 5) {
4863 val64 = readq(&bar0->i2c_control);
4864 if (I2C_CONTROL_CNTL_END(val64)) {
4865 *data = I2C_CONTROL_GET_DATA(val64);
4866 ret = 0;
4867 break;
4868 }
4869 msleep(50);
4870 exit_cnt++;
4871 }
4872 }
4873
4874 if (sp->device_type == XFRAME_II_DEVICE) {
4875 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4876 SPI_CONTROL_BYTECNT(0x3) |
4877 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4878 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4879 val64 |= SPI_CONTROL_REQ;
4880 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4881 while (exit_cnt < 5) {
4882 val64 = readq(&bar0->spi_control);
4883 if (val64 & SPI_CONTROL_NACK) {
4884 ret = 1;
4885 break;
4886 } else if (val64 & SPI_CONTROL_DONE) {
4887 *data = readq(&bar0->spi_data);
4888 *data &= 0xffffff;
4889 ret = 0;
4890 break;
4891 }
4892 msleep(50);
4893 exit_cnt++;
4894 }
4895 }
4896 return ret;
4897 }
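
/*
 * Usage sketch for read_eeprom() (illustrative only; the helper name is
 * hypothetical and the block is not built): fetch and print the 4-byte
 * word at EEPROM offset 0x10.
 */
#if 0
static void s2io_eeprom_peek_sketch(nic_t *sp)
{
	u64 word;

	if (read_eeprom(sp, 0x10, &word) == 0)
		DBG_PRINT(INFO_DBG, "EEPROM[0x10] = 0x%llx\n",
			  (unsigned long long) word);
}
#endif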
4898
4899 /**
4900 * write_eeprom - actually writes the relevant part of the data value.
4901 * @sp : private member of the device structure, which is a pointer to the
4902 * s2io_nic structure.
4903 * @off : offset at which the data must be written
4904 * @data : The data that is to be written
4905 * @cnt : Number of bytes of the data that are actually to be written into
4906 * the Eeprom. (max of 3)
4907 * Description:
4908 * Actually writes the relevant part of the data value into the Eeprom
4909 * through the I2C bus.
4910 * Return value:
4911 * 0 on success, -1 on failure.
4912 */
4913
4914 static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4915 {
4916 int exit_cnt = 0, ret = -1;
4917 u64 val64;
4918 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4919
4920 if (sp->device_type == XFRAME_I_DEVICE) {
4921 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4922 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4923 I2C_CONTROL_CNTL_START;
4924 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4925
4926 while (exit_cnt < 5) {
4927 val64 = readq(&bar0->i2c_control);
4928 if (I2C_CONTROL_CNTL_END(val64)) {
4929 if (!(val64 & I2C_CONTROL_NACK))
4930 ret = 0;
4931 break;
4932 }
4933 msleep(50);
4934 exit_cnt++;
4935 }
4936 }
4937
4938 if (sp->device_type == XFRAME_II_DEVICE) {
4939 int write_cnt = (cnt == 8) ? 0 : cnt;
4940 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4941
4942 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4943 SPI_CONTROL_BYTECNT(write_cnt) |
4944 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4945 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4946 val64 |= SPI_CONTROL_REQ;
4947 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4948 while (exit_cnt < 5) {
4949 val64 = readq(&bar0->spi_control);
4950 if (val64 & SPI_CONTROL_NACK) {
4951 ret = 1;
4952 break;
4953 } else if (val64 & SPI_CONTROL_DONE) {
4954 ret = 0;
4955 break;
4956 }
4957 msleep(50);
4958 exit_cnt++;
4959 }
4960 }
4961 return ret;
4962 }

4963 static void s2io_vpd_read(nic_t *nic)
4964 {
4965 u8 *vpd_data;
4966 u8 data;
4967 int i=0, cnt, fail = 0;
4968 int vpd_addr = 0x80;
4969
4970 if (nic->device_type == XFRAME_II_DEVICE) {
4971 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4972 vpd_addr = 0x80;
4973 }
4974 } else {
4976 vpd_addr = 0x50;
4977 }
4978
4979 vpd_data = kmalloc(256, GFP_KERNEL);
4980 if (!vpd_data)
4981 return;
4982
4983 for (i = 0; i < 256; i += 4) {
4984 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4985 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
4986 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
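/*
 * VPD read handshake (per the PCI VPD capability): writing the address
 * with the flag bit clear starts a read; the device sets bit 7 of the
 * byte at vpd_addr + 3 once the data register holds the requested word.
 */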
4987 for (cnt = 0; cnt < 5; cnt++) {
4988 msleep(2);
4989 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4990 if (data == 0x80)
4991 break;
4992 }
4993 if (cnt >= 5) {
4994 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4995 fail = 1;
4996 break;
4997 }
4998 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
4999 (u32 *)&vpd_data[i]);
5000 }
5001 if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) {
5002 memset(nic->product_name, 0, vpd_data[1]);
5003 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5004 }
5005 kfree(vpd_data);
5006 }
5007
5008 /**
5009 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5010 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5011 * @eeprom : pointer to the user level structure provided by ethtool,
5012 * containing all relevant information.
5013 * @data_buf : user defined value to be written into Eeprom.
5014 * Description: Reads the values stored in the Eeprom at the given offset
5015 * for a given length. Stores these values in the input argument data
5016 * buffer 'data_buf' and returns these to the caller (ethtool).
5017 * Return value:
5018 * int 0 on success
5019 */
5020
5021 static int s2io_ethtool_geeprom(struct net_device *dev,
5022 struct ethtool_eeprom *eeprom, u8 * data_buf)
5023 {
5024 u32 i, valid;
5025 u64 data;
5026 nic_t *sp = dev->priv;
5027
5028 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
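/* e.g. vendor 0x17d5, device 0x5831 would yield magic 0x583117d5 */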
5029
5030 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5031 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5032
5033 for (i = 0; i < eeprom->len; i += 4) {
5034 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5035 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5036 return -EFAULT;
5037 }
5038 valid = INV(data);
5039 memcpy((data_buf + i), &valid, 4);
5040 }
5041 return 0;
5042 }
5043
5044 /**
5045 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5046 * @sp : private member of the device structure, which is a pointer to the
5047 * s2io_nic structure.
5048 * @eeprom : pointer to the user level structure provided by ethtool,
5049 * containing all relevant information.
5050 * @data_buf : user defined value to be written into Eeprom.
5051 * Description:
5052 * Tries to write the user provided value in the Eeprom, at the offset
5053 * given by the user.
5054 * Return value:
5055 * 0 on success, -EFAULT on failure.
5056 */
5057
5058 static int s2io_ethtool_seeprom(struct net_device *dev,
5059 struct ethtool_eeprom *eeprom,
5060 u8 * data_buf)
5061 {
5062 int len = eeprom->len, cnt = 0;
5063 u64 valid = 0, data;
5064 nic_t *sp = dev->priv;
5065
5066 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5067 DBG_PRINT(ERR_DBG,
5068 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5069 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5070 eeprom->magic);
5071 return -EFAULT;
5072 }
5073
5074 while (len) {
5075 data = (u32) data_buf[cnt] & 0x000000FF;
5076 if (data) {
5077 valid = (u32) (data << 24);
5078 } else
5079 valid = data;
5080
5081 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5082 DBG_PRINT(ERR_DBG,
5083 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5084 DBG_PRINT(ERR_DBG,
5085 "write into the specified offset\n");
5086 return -EFAULT;
5087 }
5088 cnt++;
5089 len--;
5090 }
5091
5092 return 0;
5093 }
5094
5095 /**
5096 * s2io_register_test - reads and writes into all clock domains.
5097 * @sp : private member of the device structure, which is a pointer to the
5098 * s2io_nic structure.
5099 * @data : variable that returns the result of each of the tests conducted
5100 * by the driver.
5101 * Description:
5102 * Read and write into all clock domains. The NIC has 3 clock domains;
5103 * verify that registers in all three regions are accessible.
5104 * Return value:
5105 * 0 on success.
5106 */
5107
5108 static int s2io_register_test(nic_t * sp, uint64_t * data)
5109 {
5110 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5111 u64 val64 = 0, exp_val;
5112 int fail = 0;
5113
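/*
 * The swapper feedback register returns a fixed pattern when the read
 * path byte ordering is programmed correctly (the same pattern checked
 * during the endian/swapper verification earlier in the driver).
 */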
5114 val64 = readq(&bar0->pif_rd_swapper_fb);
5115 if (val64 != 0x123456789abcdefULL) {
5116 fail = 1;
5117 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5118 }
5119
5120 val64 = readq(&bar0->rmac_pause_cfg);
5121 if (val64 != 0xc000ffff00000000ULL) {
5122 fail = 1;
5123 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5124 }
5125
5126 val64 = readq(&bar0->rx_queue_cfg);
5127 if (sp->device_type == XFRAME_II_DEVICE)
5128 exp_val = 0x0404040404040404ULL;
5129 else
5130 exp_val = 0x0808080808080808ULL;
5131 if (val64 != exp_val) {
5132 fail = 1;
5133 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5134 }
5135
5136 val64 = readq(&bar0->xgxs_efifo_cfg);
5137 if (val64 != 0x000000001923141EULL) {
5138 fail = 1;
5139 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5140 }
5141
5142 val64 = 0x5A5A5A5A5A5A5A5AULL;
5143 writeq(val64, &bar0->xmsi_data);
5144 val64 = readq(&bar0->xmsi_data);
5145 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5146 fail = 1;
5147 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5148 }
5149
5150 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5151 writeq(val64, &bar0->xmsi_data);
5152 val64 = readq(&bar0->xmsi_data);
5153 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5154 fail = 1;
5155 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5156 }
5157
5158 *data = fail;
5159 return fail;
5160 }
5161
5162 /**
5163 * s2io_eeprom_test - to verify that the EEPROM in the xena can be programmed.
5164 * @sp : private member of the device structure, which is a pointer to the
5165 * s2io_nic structure.
5166 * @data:variable that returns the result of each of the test conducted by
5167 * the driver.
5168 * Description:
5169 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5170 * register.
5171 * Return value:
5172 * 0 on success.
5173 */
5174
5175 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
5176 {
5177 int fail = 0;
5178 u64 ret_data, org_4F0, org_7F0;
5179 u8 saved_4F0 = 0, saved_7F0 = 0;
5180 struct net_device *dev = sp->dev;
5181
5182 /* Test Write Error at offset 0 */
5183 /* Note that the SPI interface allows write access to all areas
5184 * of the EEPROM. Hence all negative testing is done only for Xframe I.
5185 */
5186 if (sp->device_type == XFRAME_I_DEVICE)
5187 if (!write_eeprom(sp, 0, 0, 3))
5188 fail = 1;
5189
5190 /* Save current values at offsets 0x4F0 and 0x7F0 */
5191 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5192 saved_4F0 = 1;
5193 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5194 saved_7F0 = 1;
5195
5196 /* Test Write at offset 4f0 */
5197 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5198 fail = 1;
5199 if (read_eeprom(sp, 0x4F0, &ret_data))
5200 fail = 1;
5201
5202 if (ret_data != 0x012345) {
5203 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5204 "Data written %llx Data read %llx\n",
5205 dev->name, (unsigned long long)0x12345,
5206 (unsigned long long)ret_data);
5207 fail = 1;
5208 }
5209
5210 /* Reset the EEPROM data to 0xFFFFFF */
5211 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5212
5213 /* Test Write Request Error at offset 0x7c */
5214 if (sp->device_type == XFRAME_I_DEVICE)
5215 if (!write_eeprom(sp, 0x07C, 0, 3))
5216 fail = 1;
5217
5218 /* Test Write Request at offset 0x7f0 */
5219 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5220 fail = 1;
5221 if (read_eeprom(sp, 0x7F0, &ret_data))
5222 fail = 1;
5223
5224 if (ret_data != 0x012345) {
5225 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5226 "Data written %llx Data read %llx\n",
5227 dev->name, (unsigned long long)0x12345,
5228 (unsigned long long)ret_data);
5229 fail = 1;
5230 }
5231
5232 /* Reset the EEPROM data to 0xFFFFFF */
5233 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5234
5235 if (sp->device_type == XFRAME_I_DEVICE) {
5236 /* Test Write Error at offset 0x80 */
5237 if (!write_eeprom(sp, 0x080, 0, 3))
5238 fail = 1;
5239
5240 /* Test Write Error at offset 0xfc */
5241 if (!write_eeprom(sp, 0x0FC, 0, 3))
5242 fail = 1;
5243
5244 /* Test Write Error at offset 0x100 */
5245 if (!write_eeprom(sp, 0x100, 0, 3))
5246 fail = 1;
5247
5248 /* Test Write Error at offset 4ec */
5249 if (!write_eeprom(sp, 0x4EC, 0, 3))
5250 fail = 1;
5251 }
5252
5253 /* Restore values at offsets 0x4F0 and 0x7F0 */
5254 if (saved_4F0)
5255 write_eeprom(sp, 0x4F0, org_4F0, 3);
5256 if (saved_7F0)
5257 write_eeprom(sp, 0x7F0, org_7F0, 3);
5258
5259 *data = fail;
5260 return fail;
5261 }
5262
5263 /**
5264 * s2io_bist_test - invokes the MemBist test of the card.
5265 * @sp : private member of the device structure, which is a pointer to the
5266 * s2io_nic structure.
5267 * @data:variable that returns the result of each of the test conducted by
5268 * the driver.
5269 * Description:
5270 * This invokes the MemBist test of the card. We give around
5271 * 2 secs time for the test to complete. If it's still not complete
5272 * within this period, we consider that the test failed.
5273 * Return value:
5274 * 0 on success and -1 on failure.
5275 */
5276
5277 static int s2io_bist_test(nic_t * sp, uint64_t * data)
5278 {
5279 u8 bist = 0;
5280 int cnt = 0, ret = -1;
5281
5282 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5283 bist |= PCI_BIST_START;
5284 pci_write_config_byte(sp->pdev, PCI_BIST, bist); /* PCI_BIST is a byte register */
5285
5286 while (cnt < 20) {
5287 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5288 if (!(bist & PCI_BIST_START)) {
5289 *data = (bist & PCI_BIST_CODE_MASK);
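/* BIST completion code: 0 means the device passed its BIST */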
5290 ret = 0;
5291 break;
5292 }
5293 msleep(100);
5294 cnt++;
5295 }
5296
5297 return ret;
5298 }
5299
5300 /**
5301 * s2io_link_test - verifies the link state of the nic
5302 * @sp : private member of the device structure, which is a pointer to the
5303 * s2io_nic structure.
5304 * @data: variable that returns the result of each of the test conducted by
5305 * the driver.
5306 * Description:
5307 * The function verifies the link state of the NIC and updates the input
5308 * argument 'data' appropriately.
5309 * Return value:
5310 * 0 on success.
5311 */
5312
5313 static int s2io_link_test(nic_t * sp, uint64_t * data)
5314 {
5315 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5316 u64 val64;
5317
5318 val64 = readq(&bar0->adapter_status);
5319 if (!LINK_IS_UP(val64))
5320 *data = 1;
5321 else
5322 *data = 0;
5323
5324 return *data;
5325 }
5326
5327 /**
5328 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5329 * @sp : private member of the device structure, which is a pointer to the
5330 * s2io_nic structure.
5331 * @data : variable that returns the result of each of the tests
5332 * conducted by the driver.
5333 * Description:
5334 * This is one of the offline test that tests the read and write
5335 * access to the RldRam chip on the NIC.
5336 * Return value:
5337 * 0 on success.
5338 */
5339
5340 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
5341 {
5342 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5343 u64 val64;
5344 int cnt, iteration = 0, test_fail = 0;
5345
5346 val64 = readq(&bar0->adapter_control);
5347 val64 &= ~ADAPTER_ECC_EN;
5348 writeq(val64, &bar0->adapter_control);
5349
5350 val64 = readq(&bar0->mc_rldram_test_ctrl);
5351 val64 |= MC_RLDRAM_TEST_MODE;
5352 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5353
5354 val64 = readq(&bar0->mc_rldram_mrs);
5355 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5356 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5357
5358 val64 |= MC_RLDRAM_MRS_ENABLE;
5359 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5360
5361 while (iteration < 2) {
5362 val64 = 0x55555555aaaa0000ULL;
5363 if (iteration == 1) {
5364 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5365 }
5366 writeq(val64, &bar0->mc_rldram_test_d0);
5367
5368 val64 = 0xaaaa5a5555550000ULL;
5369 if (iteration == 1) {
5370 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5371 }
5372 writeq(val64, &bar0->mc_rldram_test_d1);
5373
5374 val64 = 0x55aaaaaaaa5a0000ULL;
5375 if (iteration == 1) {
5376 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5377 }
5378 writeq(val64, &bar0->mc_rldram_test_d2);
5379
5380 val64 = (u64) (0x0000003ffffe0100ULL);
5381 writeq(val64, &bar0->mc_rldram_test_add);
5382
5383 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5384 MC_RLDRAM_TEST_GO;
5385 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5386
5387 for (cnt = 0; cnt < 5; cnt++) {
5388 val64 = readq(&bar0->mc_rldram_test_ctrl);
5389 if (val64 & MC_RLDRAM_TEST_DONE)
5390 break;
5391 msleep(200);
5392 }
5393
5394 if (cnt == 5)
5395 break;
5396
5397 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5398 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5399
5400 for (cnt = 0; cnt < 5; cnt++) {
5401 val64 = readq(&bar0->mc_rldram_test_ctrl);
5402 if (val64 & MC_RLDRAM_TEST_DONE)
5403 break;
5404 msleep(500);
5405 }
5406
5407 if (cnt == 5)
5408 break;
5409
5410 val64 = readq(&bar0->mc_rldram_test_ctrl);
5411 if (!(val64 & MC_RLDRAM_TEST_PASS))
5412 test_fail = 1;
5413
5414 iteration++;
5415 }
5416
5417 *data = test_fail;
5418
5419 /* Bring the adapter out of test mode */
5420 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5421
5422 return test_fail;
5423 }
5424
5425 /**
5426 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
5427 * @sp : private member of the device structure, which is a pointer to the
5428 * s2io_nic structure.
5429 * @ethtest : pointer to a ethtool command specific structure that will be
5430 * returned to the user.
5431 * @data : variable that returns the result of each of the test
5432 * conducted by the driver.
5433 * Description:
5434 * This function conducts 5 tests (4 offline and 1 online) to determine
5435 * the health of the card.
5436 * Return value:
5437 * void
5438 */
5439
5440 static void s2io_ethtool_test(struct net_device *dev,
5441 struct ethtool_test *ethtest,
5442 uint64_t * data)
5443 {
5444 nic_t *sp = dev->priv;
5445 int orig_state = netif_running(sp->dev);
5446
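	/*
	 * Five result slots are filled in: data[0] register test,
	 * data[1] EEPROM test, data[2] link test, data[3] RLDRAM test,
	 * data[4] BIST (the order is assumed to match s2io_gstrings).
	 * Only the link test can run while the interface is up; the
	 * other four require it to be taken offline.
	 */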
5447 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5448 /* Offline Tests. */
5449 if (orig_state)
5450 s2io_close(sp->dev);
5451
5452 if (s2io_register_test(sp, &data[0]))
5453 ethtest->flags |= ETH_TEST_FL_FAILED;
5454
5455 s2io_reset(sp);
5456
5457 if (s2io_rldram_test(sp, &data[3]))
5458 ethtest->flags |= ETH_TEST_FL_FAILED;
5459
5460 s2io_reset(sp);
5461
5462 if (s2io_eeprom_test(sp, &data[1]))
5463 ethtest->flags |= ETH_TEST_FL_FAILED;
5464
5465 if (s2io_bist_test(sp, &data[4]))
5466 ethtest->flags |= ETH_TEST_FL_FAILED;
5467
5468 if (orig_state)
5469 s2io_open(sp->dev);
5470
5471 data[2] = 0;
5472 } else {
5473 /* Online Tests. */
5474 if (!orig_state) {
5475 DBG_PRINT(ERR_DBG,
5476 "%s: is not up, cannot run test\n",
5477 dev->name);
5478 data[0] = -1;
5479 data[1] = -1;
5480 data[2] = -1;
5481 data[3] = -1;
5482 data[4] = -1;
5483 }
5484
5485 if (s2io_link_test(sp, &data[2]))
5486 ethtest->flags |= ETH_TEST_FL_FAILED;
5487
5488 data[0] = 0;
5489 data[1] = 0;
5490 data[3] = 0;
5491 data[4] = 0;
5492 }
5493 }
5494
5495 static void s2io_get_ethtool_stats(struct net_device *dev,
5496 struct ethtool_stats *estats,
5497 u64 * tmp_stats)
5498 {
5499 int i = 0;
5500 nic_t *sp = dev->priv;
5501 StatInfo_t *stat_info = sp->mac_control.stats_info;
5502
5503 s2io_updt_stats(sp);
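	/*
	 * Most hardware counters are kept as a 32-bit value plus a
	 * 32-bit overflow register; the two halves are recombined here
	 * into a single 64-bit statistic as (oflow << 32) | low.
	 */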
5504 tmp_stats[i++] =
5505 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5506 le32_to_cpu(stat_info->tmac_frms);
5507 tmp_stats[i++] =
5508 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5509 le32_to_cpu(stat_info->tmac_data_octets);
5510 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5511 tmp_stats[i++] =
5512 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5513 le32_to_cpu(stat_info->tmac_mcst_frms);
5514 tmp_stats[i++] =
5515 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5516 le32_to_cpu(stat_info->tmac_bcst_frms);
5517 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5518 tmp_stats[i++] =
5519 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5520 le32_to_cpu(stat_info->tmac_ttl_octets);
5521 tmp_stats[i++] =
5522 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5523 le32_to_cpu(stat_info->tmac_ucst_frms);
5524 tmp_stats[i++] =
5525 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5526 le32_to_cpu(stat_info->tmac_nucst_frms);
5527 tmp_stats[i++] =
5528 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5529 le32_to_cpu(stat_info->tmac_any_err_frms);
5530 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5531 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5532 tmp_stats[i++] =
5533 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5534 le32_to_cpu(stat_info->tmac_vld_ip);
5535 tmp_stats[i++] =
5536 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5537 le32_to_cpu(stat_info->tmac_drop_ip);
5538 tmp_stats[i++] =
5539 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5540 le32_to_cpu(stat_info->tmac_icmp);
5541 tmp_stats[i++] =
5542 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5543 le32_to_cpu(stat_info->tmac_rst_tcp);
5544 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5545 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5546 le32_to_cpu(stat_info->tmac_udp);
5547 tmp_stats[i++] =
5548 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5549 le32_to_cpu(stat_info->rmac_vld_frms);
5550 tmp_stats[i++] =
5551 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5552 le32_to_cpu(stat_info->rmac_data_octets);
5553 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5554 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5555 tmp_stats[i++] =
5556 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5557 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5558 tmp_stats[i++] =
5559 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5560 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5561 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5562 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5563 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5564 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5565 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5566 tmp_stats[i++] =
5567 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5568 le32_to_cpu(stat_info->rmac_ttl_octets);
5569 tmp_stats[i++] =
5570 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5571 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5572 tmp_stats[i++] =
5573 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5574 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5575 tmp_stats[i++] =
5576 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5577 le32_to_cpu(stat_info->rmac_discarded_frms);
5578 tmp_stats[i++] =
5579 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5580 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5581 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5582 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5583 tmp_stats[i++] =
5584 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5585 le32_to_cpu(stat_info->rmac_usized_frms);
5586 tmp_stats[i++] =
5587 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5588 le32_to_cpu(stat_info->rmac_osized_frms);
5589 tmp_stats[i++] =
5590 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5591 le32_to_cpu(stat_info->rmac_frag_frms);
5592 tmp_stats[i++] =
5593 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5594 le32_to_cpu(stat_info->rmac_jabber_frms);
5595 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5596 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5597 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5598 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5599 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5600 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5601 tmp_stats[i++] =
5602 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5603 le32_to_cpu(stat_info->rmac_ip);
5604 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5605 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5606 tmp_stats[i++] =
5607 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5608 le32_to_cpu(stat_info->rmac_drop_ip);
5609 tmp_stats[i++] =
5610 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5611 le32_to_cpu(stat_info->rmac_icmp);
5612 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5613 tmp_stats[i++] =
5614 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5615 le32_to_cpu(stat_info->rmac_udp);
5616 tmp_stats[i++] =
5617 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5618 le32_to_cpu(stat_info->rmac_err_drp_udp);
5619 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5620 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5621 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5622 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5623 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5624 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5625 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5626 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5627 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5628 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5629 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5630 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5631 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5632 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5633 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5634 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5635 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5636 tmp_stats[i++] =
5637 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5638 le32_to_cpu(stat_info->rmac_pause_cnt);
5639 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5640 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5641 tmp_stats[i++] =
5642 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5643 le32_to_cpu(stat_info->rmac_accepted_ip);
5644 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5645 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5646 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5647 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5648 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5649 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5650 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5651 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5652 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5653 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5654 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5655 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5656 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5657 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5658 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5659 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5660 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5661 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5662 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5663 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5664 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5665 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5666 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5667 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5668 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5669 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5670 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5671 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5672 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5673 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5674 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5675 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5676 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5677 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5678 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5679 tmp_stats[i++] = 0;
5680 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5681 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5682 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5683 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5684 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5685 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5686 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5687 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5688 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5689 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5690 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5691 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5692 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5693 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5694 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5695 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5696 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5697 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5698 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5699 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5700 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5701 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5702 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
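	/*
	 * The final entry is the average number of packets per LRO
	 * aggregation, i.e. sum_avg_pkts_aggregated / num_aggregations,
	 * computed below without a 64-bit divide.
	 */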
5703 if (stat_info->sw_stat.num_aggregations) {
5704 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5705 int count = 0;
5706 /*
5707 * Since 64-bit divide does not work on all platforms,
5708 * do repeated subtraction.
5709 */
5710 while (tmp >= stat_info->sw_stat.num_aggregations) {
5711 tmp -= stat_info->sw_stat.num_aggregations;
5712 count++;
5713 }
5714 tmp_stats[i++] = count;
5715 }
5716 else
5717 tmp_stats[i++] = 0;
5718 }
5719
5720 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5721 {
5722 return (XENA_REG_SPACE);
5723 }
5724
5725
5726 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5727 {
5728 nic_t *sp = dev->priv;
5729
5730 return (sp->rx_csum);
5731 }
5732
5733 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5734 {
5735 nic_t *sp = dev->priv;
5736
5737 if (data)
5738 sp->rx_csum = 1;
5739 else
5740 sp->rx_csum = 0;
5741
5742 return 0;
5743 }
5744
5745 static int s2io_get_eeprom_len(struct net_device *dev)
5746 {
5747 return (XENA_EEPROM_SPACE);
5748 }
5749
5750 static int s2io_ethtool_self_test_count(struct net_device *dev)
5751 {
5752 return (S2IO_TEST_LEN);
5753 }
5754
5755 static void s2io_ethtool_get_strings(struct net_device *dev,
5756 u32 stringset, u8 * data)
5757 {
5758 switch (stringset) {
5759 case ETH_SS_TEST:
5760 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5761 break;
5762 case ETH_SS_STATS:
5763 memcpy(data, &ethtool_stats_keys,
5764 sizeof(ethtool_stats_keys));
5765 }
5766 }

5767 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5768 {
5769 return (S2IO_STAT_LEN);
5770 }
5771
5772 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5773 {
5774 if (data)
5775 dev->features |= NETIF_F_IP_CSUM;
5776 else
5777 dev->features &= ~NETIF_F_IP_CSUM;
5778
5779 return 0;
5780 }
5781
5782
5783 static struct ethtool_ops netdev_ethtool_ops = {
5784 .get_settings = s2io_ethtool_gset,
5785 .set_settings = s2io_ethtool_sset,
5786 .get_drvinfo = s2io_ethtool_gdrvinfo,
5787 .get_regs_len = s2io_ethtool_get_regs_len,
5788 .get_regs = s2io_ethtool_gregs,
5789 .get_link = ethtool_op_get_link,
5790 .get_eeprom_len = s2io_get_eeprom_len,
5791 .get_eeprom = s2io_ethtool_geeprom,
5792 .set_eeprom = s2io_ethtool_seeprom,
5793 .get_pauseparam = s2io_ethtool_getpause_data,
5794 .set_pauseparam = s2io_ethtool_setpause_data,
5795 .get_rx_csum = s2io_ethtool_get_rx_csum,
5796 .set_rx_csum = s2io_ethtool_set_rx_csum,
5797 .get_tx_csum = ethtool_op_get_tx_csum,
5798 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5799 .get_sg = ethtool_op_get_sg,
5800 .set_sg = ethtool_op_set_sg,
5801 #ifdef NETIF_F_TSO
5802 .get_tso = ethtool_op_get_tso,
5803 .set_tso = ethtool_op_set_tso,
5804 #endif
5805 .get_ufo = ethtool_op_get_ufo,
5806 .set_ufo = ethtool_op_set_ufo,
5807 .self_test_count = s2io_ethtool_self_test_count,
5808 .self_test = s2io_ethtool_test,
5809 .get_strings = s2io_ethtool_get_strings,
5810 .phys_id = s2io_ethtool_idnic,
5811 .get_stats_count = s2io_ethtool_get_stats_count,
5812 .get_ethtool_stats = s2io_get_ethtool_stats
5813 };
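
/*
 * These hooks are exercised through the standard ethtool interface,
 * e.g. (assuming the interface is named eth0):
 *
 *	ethtool -t eth0 offline		# self_test: run the health tests
 *	ethtool -S eth0			# get_ethtool_stats: dump counters
 *	ethtool -p eth0 5		# phys_id: blink the adapter LED for 5s
 */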
5814
5815 /**
5816 * s2io_ioctl - Entry point for the Ioctl
5817 * @dev : Device pointer.
5818 * @rq : An IOCTL specific structure, that can contain a pointer to
5819 * a proprietary structure used to pass information to the driver.
5820 * @cmd : This is used to distinguish between the different commands that
5821 * can be passed to the IOCTL functions.
5822 * Description:
5823 * Currently no special functionality is supported in IOCTL, hence
5824 * the function always returns -EOPNOTSUPP.
5825 */
5826
5827 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5828 {
5829 return -EOPNOTSUPP;
5830 }
5831
5832 /**
5833 * s2io_change_mtu - entry point to change MTU size for the device.
5834 * @dev : device pointer.
5835 * @new_mtu : the new MTU size for the device.
5836 * Description: A driver entry point to change MTU size for the device.
5837 * Before changing the MTU the device must be stopped.
5838 * Return value:
5839 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5840 * file on failure.
5841 */
5842
5843 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5844 {
5845 nic_t *sp = dev->priv;
5846
5847 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5848 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5849 dev->name);
5850 return -EPERM;
5851 }
5852
5853 dev->mtu = new_mtu;
5854 if (netif_running(dev)) {
5855 s2io_card_down(sp);
5856 netif_stop_queue(dev);
5857 if (s2io_card_up(sp)) {
5858 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5859 __FUNCTION__);
5860 }
5861 if (netif_queue_stopped(dev))
5862 netif_wake_queue(dev);
5863 } else { /* Device is down */
5864 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5865 u64 val64 = new_mtu;
5866
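		/*
		 * With the interface down only the MAC's maximum payload
		 * length register is updated; vBIT() is assumed here to
		 * position the value as a 14-bit field 2 bits below the
		 * MSB of rmac_max_pyld_len.
		 */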
5867 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5868 }
5869
5870 return 0;
5871 }
5872
5873 /**
5874 * s2io_tasklet - Bottom half of the ISR.
5875 * @dev_addr : address of the net_device structure, cast to unsigned long.
5876 * Description:
5877 * This is the tasklet or the bottom half of the ISR. This is
5878 * an extension of the ISR which is scheduled by the scheduler to be run
5879 * when the load on the CPU is low. All low priority tasks of the ISR can
5880 * be pushed into the tasklet. For now the tasklet is used only to
5881 * replenish the Rx buffers in the Rx buffer descriptors.
5882 * Return value:
5883 * void.
5884 */
5885
5886 static void s2io_tasklet(unsigned long dev_addr)
5887 {
5888 struct net_device *dev = (struct net_device *) dev_addr;
5889 nic_t *sp = dev->priv;
5890 int i, ret;
5891 mac_info_t *mac_control;
5892 struct config_param *config;
5893
5894 mac_control = &sp->mac_control;
5895 config = &sp->config;
5896
5897 if (!TASKLET_IN_USE) {
5898 for (i = 0; i < config->rx_ring_num; i++) {
5899 ret = fill_rx_buffers(sp, i);
5900 if (ret == -ENOMEM) {
5901 DBG_PRINT(ERR_DBG, "%s: Out of ",
5902 dev->name);
5903 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5904 break;
5905 } else if (ret == -EFILL) {
5906 DBG_PRINT(ERR_DBG,
5907 "%s: Rx Ring %d is full\n",
5908 dev->name, i);
5909 break;
5910 }
5911 }
5912 clear_bit(0, (&sp->tasklet_status));
5913 }
5914 }
5915
5916 /**
5917 * s2io_set_link - Set the Link status
5918 * @data: long pointer to device private structure
5919 * Description: Sets the link status for the adapter
5920 */
5921
5922 static void s2io_set_link(unsigned long data)
5923 {
5924 nic_t *nic = (nic_t *) data;
5925 struct net_device *dev = nic->dev;
5926 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5927 register u64 val64;
5928 u16 subid;
5929
5930 if (test_and_set_bit(0, &(nic->link_state))) {
5931 /* The card is being reset, no point doing anything */
5932 return;
5933 }
5934
5935 subid = nic->pdev->subsystem_device;
5936 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5937 /*
5938 * Allow a small delay for the NICs self initiated
5939 * cleanup to complete.
5940 */
5941 msleep(100);
5942 }
5943
5944 val64 = readq(&bar0->adapter_status);
5945 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
5946 if (LINK_IS_UP(val64)) {
5947 val64 = readq(&bar0->adapter_control);
5948 val64 |= ADAPTER_CNTL_EN;
5949 writeq(val64, &bar0->adapter_control);
5950 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5951 subid)) {
5952 val64 = readq(&bar0->gpio_control);
5953 val64 |= GPIO_CTRL_GPIO_0;
5954 writeq(val64, &bar0->gpio_control);
5955 val64 = readq(&bar0->gpio_control);
5956 } else {
5957 val64 |= ADAPTER_LED_ON;
5958 writeq(val64, &bar0->adapter_control);
5959 }
5960 if (s2io_link_fault_indication(nic) ==
5961 MAC_RMAC_ERR_TIMER) {
5962 val64 = readq(&bar0->adapter_status);
5963 if (!LINK_IS_UP(val64)) {
5964 DBG_PRINT(ERR_DBG,
5965 "%s: Link down after enabling device\n",
5966 dev->name);
5969 }
5970 }
5971 if (nic->device_enabled_once == FALSE) {
5972 nic->device_enabled_once = TRUE;
5973 }
5974 s2io_link(nic, LINK_UP);
5975 } else {
5976 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5977 subid)) {
5978 val64 = readq(&bar0->gpio_control);
5979 val64 &= ~GPIO_CTRL_GPIO_0;
5980 writeq(val64, &bar0->gpio_control);
5981 val64 = readq(&bar0->gpio_control);
5982 }
5983 s2io_link(nic, LINK_DOWN);
5984 }
5985 } else { /* NIC is not Quiescent. */
5986 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5987 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5988 netif_stop_queue(dev);
5989 }
5990 clear_bit(0, &(nic->link_state));
5991 }
5992
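/*
 * set_rxd_buffer_pointer() re-attaches buffers to an RxD for each of the
 * three descriptor modes. When an skb is already attached (*skb != NULL)
 * the previously mapped DMA addresses cached in temp0..temp2 are reused,
 * since the frames will never be handed to the stack; otherwise a fresh
 * skb is allocated and mapped. Used by rxd_owner_bit_reset() while the
 * card is being brought down.
 */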
5993 static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5994 struct sk_buff **skb, u64 *temp0, u64 *temp1,
5995 u64 *temp2, int size)
5996 {
5997 struct net_device *dev = sp->dev;
5998 struct sk_buff *frag_list;
5999
6000 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6001 /* allocate skb */
6002 if (*skb) {
6003 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6004 /*
6005 * As Rx frame are not going to be processed,
6006 * using same mapped address for the Rxd
6007 * buffer pointer
6008 */
6009 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
6010 } else {
6011 *skb = dev_alloc_skb(size);
6012 if (!(*skb)) {
6013 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
6014 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
6015 return -ENOMEM ;
6016 }
6017 /* storing the mapped addr in a temp variable
6018 * such it will be used for next rxd whose
6019 * Host Control is NULL
6020 */
6021 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
6022 pci_map_single( sp->pdev, (*skb)->data,
6023 size - NET_IP_ALIGN,
6024 PCI_DMA_FROMDEVICE);
6025 rxdp->Host_Control = (unsigned long) (*skb);
6026 }
6027 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6028 /* Two buffer Mode */
6029 if (*skb) {
6030 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
6031 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
6032 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
6033 } else {
6034 *skb = dev_alloc_skb(size);
	if (!(*skb)) {
		DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
		DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
		return -ENOMEM;
	}
6035 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
6036 pci_map_single(sp->pdev, (*skb)->data,
6037 dev->mtu + 4,
6038 PCI_DMA_FROMDEVICE);
6039 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
6040 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6041 PCI_DMA_FROMDEVICE);
6042 rxdp->Host_Control = (unsigned long) (*skb);
6043
6044 /* Buffer-1 will be dummy buffer not used */
6045 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
6046 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6047 PCI_DMA_FROMDEVICE);
6048 }
6049 } else if ((rxdp->Host_Control == 0)) {
6050 /* Three buffer mode */
6051 if (*skb) {
6052 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
6053 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
6054 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
6055 } else {
6056 *skb = dev_alloc_skb(size);
	if (!(*skb)) {
		DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
		DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
		return -ENOMEM;
	}
6057
6058 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
6059 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6060 PCI_DMA_FROMDEVICE);
6061 /* Buffer-1 receives L3/L4 headers */
6062 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
6063 pci_map_single( sp->pdev, (*skb)->data,
6064 l3l4hdr_size + 4,
6065 PCI_DMA_FROMDEVICE);
6066 /*
6067 * skb_shinfo(skb)->frag_list will have L4
6068 * data payload
6069 */
6070 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6071 ALIGN_SIZE);
6072 if (skb_shinfo(*skb)->frag_list == NULL) {
6073 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6074 dev->name);
6075 return -ENOMEM ;
6076 }
6077 frag_list = skb_shinfo(*skb)->frag_list;
6078 frag_list->next = NULL;
6079 /*
6080 * Buffer-2 receives L4 data payload
6081 */
6082 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
6083 pci_map_single( sp->pdev, frag_list->data,
6084 dev->mtu, PCI_DMA_FROMDEVICE);
6085 }
6086 }
6087 return 0;
6088 }
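
/*
 * set_rxd_buffer_size() programs the per-buffer sizes in Control_2 to
 * match the layout established above: one full-frame buffer in 1-buffer
 * mode; a header buffer, a 1-byte dummy and an MTU-sized buffer in
 * 2-buffer mode; and header / L3-L4 header / payload buffers in
 * 3-buffer mode.
 */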
6089 static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
6090 {
6091 struct net_device *dev = sp->dev;
6092 if (sp->rxd_mode == RXD_MODE_1) {
6093 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6094 } else if (sp->rxd_mode == RXD_MODE_3B) {
6095 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6096 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6097 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6098 } else {
6099 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6100 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6101 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6102 }
6103 }
6104
6105 static int rxd_owner_bit_reset(nic_t *sp)
6106 {
6107 int i, j, k, blk_cnt = 0, size;
6108 mac_info_t * mac_control = &sp->mac_control;
6109 struct config_param *config = &sp->config;
6110 struct net_device *dev = sp->dev;
6111 RxD_t *rxdp = NULL;
6112 struct sk_buff *skb = NULL;
6113 buffAdd_t *ba = NULL;
6114 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6115
6116 /* Calculate the size based on ring mode */
6117 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6118 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6119 if (sp->rxd_mode == RXD_MODE_1)
6120 size += NET_IP_ALIGN;
6121 else if (sp->rxd_mode == RXD_MODE_3B)
6122 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6123 else
6124 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6125
6126 for (i = 0; i < config->rx_ring_num; i++) {
6127 blk_cnt = config->rx_cfg[i].num_rxd /
6128 (rxd_count[sp->rxd_mode] +1);
6129
6130 for (j = 0; j < blk_cnt; j++) {
6131 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6132 rxdp = mac_control->rings[i].
6133 rx_blocks[j].rxds[k].virt_addr;
6134 if (sp->rxd_mode >= RXD_MODE_3A)
6135 ba = &mac_control->rings[i].ba[j][k];
6136 set_rxd_buffer_pointer(sp, rxdp, ba,
6137 &skb,(u64 *)&temp0_64,
6138 (u64 *)&temp1_64,
6139 (u64 *)&temp2_64, size);
6140
6141 set_rxd_buffer_size(sp, rxdp, size);
6142 wmb();
6143 /* flip the Ownership bit to Hardware */
6144 rxdp->Control_1 |= RXD_OWN_XENA;
6145 }
6146 }
6147 }
6148 return 0;
6149
6150 }
6151
6152 static int s2io_add_isr(nic_t * sp)
6153 {
6154 int ret = 0;
6155 struct net_device *dev = sp->dev;
6156 int err = 0;
6157
6158 if (sp->intr_type == MSI)
6159 ret = s2io_enable_msi(sp);
6160 else if (sp->intr_type == MSI_X)
6161 ret = s2io_enable_msi_x(sp);
6162 if (ret) {
6163 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6164 sp->intr_type = INTA;
6165 }
6166
6167 /* Store the values of the MSIX table in the nic_t structure */
6168 store_xmsi_data(sp);
6169
6170 /* After proper initialization of H/W, register ISR */
6171 if (sp->intr_type == MSI) {
6172 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6173 IRQF_SHARED, sp->name, dev);
6174 if (err) {
6175 pci_disable_msi(sp->pdev);
6176 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6177 dev->name);
6178 return -1;
6179 }
6180 }
6181 if (sp->intr_type == MSI_X) {
6182 int i;
6183
6184 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6185 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6186 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6187 dev->name, i);
6188 err = request_irq(sp->entries[i].vector,
6189 s2io_msix_fifo_handle, 0, sp->desc[i],
6190 sp->s2io_entries[i].arg);
6191 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6192 (unsigned long long)sp->msix_info[i].addr);
6193 } else {
6194 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6195 dev->name, i);
6196 err = request_irq(sp->entries[i].vector,
6197 s2io_msix_ring_handle, 0, sp->desc[i],
6198 sp->s2io_entries[i].arg);
6199 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6200 (unsigned long long)sp->msix_info[i].addr);
6201 }
6202 if (err) {
6203 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6204 "failed\n", dev->name, i);
6205 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6206 return -1;
6207 }
6208 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6209 }
6210 }
6211 if (sp->intr_type == INTA) {
6212 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6213 sp->name, dev);
6214 if (err) {
6215 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6216 dev->name);
6217 return -1;
6218 }
6219 }
6220 return 0;
6221 }
6222 static void s2io_rem_isr(nic_t * sp)
6223 {
6224 int cnt = 0;
6225 struct net_device *dev = sp->dev;
6226
6227 if (sp->intr_type == MSI_X) {
6228 int i;
6229 u16 msi_control;
6230
6231 for (i=1; (sp->s2io_entries[i].in_use ==
6232 MSIX_REGISTERED_SUCCESS); i++) {
6233 int vector = sp->entries[i].vector;
6234 void *arg = sp->s2io_entries[i].arg;
6235
6236 free_irq(vector, arg);
6237 }
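		/*
		 * 0x42 is a hard-coded offset into this device's PCI
		 * config space; the driver assumes the word there
		 * controls the message-signalled interrupt capability
		 * and clears its low bit before pci_disable_msix().
		 */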
6238 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6239 msi_control &= 0xFFFE; /* Disable MSI */
6240 pci_write_config_word(sp->pdev, 0x42, msi_control);
6241
6242 pci_disable_msix(sp->pdev);
6243 } else {
6244 free_irq(sp->pdev->irq, dev);
6245 if (sp->intr_type == MSI) {
6246 u16 val;
6247
6248 pci_disable_msi(sp->pdev);
6249 pci_read_config_word(sp->pdev, 0x4c, &val);
6250 val ^= 0x1;
6251 pci_write_config_word(sp->pdev, 0x4c, val);
6252 }
6253 }
6254 /* Waiting till all Interrupt handlers are complete */
6255 cnt = 0;
6256 do {
6257 msleep(10);
6258 if (!atomic_read(&sp->isr_cnt))
6259 break;
6260 cnt++;
6261 } while(cnt < 5);
6262 }
6263
6264 static void s2io_card_down(nic_t * sp)
6265 {
6266 int cnt = 0;
6267 XENA_dev_config_t __iomem *bar0 = sp->bar0;
6268 unsigned long flags;
6269 register u64 val64 = 0;
6270
6271 del_timer_sync(&sp->alarm_timer);
6272 /* If s2io_set_link task is executing, wait till it completes. */
6273 while (test_and_set_bit(0, &(sp->link_state))) {
6274 msleep(50);
6275 }
6276 atomic_set(&sp->card_state, CARD_DOWN);
6277
6278 /* disable Tx and Rx traffic on the NIC */
6279 stop_nic(sp);
6280
6281 s2io_rem_isr(sp);
6282
6283 /* Kill tasklet. */
6284 tasklet_kill(&sp->task);
6285
6286 /* Check if the device is Quiescent and then Reset the NIC */
6287 do {
6288 /* As per the HW requirement we need to replenish the
6289 * receive buffers to avoid the ring bump. Since there is
6290 * no intention of processing the Rx frames at this point, we
6291 * just set the ownership bit of the RxDs in each Rx ring
6292 * back to the HW and program the appropriate buffer size
6293 * based on the ring mode.
6294 */
6295 rxd_owner_bit_reset(sp);
6296
6297 val64 = readq(&bar0->adapter_status);
6298 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
6299 break;
6300 }
6301
6302 msleep(50);
6303 cnt++;
6304 if (cnt == 10) {
6305 DBG_PRINT(ERR_DBG,
6306 "s2io_close:Device not Quiescent ");
6307 DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
6308 (unsigned long long) val64);
6309 break;
6310 }
6311 } while (1);
6312 s2io_reset(sp);
6313
6314 spin_lock_irqsave(&sp->tx_lock, flags);
6315 /* Free all Tx buffers */
6316 free_tx_buffers(sp);
6317 spin_unlock_irqrestore(&sp->tx_lock, flags);
6318
6319 /* Free all Rx buffers */
6320 spin_lock_irqsave(&sp->rx_lock, flags);
6321 free_rx_buffers(sp);
6322 spin_unlock_irqrestore(&sp->rx_lock, flags);
6323
6324 clear_bit(0, &(sp->link_state));
6325 }
6326
6327 static int s2io_card_up(nic_t * sp)
6328 {
6329 int i, ret = 0;
6330 mac_info_t *mac_control;
6331 struct config_param *config;
6332 struct net_device *dev = (struct net_device *) sp->dev;
6333 u16 interruptible;
6334
6335 /* Initialize the H/W I/O registers */
6336 if (init_nic(sp) != 0) {
6337 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6338 dev->name);
6339 s2io_reset(sp);
6340 return -ENODEV;
6341 }
6342
6343 /*
6344 * Initializing the Rx buffers. Each of the rx_ring_num
6345 * configured rings is replenished with receive buffers below.
6346 */
6347 mac_control = &sp->mac_control;
6348 config = &sp->config;
6349
6350 for (i = 0; i < config->rx_ring_num; i++) {
6351 if ((ret = fill_rx_buffers(sp, i))) {
6352 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6353 dev->name);
6354 s2io_reset(sp);
6355 free_rx_buffers(sp);
6356 return -ENOMEM;
6357 }
6358 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6359 atomic_read(&sp->rx_bufs_left[i]));
6360 }
6361
6362 /* Setting its receive mode */
6363 s2io_set_multicast(dev);
6364
6365 if (sp->lro) {
6366 /* Initialize max aggregatable pkts per session based on MTU */
6367 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6368 /* Check if we can use (if specified) the user provided value */
6369 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6370 sp->lro_max_aggr_per_sess = lro_max_pkts;
6371 }
6372
6373 /* Enable Rx Traffic and interrupts on the NIC */
6374 if (start_nic(sp)) {
6375 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6376 s2io_reset(sp);
6377 free_rx_buffers(sp);
6378 return -ENODEV;
6379 }
6380
6381 /* Add interrupt service routine */
6382 if (s2io_add_isr(sp) != 0) {
6383 if (sp->intr_type == MSI_X)
6384 s2io_rem_isr(sp);
6385 s2io_reset(sp);
6386 free_rx_buffers(sp);
6387 return -ENODEV;
6388 }
6389
6390 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6391
6392 /* Enable tasklet for the device */
6393 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6394
6395 /* Enable select interrupts */
6396 if (sp->intr_type != INTA)
6397 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6398 else {
6399 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6400 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6401 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6402 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6403 }
6404
6405
6406 atomic_set(&sp->card_state, CARD_UP);
6407 return 0;
6408 }
6409
6410 /**
6411 * s2io_restart_nic - Resets the NIC.
6412 * @data : long pointer to the device private structure
6413 * Description:
6414 * This function is scheduled to be run by the s2io_tx_watchdog
6415 * function after 0.5 secs to reset the NIC. The idea is to reduce
6416 * the run time of the watch dog routine which is run holding a
6417 * spin lock.
6418 */
6419
6420 static void s2io_restart_nic(unsigned long data)
6421 {
6422 struct net_device *dev = (struct net_device *) data;
6423 nic_t *sp = dev->priv;
6424
6425 s2io_card_down(sp);
6426 if (s2io_card_up(sp)) {
6427 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6428 dev->name);
6429 }
6430 netif_wake_queue(dev);
6431 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6432 dev->name);
6433
6434 }
6435
6436 /**
6437 * s2io_tx_watchdog - Watchdog for transmit side.
6438 * @dev : Pointer to net device structure
6439 * Description:
6440 * This function is triggered if the Tx Queue is stopped
6441 * for a pre-defined amount of time when the Interface is still up.
6442 * If the Interface is jammed in such a situation, the hardware is
6443 * reset (by s2io_close) and restarted again (by s2io_open) to
6444 * overcome any problem that might have been caused in the hardware.
6445 * Return value:
6446 * void
6447 */
6448
6449 static void s2io_tx_watchdog(struct net_device *dev)
6450 {
6451 nic_t *sp = dev->priv;
6452
6453 if (netif_carrier_ok(dev)) {
6454 schedule_work(&sp->rst_timer_task);
6455 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6456 }
6457 }
6458
6459 /**
6460 * rx_osm_handler - To perform some OS related operations on SKB.
6461 * @ring_data : per-ring control block, a pointer to the ring_info_t
6462 * structure of the ring from which this RxD was extracted.
6463 * @rxdp : pointer to the Rx descriptor that carries the received frame.
6464 * Description:
6465 * This function is called by the Rx interrupt service routine to perform
6468 * some OS related operations on the SKB before passing it to the upper
6469 * layers. It mainly checks if the checksum is OK, if so adds it to the
6470 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6471 * to the upper layer. If the checksum is wrong, it increments the Rx
6472 * packet error count, frees the SKB and returns error.
6473 * Return value:
6474 * SUCCESS on success and 0 if the frame had to be dropped.
6475 */
6476 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6477 {
6478 nic_t *sp = ring_data->nic;
6479 struct net_device *dev = (struct net_device *) sp->dev;
6480 struct sk_buff *skb = (struct sk_buff *)
6481 ((unsigned long) rxdp->Host_Control);
6482 int ring_no = ring_data->ring_no;
6483 u16 l3_csum, l4_csum;
6484 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6485 lro_t *lro;
6486
6487 skb->dev = dev;
6488
6489 if (err) {
6490 /* Check for parity error */
6491 if (err & 0x1) {
6492 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6493 }
6494
6495 /*
6496 * Drop the packet if bad transfer code. Exception being
6497 * 0x5, which could be due to unsupported IPv6 extension header.
6498 * In this case, we let stack handle the packet.
6499 * Note that in this case, since checksum will be incorrect,
6500 * stack will validate the same.
6501 */
6502 if (err && ((err >> 48) != 0x5)) {
6503 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6504 dev->name, err);
6505 sp->stats.rx_crc_errors++;
6506 dev_kfree_skb(skb);
6507 atomic_dec(&sp->rx_bufs_left[ring_no]);
6508 rxdp->Host_Control = 0;
6509 return 0;
6510 }
6511 }
6512
6513 /* Updating statistics */
6514 rxdp->Host_Control = 0;
6515 sp->rx_pkt_count++;
6516 sp->stats.rx_packets++;
6517 if (sp->rxd_mode == RXD_MODE_1) {
6518 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6519
6520 sp->stats.rx_bytes += len;
6521 skb_put(skb, len);
6522
6523 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6524 int get_block = ring_data->rx_curr_get_info.block_index;
6525 int get_off = ring_data->rx_curr_get_info.offset;
6526 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6527 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6528 unsigned char *buff = skb_push(skb, buf0_len);
6529
6530 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
6531 sp->stats.rx_bytes += buf0_len + buf2_len;
6532 memcpy(buff, ba->ba_0, buf0_len);
6533
6534 if (sp->rxd_mode == RXD_MODE_3A) {
6535 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6536
6537 skb_put(skb, buf1_len);
6538 skb->len += buf2_len;
6539 skb->data_len += buf2_len;
6540 skb->truesize += buf2_len;
6541 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6542 sp->stats.rx_bytes += buf1_len;
6543
6544 } else
6545 skb_put(skb, buf2_len);
6546 }
6547
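	/*
	 * Trust the hardware checksum only for TCP/UDP frames, only
	 * when Rx checksum offload is enabled, and, with LRO on, only
	 * for frames that are not IP fragments.
	 */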
6548 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6549 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6550 (sp->rx_csum)) {
6551 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6552 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6553 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6554 /*
6555 * NIC verifies if the Checksum of the received
6556 * frame is Ok or not and accordingly returns
6557 * a flag in the RxD.
6558 */
6559 skb->ip_summed = CHECKSUM_UNNECESSARY;
6560 if (sp->lro) {
6561 u32 tcp_len;
6562 u8 *tcp;
6563 int ret = 0;
6564
6565 ret = s2io_club_tcp_session(skb->data, &tcp,
6566 &tcp_len, &lro, rxdp, sp);
6567 switch (ret) {
6568 case 3: /* Begin anew */
6569 lro->parent = skb;
6570 goto aggregate;
6571 case 1: /* Aggregate */
6572 {
6573 lro_append_pkt(sp, lro,
6574 skb, tcp_len);
6575 goto aggregate;
6576 }
6577 case 4: /* Flush session */
6578 {
6579 lro_append_pkt(sp, lro,
6580 skb, tcp_len);
6581 queue_rx_frame(lro->parent);
6582 clear_lro_session(lro);
6583 sp->mac_control.stats_info->
6584 sw_stat.flush_max_pkts++;
6585 goto aggregate;
6586 }
6587 case 2: /* Flush both */
6588 lro->parent->data_len =
6589 lro->frags_len;
6590 sp->mac_control.stats_info->
6591 sw_stat.sending_both++;
6592 queue_rx_frame(lro->parent);
6593 clear_lro_session(lro);
6594 goto send_up;
6595 case 0: /* sessions exceeded */
6596 case -1: /* non-TCP or not
6597 * L2 aggregatable
6598 */
6599 case 5: /*
6600 * First pkt in session not
6601 * L3/L4 aggregatable
6602 */
6603 break;
6604 default:
6605 DBG_PRINT(ERR_DBG,
6606 "%s: Samadhana!!\n",
6607 __FUNCTION__);
6608 BUG();
6609 }
6610 }
6611 } else {
6612 /*
6613 * Packet with erroneous checksum, let the
6614 * upper layers deal with it.
6615 */
6616 skb->ip_summed = CHECKSUM_NONE;
6617 }
6618 } else {
6619 skb->ip_summed = CHECKSUM_NONE;
6620 }
6621
6622 if (!sp->lro) {
6623 skb->protocol = eth_type_trans(skb, dev);
6624 #ifdef CONFIG_S2IO_NAPI
6625 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6626 /* Queueing the vlan frame to the upper layer */
6627 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6628 RXD_GET_VLAN_TAG(rxdp->Control_2));
6629 } else {
6630 netif_receive_skb(skb);
6631 }
6632 #else
6633 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6634 /* Queueing the vlan frame to the upper layer */
6635 vlan_hwaccel_rx(skb, sp->vlgrp,
6636 RXD_GET_VLAN_TAG(rxdp->Control_2));
6637 } else {
6638 netif_rx(skb);
6639 }
6640 #endif
6641 } else {
6642 send_up:
6643 queue_rx_frame(skb);
6644 }
6645 dev->last_rx = jiffies;
6646 aggregate:
6647 atomic_dec(&sp->rx_bufs_left[ring_no]);
6648 return SUCCESS;
6649 }
6650
6651 /**
6652 * s2io_link - stops/starts the Tx queue.
6653 * @sp : private member of the device structure, which is a pointer to the
6654 * s2io_nic structure.
6655 * @link : indicates whether link is UP/DOWN.
6656 * Description:
6657 * This function stops/starts the Tx queue depending on whether the link
6658 * status of the NIC is down or up. This is called by the Alarm
6659 * interrupt handler whenever a link change interrupt comes up.
6660 * Return value:
6661 * void.
6662 */
6663
6664 static void s2io_link(nic_t * sp, int link)
6665 {
6666 struct net_device *dev = (struct net_device *) sp->dev;
6667
6668 if (link != sp->last_link_state) {
6669 if (link == LINK_DOWN) {
6670 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6671 netif_carrier_off(dev);
6672 } else {
6673 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6674 netif_carrier_on(dev);
6675 }
6676 }
6677 sp->last_link_state = link;
6678 }
6679
6680 /**
6681 * get_xena_rev_id - to identify revision ID of xena.
6682 * @pdev : PCI Dev structure
6683 * Description:
6684 * Function to identify the Revision ID of xena.
6685 * Return value:
6686 * returns the revision ID of the device.
6687 */
6688
6689 static int get_xena_rev_id(struct pci_dev *pdev)
6690 {
6691 u8 id = 0;
6692
6693 pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
6694 return id;
6695 }
6696
6697 /**
6698 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6699 * @sp : private member of the device structure, which is a pointer to the
6700 * s2io_nic structure.
6701 * Description:
6702 * This function initializes a few of the PCI and PCI-X configuration registers
6703 * with recommended values.
6704 * Return value:
6705 * void
6706 */
6707
6708 static void s2io_init_pci(nic_t * sp)
6709 {
6710 u16 pci_cmd = 0, pcix_cmd = 0;
6711
6712 /* Enable Data Parity Error Recovery in PCI-X command register. */
6713 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6714 &(pcix_cmd));
6715 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6716 (pcix_cmd | 1));
6717 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6718 &(pcix_cmd));
6719
6720 /* Set the PErr Response bit in PCI command register. */
6721 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6722 pci_write_config_word(sp->pdev, PCI_COMMAND,
6723 (pci_cmd | PCI_COMMAND_PARITY));
6724 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6725 }
6726
6727 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6728 {
6729 if ( tx_fifo_num > 8) {
6730 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6731 "supported\n");
6732 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6733 tx_fifo_num = 8;
6734 }
6735 if ( rx_ring_num > 8) {
6736 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6737 "supported\n");
6738 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6739 rx_ring_num = 8;
6740 }
6741 #ifdef CONFIG_S2IO_NAPI
6742 if (*dev_intr_type != INTA) {
6743 DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when "
6744 "MSI/MSI-X is enabled. Defaulting to INTA\n");
6745 *dev_intr_type = INTA;
6746 }
6747 #endif
6748 #ifndef CONFIG_PCI_MSI
6749 if (*dev_intr_type != INTA) {
6750 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support "
6751 "MSI/MSI-X. Defaulting to INTA\n");
6752 *dev_intr_type = INTA;
6753 }
6754 #else
6755 if (*dev_intr_type > MSI_X) {
6756 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6757 "Defaulting to INTA\n");
6758 *dev_intr_type = INTA;
6759 }
6760 #endif
6761 if ((*dev_intr_type == MSI_X) &&
6762 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6763 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6764 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6765 "Defaulting to INTA\n");
6766 *dev_intr_type = INTA;
6767 }
6768 if (rx_ring_mode > 3) {
6769 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6770 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6771 rx_ring_mode = 3;
6772 }
6773 return SUCCESS;
6774 }
6775
6776 /**
6777 * s2io_init_nic - Initialization of the adapter .
6778 * @pdev : structure containing the PCI related information of the device.
6779 * @pre : the matching entry in the table of PCI devices (s2io_tbl)
6780 * supported by the driver.
6781 * Description:
6782 * The function initializes an adapter identified by the pci_dev structure.
6783 * All OS related initialization including memory and device structure and
6784 * initialization of the device private variable is done. Also the swapper
6784 * control register is initialized to enable read and write into the I/O
6785 * registers of the device.
6786 * Return value:
6787 * returns 0 on success and negative on failure.
6788 */
6789
6790 static int __devinit
6791 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6792 {
6793 nic_t *sp;
6794 struct net_device *dev;
6795 int i, j, ret;
6796 int dma_flag = FALSE;
6797 u32 mac_up, mac_down;
6798 u64 val64 = 0, tmp64 = 0;
6799 XENA_dev_config_t __iomem *bar0 = NULL;
6800 u16 subid;
6801 mac_info_t *mac_control;
6802 struct config_param *config;
6803 int mode;
6804 u8 dev_intr_type = intr_type;
6805
6806 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6807 return ret;
6808
6809 if ((ret = pci_enable_device(pdev))) {
6810 DBG_PRINT(ERR_DBG,
6811 "s2io_init_nic: pci_enable_device failed\n");
6812 return ret;
6813 }
6814
6815 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
6816 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6817 dma_flag = TRUE;
6818 if (pci_set_consistent_dma_mask
6819 (pdev, DMA_64BIT_MASK)) {
6820 DBG_PRINT(ERR_DBG,
6821 "Unable to obtain 64bit DMA for \
6822 consistent allocations\n");
6823 pci_disable_device(pdev);
6824 return -ENOMEM;
6825 }
6826 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
6827 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6828 } else {
6829 pci_disable_device(pdev);
6830 return -ENOMEM;
6831 }
6832 if (dev_intr_type != MSI_X) {
6833 if (pci_request_regions(pdev, s2io_driver_name)) {
6834 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6835 pci_disable_device(pdev);
6836 return -ENODEV;
6837 }
6838 }
6839 else {
6840 if (!(request_mem_region(pci_resource_start(pdev, 0),
6841 pci_resource_len(pdev, 0), s2io_driver_name))) {
6842 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6843 pci_disable_device(pdev);
6844 return -ENODEV;
6845 }
6846 if (!(request_mem_region(pci_resource_start(pdev, 2),
6847 pci_resource_len(pdev, 2), s2io_driver_name))) {
6848 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6849 release_mem_region(pci_resource_start(pdev, 0),
6850 pci_resource_len(pdev, 0));
6851 pci_disable_device(pdev);
6852 return -ENODEV;
6853 }
6854 }
6855
6856 dev = alloc_etherdev(sizeof(nic_t));
6857 if (dev == NULL) {
6858 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6859 pci_disable_device(pdev);
6860 if (dev_intr_type != MSI_X)
6861 pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
	}
6862 return -ENODEV;
6863 }
6863
6864 pci_set_master(pdev);
6865 pci_set_drvdata(pdev, dev);
6866 SET_MODULE_OWNER(dev);
6867 SET_NETDEV_DEV(dev, &pdev->dev);
6868
6869 /* Private member variable initialized to s2io NIC structure */
6870 sp = dev->priv;
6871 memset(sp, 0, sizeof(nic_t));
6872 sp->dev = dev;
6873 sp->pdev = pdev;
6874 sp->high_dma_flag = dma_flag;
6875 sp->device_enabled_once = FALSE;
6876 if (rx_ring_mode == 1)
6877 sp->rxd_mode = RXD_MODE_1;
6878 if (rx_ring_mode == 2)
6879 sp->rxd_mode = RXD_MODE_3B;
6880 if (rx_ring_mode == 3)
6881 sp->rxd_mode = RXD_MODE_3A;
6882
6883 sp->intr_type = dev_intr_type;
6884
6885 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6886 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6887 sp->device_type = XFRAME_II_DEVICE;
6888 else
6889 sp->device_type = XFRAME_I_DEVICE;
6890
6891 sp->lro = lro;
6892
6893 /* Initialize some PCI/PCI-X fields of the NIC. */
6894 s2io_init_pci(sp);
6895
6896 /*
6897 * Setting the device configuration parameters.
6898 * Most of these parameters can be specified by the user during
6899 * module insertion as they are module loadable parameters. If
6900 * these parameters are not not specified during load time, they
6901 * are initialized with default values.
6902 */
6903 mac_control = &sp->mac_control;
6904 config = &sp->config;
6905
6906 /* Tx side parameters. */
6907 config->tx_fifo_num = tx_fifo_num;
6908 for (i = 0; i < MAX_TX_FIFOS; i++) {
6909 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
6910 config->tx_cfg[i].fifo_priority = i;
6911 }
6912
6913 /* mapping the QoS priority to the configured fifos */
6914 for (i = 0; i < MAX_TX_FIFOS; i++)
6915 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
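	/*
	 * fifo_map (a table defined earlier in the driver) distributes
	 * the 8 QoS priorities over however many Tx FIFOs were
	 * configured, so every priority lands on a valid FIFO.
	 */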
6916
6917 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
6918 for (i = 0; i < config->tx_fifo_num; i++) {
6919 config->tx_cfg[i].f_no_snoop =
6920 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
6921 if (config->tx_cfg[i].fifo_len < 65) {
6922 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
6923 break;
6924 }
6925 }
6926 /* + 2 because one Txd for skb->data and one Txd for UFO */
6927 config->max_txds = MAX_SKB_FRAGS + 2;
6928
6929 /* Rx side parameters. */
6930 config->rx_ring_num = rx_ring_num;
6931 for (i = 0; i < MAX_RX_RINGS; i++) {
6932 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
6933 (rxd_count[sp->rxd_mode] + 1);
6934 config->rx_cfg[i].ring_priority = i;
6935 }
6936
6937 for (i = 0; i < rx_ring_num; i++) {
6938 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
6939 config->rx_cfg[i].f_no_snoop =
6940 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
6941 }
6942
6943 /* Setting Mac Control parameters */
6944 mac_control->rmac_pause_time = rmac_pause_time;
6945 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
6946 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
6947
6948
6949 /* Initialize Ring buffer parameters. */
6950 for (i = 0; i < config->rx_ring_num; i++)
6951 atomic_set(&sp->rx_bufs_left[i], 0);
6952
6953 /* Initialize the number of ISRs currently running */
6954 atomic_set(&sp->isr_cnt, 0);
6955
6956 /* initialize the shared memory used by the NIC and the host */
6957 if (init_shared_mem(sp)) {
6958 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
6959 dev->name);
6960 ret = -ENOMEM;
6961 goto mem_alloc_failed;
6962 }
6963
6964 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
6965 pci_resource_len(pdev, 0));
6966 if (!sp->bar0) {
6967 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
6968 dev->name);
6969 ret = -ENOMEM;
6970 goto bar0_remap_failed;
6971 }
6972
6973 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
6974 pci_resource_len(pdev, 2));
6975 if (!sp->bar1) {
6976 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
6977 dev->name);
6978 ret = -ENOMEM;
6979 goto bar1_remap_failed;
6980 }
6981
6982 dev->irq = pdev->irq;
6983 dev->base_addr = (unsigned long) sp->bar0;
6984
6985 /* Initializing the BAR1 address as the start of the FIFO pointer. */
6986 for (j = 0; j < MAX_TX_FIFOS; j++) {
6987 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
6988 (sp->bar1 + (j * 0x00020000));
6989 }
6990
6991 /* Driver entry points */
6992 dev->open = &s2io_open;
6993 dev->stop = &s2io_close;
6994 dev->hard_start_xmit = &s2io_xmit;
6995 dev->get_stats = &s2io_get_stats;
6996 dev->set_multicast_list = &s2io_set_multicast;
6997 dev->do_ioctl = &s2io_ioctl;
6998 dev->change_mtu = &s2io_change_mtu;
6999 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7000 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7001 dev->vlan_rx_register = s2io_vlan_rx_register;
7002 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7003
7004 /*
7005 * will use eth_mac_addr() for dev->set_mac_address
7006 * mac address will be set every time dev->open() is called
7007 */
7008 #if defined(CONFIG_S2IO_NAPI)
7009 dev->poll = s2io_poll;
7010 dev->weight = 32;
7011 #endif
7012
7013 #ifdef CONFIG_NET_POLL_CONTROLLER
7014 dev->poll_controller = s2io_netpoll;
7015 #endif
7016
7017 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7018 if (sp->high_dma_flag == TRUE)
7019 dev->features |= NETIF_F_HIGHDMA;
7020 #ifdef NETIF_F_TSO
7021 dev->features |= NETIF_F_TSO;
7022 #endif
7023 #ifdef NETIF_F_TSO6
7024 dev->features |= NETIF_F_TSO6;
7025 #endif
7026 if (sp->device_type & XFRAME_II_DEVICE) {
7027 dev->features |= NETIF_F_UFO;
7028 dev->features |= NETIF_F_HW_CSUM;
7029 }
7030
7031 dev->tx_timeout = &s2io_tx_watchdog;
7032 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7033 INIT_WORK(&sp->rst_timer_task,
7034 (void (*)(void *)) s2io_restart_nic, dev);
7035 INIT_WORK(&sp->set_link_task,
7036 (void (*)(void *)) s2io_set_link, sp);
7037
7038 pci_save_state(sp->pdev);
7039
7040 /* Setting swapper control on the NIC, for proper reset operation */
7041 if (s2io_set_swapper(sp)) {
7042 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7043 dev->name);
7044 ret = -EAGAIN;
7045 goto set_swap_failed;
7046 }
7047
7048 /* Verify if the Herc works on the slot its placed into */
7049 if (sp->device_type & XFRAME_II_DEVICE) {
7050 mode = s2io_verify_pci_mode(sp);
7051 if (mode < 0) {
7052 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7053 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7054 ret = -EBADSLT;
7055 goto set_swap_failed;
7056 }
7057 }
7058
7059 /* Not needed for Herc */
7060 if (sp->device_type & XFRAME_I_DEVICE) {
7061 /*
7062 * Fix for all "FFs" MAC address problems observed on
7063 * Alpha platforms
7064 */
7065 fix_mac_address(sp);
7066 s2io_reset(sp);
7067 }
7068
7069 /*
7070 * MAC address initialization.
7071 * For now only one mac address will be read and used.
7072 */
7073 bar0 = sp->bar0;
7074 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7075 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7076 writeq(val64, &bar0->rmac_addr_cmd_mem);
7077 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7078 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
7079 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7080 mac_down = (u32) tmp64;
7081 mac_up = (u32) (tmp64 >> 32);
7082
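	/*
	 * The 48-bit station address is returned in rmac_addr_data0_mem
	 * with the first four bytes in the upper 32 bits and the last
	 * two in the top of the lower 32 bits, hence the byte
	 * shuffling below.
	 */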
7083 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
7084
7085 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7086 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7087 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7088 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7089 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7090 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7091
7092 /* Set the factory defined MAC address initially */
7093 dev->addr_len = ETH_ALEN;
7094 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7095
7096 /* reset Nic and bring it to known state */
7097 s2io_reset(sp);
7098
7099 /*
7100 * Initialize the tasklet status and link state flags
7101 * and the card state parameter
7102 */
7103 atomic_set(&(sp->card_state), 0);
7104 sp->tasklet_status = 0;
7105 sp->link_state = 0;
7106
7107 /* Initialize spinlocks */
7108 spin_lock_init(&sp->tx_lock);
7109 #ifndef CONFIG_S2IO_NAPI
7110 spin_lock_init(&sp->put_lock);
7111 #endif
7112 spin_lock_init(&sp->rx_lock);
7113
7114 /*
7115 * SXE-002: Configure link and activity LED to init state
7116 * on driver load.
7117 */
7118 subid = sp->pdev->subsystem_device;
7119 if ((subid & 0xFF) >= 0x07) {
7120 val64 = readq(&bar0->gpio_control);
7121 val64 |= 0x0000800000000000ULL;
7122 writeq(val64, &bar0->gpio_control);
7123 val64 = 0x0411040400000000ULL;
7124 writeq(val64, (void __iomem *) bar0 + 0x2700);
7125 val64 = readq(&bar0->gpio_control);
7126 }
7127
7128 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7129
7130 if (register_netdev(dev)) {
7131 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7132 ret = -ENODEV;
7133 goto register_failed;
7134 }
7135 s2io_vpd_read(sp);
7136 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7137 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7138 sp->product_name, get_xena_rev_id(sp->pdev));
7139 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7140 s2io_driver_version);
7141 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7142 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
7143 sp->def_mac_addr[0].mac_addr[0],
7144 sp->def_mac_addr[0].mac_addr[1],
7145 sp->def_mac_addr[0].mac_addr[2],
7146 sp->def_mac_addr[0].mac_addr[3],
7147 sp->def_mac_addr[0].mac_addr[4],
7148 sp->def_mac_addr[0].mac_addr[5]);
7149 if (sp->device_type & XFRAME_II_DEVICE) {
7150 mode = s2io_print_pci_mode(sp);
7151 if (mode < 0) {
7152 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7153 ret = -EBADSLT;
7154 unregister_netdev(dev);
7155 goto set_swap_failed;
7156 }
7157 }
7158 switch(sp->rxd_mode) {
7159 case RXD_MODE_1:
7160 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7161 dev->name);
7162 break;
7163 case RXD_MODE_3B:
7164 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7165 dev->name);
7166 break;
7167 case RXD_MODE_3A:
7168 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7169 dev->name);
7170 break;
7171 }
7172 #ifdef CONFIG_S2IO_NAPI
7173 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7174 #endif
7175 switch(sp->intr_type) {
7176 case INTA:
7177 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7178 break;
7179 case MSI:
7180 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7181 break;
7182 case MSI_X:
7183 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7184 break;
7185 }
7186 if (sp->lro)
7187 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7188 dev->name);
7189
7190 /* Initialize device name */
7191 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7192
7193 /* Initialize bimodal Interrupts */
7194 sp->config.bimodal = bimodal;
7195 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7196 sp->config.bimodal = 0;
7197 DBG_PRINT(ERR_DBG, "%s: Bimodal interrupts not supported by Xframe I\n",
7198 dev->name);
7199 }
7200
7201 /*
7202 * Mark the link state as down at this point; when the link change
7203 * interrupt arrives, the state will be updated automatically to
7204 * the right state.
7205 */
7206 netif_carrier_off(dev);
7207
7208 return 0;
7209
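/*
 * Error unwind: the labels below fall through in the reverse order of
 * acquisition, so a failure at any stage releases only the resources
 * that were actually set up before the jump.
 */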
7210 register_failed:
7211 set_swap_failed:
7212 iounmap(sp->bar1);
7213 bar1_remap_failed:
7214 iounmap(sp->bar0);
7215 bar0_remap_failed:
7216 mem_alloc_failed:
7217 free_shared_mem(sp);
7218 pci_disable_device(pdev);
7219 if (dev_intr_type != MSI_X)
7220 pci_release_regions(pdev);
7221 else {
7222 release_mem_region(pci_resource_start(pdev, 0),
7223 pci_resource_len(pdev, 0));
7224 release_mem_region(pci_resource_start(pdev, 2),
7225 pci_resource_len(pdev, 2));
7226 }
7227 pci_set_drvdata(pdev, NULL);
7228 free_netdev(dev);
7229
7230 return ret;
7231 }
7232
7233 /**
7234 * s2io_rem_nic - Free the PCI device
7235 * @pdev: structure containing the PCI related information of the device.
7236 * Description: This function is called by the PCI subsystem to release a
7237 * PCI device and free all resources held by the device. This could
7238 * be in response to a hot-plug event or when the driver is to be removed
7239 * from memory.
7240 */
7241
7242 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7243 {
7244 struct net_device *dev =
7245 (struct net_device *) pci_get_drvdata(pdev);
7246 nic_t *sp;
7247
7248 if (dev == NULL) {
7249 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7250 return;
7251 }
7252
7253 sp = dev->priv;
7254 unregister_netdev(dev);
7255
7256 free_shared_mem(sp);
7257 iounmap(sp->bar0);
7258 iounmap(sp->bar1);
7259 pci_disable_device(pdev);
7260 if (sp->intr_type != MSI_X)
7261 pci_release_regions(pdev);
7262 else {
7263 release_mem_region(pci_resource_start(pdev, 0),
7264 pci_resource_len(pdev, 0));
7265 release_mem_region(pci_resource_start(pdev, 2),
7266 pci_resource_len(pdev, 2));
7267 }
7268 pci_set_drvdata(pdev, NULL);
7269 free_netdev(dev);
7270 }
7271
7272 /**
7273 * s2io_starter - Entry point for the driver
7274 * Description: This function is the entry point for the driver. It registers
7275 * the driver with the PCI subsystem.
7276 */
7277
7278 int __init s2io_starter(void)
7279 {
7280 return pci_module_init(&s2io_driver);
7281 }
7282
7283 /**
7284 * s2io_closer - Cleanup routine for the driver
7285 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
7286 */
7287
7288 static void s2io_closer(void)
7289 {
7290 pci_unregister_driver(&s2io_driver);
7291 DBG_PRINT(INIT_DBG, "cleanup done\n");
7292 }
7293
7294 module_init(s2io_starter);
7295 module_exit(s2io_closer);
7296
7297 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7298 struct tcphdr **tcp, RxD_t *rxdp)
7299 {
7300 int ip_off;
7301 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7302
7303 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7304 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7305 __FUNCTION__);
7306 return -1;
7307 }
7308
7309 /* TODO:
7310 * By default the VLAN tag in the MAC header is stripped by the card. If
7311 * this feature is turned off in the rx_pa_cfg register, then the ip_off
7312 * field has to be shifted by a further 2 bytes.
7313 */
7314 switch (l2_type) {
7315 case 0: /* DIX type */
7316 case 4: /* DIX type with VLAN */
7317 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7318 break;
7319 /* LLC, SNAP etc are considered non-mergeable */
7320 default:
7321 return -1;
7322 }
7323
7324 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7325 ip_len = (u8)((*ip)->ihl);
7326 ip_len <<= 2;
7327 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7328
7329 return 0;
7330 }
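/*
 * Note on the offsets above: l2_type comes from bits 39:37 of Control_1,
 * where values 0 and 4 denote plain DIX and VLAN-tagged DIX frames.
 * HEADER_ETHERNET_II_802_3_SIZE is presumably the 14-byte DIX header
 * (dst MAC + src MAC + ethertype); since the card strips the VLAN tag
 * by default (see the TODO above), both cases share the same IP offset.
 */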
7331
7332 static int check_for_socket_match(lro_t *lro, struct iphdr *ip,
7333 struct tcphdr *tcp)
7334 {
7335 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7336 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7337 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7338 return -1;
7339 return 0;
7340 }
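/*
 * The 4-tuple comparison above can stay in network byte order: only
 * equality matters for a socket match, so no ntohl()/ntohs()
 * conversions are needed.
 */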
7341
7342 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7343 {
7344 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7345 }
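/*
 * Worked example for get_l4_pyld_length(): a 1500-byte IP datagram
 * (tot_len = 1500) with no IP options (ihl = 5, i.e. 20 bytes) and a
 * TCP header carrying timestamps (doff = 8, i.e. 32 bytes) has a
 * 1500 - 20 - 32 = 1448 byte TCP payload.
 */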
7346
7347 static void initiate_new_session(lro_t *lro, u8 *l2h,
7348 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7349 {
7350 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7351 lro->l2h = l2h;
7352 lro->iph = ip;
7353 lro->tcph = tcp;
7354 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7355 lro->tcp_ack = tcp->ack_seq; /* network order; update_L3L4_header() writes it back as-is */
lro->window = tcp->window; /* cache the initial window so a flush before any aggregation writes a valid value */
7356 lro->sg_num = 1;
7357 lro->total_len = ntohs(ip->tot_len);
7358 lro->frags_len = 0;
7359 /*
7360 * check if we saw TCP timestamp. Other consistency checks have
7361 * already been done.
7362 */
7363 if (tcp->doff == 8) {
7364 u32 *ptr;
7365 ptr = (u32 *)(tcp+1);
7366 lro->saw_ts = 1;
7367 lro->cur_tsval = ntohl(*(ptr+1)); /* host order for monotonicity checks */
7368 lro->cur_tsecr = *(ptr+2); /* network order; written back as-is */
7369 }
7370 lro->in_use = 1;
7371 }
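/*
 * Layout assumed by the tcp->doff == 8 case above: a 32-byte TCP header
 * is the 20-byte base header plus 12 option bytes, which for the
 * canonical NOP, NOP, TIMESTAMP encoding parse as three 32-bit words
 * past the header: ptr[0] = NOP + NOP + kind (8) + length (10),
 * ptr[1] = TSval, ptr[2] = TSecr. verify_l3_l4_lro_capable() below
 * rejects any option mix other than a single timestamp option before a
 * session gets this far.
 */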
7372
7373 static void update_L3L4_header(nic_t *sp, lro_t *lro)
7374 {
7375 struct iphdr *ip = lro->iph;
7376 struct tcphdr *tcp = lro->tcph;
7377 u16 nchk;
7378 StatInfo_t *statinfo = sp->mac_control.stats_info;
7379 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7380
7381 /* Update L3 header */
7382 ip->tot_len = htons(lro->total_len);
7383 ip->check = 0;
7384 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7385 ip->check = nchk;
7386
7387 /* Update L4 header */
7388 tcp->ack_seq = lro->tcp_ack;
7389 tcp->window = lro->window;
7390
7391 /* Update tsecr field if this session has timestamps enabled */
7392 if (lro->saw_ts) {
7393 u32 *ptr = (u32 *)(tcp + 1);
7394 *(ptr+2) = lro->cur_tsecr;
7395 }
7396
7397 /* Update counters required for calculation of
7398 * average no. of packets aggregated.
7399 */
7400 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7401 statinfo->sw_stat.num_aggregations++;
7402 }
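/*
 * Only tot_len, the IP checksum, ack_seq, window and TSecr are rewritten
 * in update_L3L4_header() above: the remaining header fields of the
 * first segment (addresses, ports, seq) already describe the aggregated
 * super-packet. Note that ip->check must be zeroed before
 * ip_fast_csum() recomputes it over the ihl 32-bit words of the header.
 */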
7403
7404 static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
7405 struct tcphdr *tcp, u32 l4_pyld)
7406 {
7407 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7408 lro->total_len += l4_pyld;
7409 lro->frags_len += l4_pyld;
7410 lro->tcp_next_seq += l4_pyld;
7411 lro->sg_num++;
7412
7413 /* Update ack sequence number and window advertisement (from this pkt) in the LRO object */
7414 lro->tcp_ack = tcp->ack_seq;
7415 lro->window = tcp->window;
7416
7417 if (lro->saw_ts) {
7418 u32 *ptr;
7419 /* Update tsecr and tsval from this packet */
7420 ptr = (u32 *) (tcp + 1);
7421 lro->cur_tsval = ntohl(*(ptr + 1)); /* host order for monotonicity checks */
7422 lro->cur_tsecr = *(ptr + 2); /* network order */
7423 }
7424 }
7425
7426 static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
7427 struct tcphdr *tcp, u32 tcp_pyld_len)
7428 {
7429 u8 *ptr;
7430
7431 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7432
7433 if (!tcp_pyld_len) {
7434 /* Runt frame or a pure ack */
7435 return -1;
7436 }
7437
7438 if (ip->ihl != 5) /* IP has options */
7439 return -1;
7440
7441 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7442 !tcp->ack) {
7443 /*
7444 * Currently only the ACK control bit is recognized; any
7445 * other control bit being set results in flushing
7446 * the LRO session.
7447 */
7448 return -1;
7449 }
7450
7451 /*
7452 * Allow only one TCP timestamp option. Don't aggregate if
7453 * any other options are detected.
7454 */
7455 if (tcp->doff != 5 && tcp->doff != 8)
7456 return -1;
7457
7458 if (tcp->doff == 8) {
7459 ptr = (u8 *)(tcp + 1);
7460 while (*ptr == TCPOPT_NOP)
7461 ptr++;
7462 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7463 return -1;
7464
7465 /* Ensure timestamp value increases monotonically */
7466 if (l_lro)
7467 if (l_lro->cur_tsval > ntohl(*((u32 *)(ptr+2))))
7468 return -1;
7469
7470 /* timestamp echo reply should be non-zero */
7471 if (*((u32 *)(ptr+6)) == 0)
7472 return -1;
7473 }
7474
7475 return 0;
7476 }
7477
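/*
 * Return values of s2io_club_tcp_session(), derived from the cases
 * below:
 *   0 - all LRO sessions in use, send the packet up unaggregated
 *   1 - packet aggregated into an existing session
 *   2 - out-of-order or unmergeable packet, flush the session
 *   3 - new session initiated with this packet
 *   4 - aggregated, but the session hit lro_max_aggr_per_sess; flush it
 *   5 - packet not L3/L4 aggregatable, send it up without a session
 *  -1 - not an LRO-capable L2/L4 frame
 */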
7478 static int
7479 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7480 RxD_t *rxdp, nic_t *sp)
7481 {
7482 struct iphdr *ip;
7483 struct tcphdr *tcph;
7484 int ret = 0, i;
7485
7486 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7487 rxdp))) {
7488 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7489 ip->saddr, ip->daddr);
7490 } else {
7491 return ret;
7492 }
7493
7494 tcph = (struct tcphdr *)*tcp;
7495 *tcp_len = get_l4_pyld_length(ip, tcph);
7496 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7497 lro_t *l_lro = &sp->lro0_n[i];
7498 if (l_lro->in_use) {
7499 if (check_for_socket_match(l_lro, ip, tcph))
7500 continue;
7501 /* Sock pair matched */
7502 *lro = l_lro;
7503
7504 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7505 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7506 "0x%x, actual 0x%x\n", __FUNCTION__,
7507 (*lro)->tcp_next_seq,
7508 ntohl(tcph->seq));
7509
7510 sp->mac_control.stats_info->
7511 sw_stat.outof_sequence_pkts++;
7512 ret = 2;
7513 break;
7514 }
7515
7516 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7517 ret = 1; /* Aggregate */
7518 else
7519 ret = 2; /* Flush both */
7520 break;
7521 }
7522 }
7523
7524 if (ret == 0) {
7525 /* Before searching for available LRO objects,
7526 * check if the pkt is L3/L4 aggregatable. If not,
7527 * don't create a new LRO session; just send this
7528 * packet up.
7529 */
7530 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
7531 return 5;
7532 }
7533
7534 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7535 lro_t *l_lro = &sp->lro0_n[i];
7536 if (!(l_lro->in_use)) {
7537 *lro = l_lro;
7538 ret = 3; /* Begin anew */
7539 break;
7540 }
7541 }
7542 }
7543
7544 if (ret == 0) { /* sessions exceeded */
7545 DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
7546 __FUNCTION__);
7547 *lro = NULL;
7548 return ret;
7549 }
7550
7551 switch (ret) {
7552 case 3:
7553 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7554 break;
7555 case 2:
7556 update_L3L4_header(sp, *lro);
7557 break;
7558 case 1:
7559 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
7560 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7561 update_L3L4_header(sp, *lro);
7562 ret = 4; /* Flush the LRO */
7563 }
7564 break;
7565 default:
7566 DBG_PRINT(ERR_DBG, "%s: Unhandled LRO state\n",
7567 __FUNCTION__);
7568 break;
7569 }
7570
7571 return ret;
7572 }
7573
7574 static void clear_lro_session(lro_t *lro)
7575 {
7576 memset(lro, 0, sizeof(lro_t));
7579 }
7580
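/*
 * Hand a completed frame to the stack. Under CONFIG_S2IO_NAPI frames
 * are delivered from softirq context via netif_receive_skb(); otherwise
 * netif_rx() queues them to the per-CPU backlog.
 */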
7581 static void queue_rx_frame(struct sk_buff *skb)
7582 {
7583 struct net_device *dev = skb->dev;
7584
7585 skb->protocol = eth_type_trans(skb, dev);
7586 #ifdef CONFIG_S2IO_NAPI
7587 netif_receive_skb(skb);
7588 #else
7589 netif_rx(skb);
7590 #endif
7591 }
7592
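/*
 * lro_append_pkt() grows the aggregate non-linearly: the new skb is
 * trimmed down to its TCP payload with skb_pull() and chained onto the
 * parent skb's frag_list, while the parent's len/data_len are updated
 * so the stack sees one large frame whose extra data follows the first
 * segment's headers.
 */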
7593 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
7594 u32 tcp_len)
7595 {
7596 struct sk_buff *tmp, *first = lro->parent;
7597
7598 first->len += tcp_len;
7599 first->data_len = lro->frags_len;
7600 skb_pull(skb, (skb->len - tcp_len));
7601 if ((tmp = skb_shinfo(first)->frag_list)) {
7602 while (tmp->next)
7603 tmp = tmp->next;
7604 tmp->next = skb;
7605 }
7606 else
7607 skb_shinfo(first)->frag_list = skb;
7608 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7609 return;
7610 }