s2io: add PCI error recovery support
1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code parts that were
22 * deprecated and also for styling-related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 *
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines the maximum number of packets that
44 * can be aggregated as a single large packet
45 * napi: This parameter is used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable, '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
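/*
 * Illustrative usage of the parameters above (values are hypothetical,
 * not recommendations): a load with two Tx FIFOs, MSI-X interrupts and
 * NAPI enabled would look like
 *
 *   modprobe s2io tx_fifo_num=2 intr_type=2 napi=1
 *
 * Array parameters such as tx_fifo_len and rx_ring_sz take
 * comma-separated values, e.g. tx_fifo_len=4096,4096.
 */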
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local includes */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
87 #define DRV_VERSION "2.0.23.1"
88
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
92
93 static int rxd_size[4] = {32,48,48,64};
94 static int rxd_count[4] = {127,85,85,63};
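/*
 * rxd_size[] and rxd_count[] are indexed by the Rx descriptor mode.
 * A worked check for the 32-byte case: 4096 / 32 = 128 descriptor slots
 * per block, one of which is consumed by the block-link entry, leaving
 * the 127 usable RxDs listed above (hence the "rxd_count[mode] + 1"
 * arithmetic used throughout init_shared_mem()).
 */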
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98 int ret;
99
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103 return ret;
104 }
105
106 /*
107 * Cards with the following subsystem_ids have a link state indication
108 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
109 * The macro below identifies these cards given the subsystem_id.
110 */
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
115
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
119 #define PANIC 1
120 #define LOW 2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123 struct mac_info *mac_control;
124
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
127 return PANIC;
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129 return LOW;
130 return 0;
131 }
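/*
 * Worked example of the thresholds above, assuming a mode where
 * rxd_count[] is 127: if 127 or fewer Rx buffers remain the ring is one
 * block (or less) from starving and PANIC asks the caller to refill
 * immediately; if the ring is more than 16 buffers short of its full
 * pkt_cnt, LOW requests a routine refill; otherwise 0 means no pressure.
 */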
132
133 /* Ethtool related variables and Macros. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
140 };
141
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
143 {"tmac_frms"},
144 {"tmac_data_octets"},
145 {"tmac_drop_frms"},
146 {"tmac_mcst_frms"},
147 {"tmac_bcst_frms"},
148 {"tmac_pause_ctrl_frms"},
149 {"tmac_ttl_octets"},
150 {"tmac_ucst_frms"},
151 {"tmac_nucst_frms"},
152 {"tmac_any_err_frms"},
153 {"tmac_ttl_less_fb_octets"},
154 {"tmac_vld_ip_octets"},
155 {"tmac_vld_ip"},
156 {"tmac_drop_ip"},
157 {"tmac_icmp"},
158 {"tmac_rst_tcp"},
159 {"tmac_tcp"},
160 {"tmac_udp"},
161 {"rmac_vld_frms"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
164 {"rmac_drop_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
168 {"rmac_out_rng_len_err_frms"},
169 {"rmac_long_frms"},
170 {"rmac_pause_ctrl_frms"},
171 {"rmac_unsup_ctrl_frms"},
172 {"rmac_ttl_octets"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
175 {"rmac_discarded_frms"},
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
178 {"rmac_ttl_frms"},
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
181 {"rmac_frag_frms"},
182 {"rmac_jabber_frms"},
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
189 {"rmac_ip"},
190 {"rmac_ip_octets"},
191 {"rmac_hdr_err_ip"},
192 {"rmac_drop_ip"},
193 {"rmac_icmp"},
194 {"rmac_tcp"},
195 {"rmac_udp"},
196 {"rmac_err_drp_udp"},
197 {"rmac_xgmii_err_sym"},
198 {"rmac_frms_q0"},
199 {"rmac_frms_q1"},
200 {"rmac_frms_q2"},
201 {"rmac_frms_q3"},
202 {"rmac_frms_q4"},
203 {"rmac_frms_q5"},
204 {"rmac_frms_q6"},
205 {"rmac_frms_q7"},
206 {"rmac_full_q0"},
207 {"rmac_full_q1"},
208 {"rmac_full_q2"},
209 {"rmac_full_q3"},
210 {"rmac_full_q4"},
211 {"rmac_full_q5"},
212 {"rmac_full_q6"},
213 {"rmac_full_q7"},
214 {"rmac_pause_cnt"},
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
217 {"rmac_accepted_ip"},
218 {"rmac_err_tcp"},
219 {"rd_req_cnt"},
220 {"new_rd_req_cnt"},
221 {"new_rd_req_rtry_cnt"},
222 {"rd_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
224 {"wr_req_cnt"},
225 {"new_wr_req_cnt"},
226 {"new_wr_req_rtry_cnt"},
227 {"wr_rtry_cnt"},
228 {"wr_disc_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
230 {"txp_wr_cnt"},
231 {"txd_rd_cnt"},
232 {"txd_wr_cnt"},
233 {"rxd_rd_cnt"},
234 {"rxd_wr_cnt"},
235 {"txf_rd_cnt"},
236 {"rxf_wr_cnt"}
237 };
238
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240 {"rmac_ttl_1519_4095_frms"},
241 {"rmac_ttl_4096_8191_frms"},
242 {"rmac_ttl_8192_max_frms"},
243 {"rmac_ttl_gt_max_frms"},
244 {"rmac_osized_alt_frms"},
245 {"rmac_jabber_alt_frms"},
246 {"rmac_gt_max_alt_frms"},
247 {"rmac_vlan_frms"},
248 {"rmac_len_discard"},
249 {"rmac_fcs_discard"},
250 {"rmac_pf_discard"},
251 {"rmac_da_discard"},
252 {"rmac_red_discard"},
253 {"rmac_rts_discard"},
254 {"rmac_ingm_full_discard"},
255 {"link_fault_cnt"}
256 };
257
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
262 {"parity_err_cnt"},
263 {"serious_err_cnt"},
264 {"soft_reset_cnt"},
265 {"fifo_full_cnt"},
266 {"ring_full_cnt"},
267 ("alarm_transceiver_temp_high"),
268 ("alarm_transceiver_temp_low"),
269 ("alarm_laser_bias_current_high"),
270 ("alarm_laser_bias_current_low"),
271 ("alarm_laser_output_power_high"),
272 ("alarm_laser_output_power_low"),
273 ("warn_transceiver_temp_high"),
274 ("warn_transceiver_temp_low"),
275 ("warn_laser_bias_current_high"),
276 ("warn_laser_bias_current_low"),
277 ("warn_laser_output_power_high"),
278 ("warn_laser_output_power_low"),
279 ("lro_aggregated_pkts"),
280 ("lro_flush_both_count"),
281 ("lro_out_of_sequence_pkts"),
282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"),
284 ("mem_alloc_fail_cnt"),
285 ("watchdog_timer_cnt"),
286 ("mem_allocated"),
287 ("mem_freed"),
288 ("link_up_cnt"),
289 ("link_down_cnt"),
290 ("link_up_time"),
291 ("link_down_time"),
292 ("tx_tcode_buf_abort_cnt"),
293 ("tx_tcode_desc_abort_cnt"),
294 ("tx_tcode_parity_err_cnt"),
295 ("tx_tcode_link_loss_cnt"),
296 ("tx_tcode_list_proc_err_cnt"),
297 ("rx_tcode_parity_err_cnt"),
298 ("rx_tcode_abort_cnt"),
299 ("rx_tcode_parity_abort_cnt"),
300 ("rx_tcode_rda_fail_cnt"),
301 ("rx_tcode_unkn_prot_cnt"),
302 ("rx_tcode_fcs_err_cnt"),
303 ("rx_tcode_buf_size_err_cnt"),
304 ("rx_tcode_rxd_corrupt_cnt"),
305 ("rx_tcode_unkn_err_cnt")
306 };
307
308 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
309 #define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
310 ETH_GSTRING_LEN
311 #define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
312
313 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
314 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
315
316 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
317 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
318
319 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
320 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
321
322 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
323 init_timer(&timer); \
324 timer.function = handle; \
325 timer.data = (unsigned long) arg; \
326 mod_timer(&timer, (jiffies + exp))
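/*
 * Illustrative expansion (names are examples only): arming a half-second
 * timer on the driver's private struct,
 *
 *   S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle,
 *                   (unsigned long)sp, (HZ / 2));
 *
 * becomes an init_timer()/mod_timer() pair with the handler and argument
 * filled in.
 */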
327
328 /* Add the vlan */
329 static void s2io_vlan_rx_register(struct net_device *dev,
330 struct vlan_group *grp)
331 {
332 struct s2io_nic *nic = dev->priv;
333 unsigned long flags;
334
335 spin_lock_irqsave(&nic->tx_lock, flags);
336 nic->vlgrp = grp;
337 spin_unlock_irqrestore(&nic->tx_lock, flags);
338 }
339
340 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
341 static int vlan_strip_flag;
342
343 /*
344 * Constants to be programmed into the Xena's registers, to configure
345 * the XAUI.
346 */
347
348 #define END_SIGN 0x0
349 static const u64 herc_act_dtx_cfg[] = {
350 /* Set address */
351 0x8000051536750000ULL, 0x80000515367500E0ULL,
352 /* Write data */
353 0x8000051536750004ULL, 0x80000515367500E4ULL,
354 /* Set address */
355 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
356 /* Write data */
357 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
358 /* Set address */
359 0x801205150D440000ULL, 0x801205150D4400E0ULL,
360 /* Write data */
361 0x801205150D440004ULL, 0x801205150D4400E4ULL,
362 /* Set address */
363 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
364 /* Write data */
365 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
366 /* Done */
367 END_SIGN
368 };
369
370 static const u64 xena_dtx_cfg[] = {
371 /* Set address */
372 0x8000051500000000ULL, 0x80000515000000E0ULL,
373 /* Write data */
374 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
375 /* Set address */
376 0x8001051500000000ULL, 0x80010515000000E0ULL,
377 /* Write data */
378 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
379 /* Set address */
380 0x8002051500000000ULL, 0x80020515000000E0ULL,
381 /* Write data */
382 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
383 END_SIGN
384 };
385
386 /*
387 * Constants for Fixing the MacAddress problem seen mostly on
388 * Alpha machines.
389 */
390 static const u64 fix_mac[] = {
391 0x0060000000000000ULL, 0x0060600000000000ULL,
392 0x0040600000000000ULL, 0x0000600000000000ULL,
393 0x0020600000000000ULL, 0x0060600000000000ULL,
394 0x0020600000000000ULL, 0x0060600000000000ULL,
395 0x0020600000000000ULL, 0x0060600000000000ULL,
396 0x0020600000000000ULL, 0x0060600000000000ULL,
397 0x0020600000000000ULL, 0x0060600000000000ULL,
398 0x0020600000000000ULL, 0x0060600000000000ULL,
399 0x0020600000000000ULL, 0x0060600000000000ULL,
400 0x0020600000000000ULL, 0x0060600000000000ULL,
401 0x0020600000000000ULL, 0x0060600000000000ULL,
402 0x0020600000000000ULL, 0x0060600000000000ULL,
403 0x0020600000000000ULL, 0x0000600000000000ULL,
404 0x0040600000000000ULL, 0x0060600000000000ULL,
405 END_SIGN
406 };
407
408 MODULE_LICENSE("GPL");
409 MODULE_VERSION(DRV_VERSION);
410
411
412 /* Module Loadable parameters. */
413 S2IO_PARM_INT(tx_fifo_num, 1);
414 S2IO_PARM_INT(rx_ring_num, 1);
415
416
417 S2IO_PARM_INT(rx_ring_mode, 1);
418 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
419 S2IO_PARM_INT(rmac_pause_time, 0x100);
420 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
421 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
422 S2IO_PARM_INT(shared_splits, 0);
423 S2IO_PARM_INT(tmac_util_period, 5);
424 S2IO_PARM_INT(rmac_util_period, 5);
425 S2IO_PARM_INT(bimodal, 0);
426 S2IO_PARM_INT(l3l4hdr_size, 128);
427 /* Frequency of Rx desc syncs expressed as power of 2 */
428 S2IO_PARM_INT(rxsync_frequency, 3);
429 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
430 S2IO_PARM_INT(intr_type, 0);
431 /* Large receive offload feature */
432 S2IO_PARM_INT(lro, 0);
433 /* Max pkts to be aggregated by LRO at one time. If not specified,
434 * aggregation happens until we hit the max IP pkt size (64K)
435 */
436 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
437 S2IO_PARM_INT(indicate_max_pkts, 0);
438
439 S2IO_PARM_INT(napi, 1);
440 S2IO_PARM_INT(ufo, 0);
441 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
442
443 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
444 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
445 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
446 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
447 static unsigned int rts_frm_len[MAX_RX_RINGS] =
448 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
449
450 module_param_array(tx_fifo_len, uint, NULL, 0);
451 module_param_array(rx_ring_sz, uint, NULL, 0);
452 module_param_array(rts_frm_len, uint, NULL, 0);
453
454 /*
455 * S2IO device table.
456 * This table lists all the devices that this driver supports.
457 */
458 static struct pci_device_id s2io_tbl[] __devinitdata = {
459 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
460 PCI_ANY_ID, PCI_ANY_ID},
461 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
462 PCI_ANY_ID, PCI_ANY_ID},
463 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
464 PCI_ANY_ID, PCI_ANY_ID},
465 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
466 PCI_ANY_ID, PCI_ANY_ID},
467 {0,}
468 };
469
470 MODULE_DEVICE_TABLE(pci, s2io_tbl);
471
472 static struct pci_error_handlers s2io_err_handler = {
473 .error_detected = s2io_io_error_detected,
474 .slot_reset = s2io_io_slot_reset,
475 .resume = s2io_io_resume,
476 };
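/*
 * These hooks tie into the kernel's generic PCI error recovery sequence:
 * on a detected fault the PCI core calls ->error_detected() (the driver
 * quiesces and returns a pci_ers_result_t), then ->slot_reset() once the
 * link has been reset, and finally ->resume() to restart traffic.
 */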
477
478 static struct pci_driver s2io_driver = {
479 .name = "S2IO",
480 .id_table = s2io_tbl,
481 .probe = s2io_init_nic,
482 .remove = __devexit_p(s2io_rem_nic),
483 .err_handler = &s2io_err_handler,
484 };
485
486 /* A simplifier macro used by both the init and free shared_mem functions. */
487 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
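/*
 * Worked example (sizes illustrative): if lst_size works out to 256
 * bytes, then lst_per_page = 4096 / 256 = 16 TxDLs per page, and a FIFO
 * of 100 TxDLs needs TXD_MEM_PAGE_CNT(100, 16) = (100 + 15) / 16 = 7
 * pages; the "+ per_each - 1" term simply rounds the division up.
 */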
488
489 /**
490 * init_shared_mem - Allocation and Initialization of Memory
491 * @nic: Device private variable.
492 * Description: The function allocates all the memory areas shared
493 * between the NIC and the driver. This includes Tx descriptors,
494 * Rx descriptors and the statistics block.
495 */
496
497 static int init_shared_mem(struct s2io_nic *nic)
498 {
499 u32 size;
500 void *tmp_v_addr, *tmp_v_addr_next;
501 dma_addr_t tmp_p_addr, tmp_p_addr_next;
502 struct RxD_block *pre_rxd_blk = NULL;
503 int i, j, blk_cnt;
504 int lst_size, lst_per_page;
505 struct net_device *dev = nic->dev;
506 unsigned long tmp;
507 struct buffAdd *ba;
508
509 struct mac_info *mac_control;
510 struct config_param *config;
511 unsigned long long mem_allocated = 0;
512
513 mac_control = &nic->mac_control;
514 config = &nic->config;
515
516
517 /* Allocation and initialization of TXDLs in FIFOs */
518 size = 0;
519 for (i = 0; i < config->tx_fifo_num; i++) {
520 size += config->tx_cfg[i].fifo_len;
521 }
522 if (size > MAX_AVAILABLE_TXDS) {
523 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
524 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
525 return -EINVAL;
526 }
527
528 lst_size = (sizeof(struct TxD) * config->max_txds);
529 lst_per_page = PAGE_SIZE / lst_size;
530
531 for (i = 0; i < config->tx_fifo_num; i++) {
532 int fifo_len = config->tx_cfg[i].fifo_len;
533 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
534 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
535 GFP_KERNEL);
536 if (!mac_control->fifos[i].list_info) {
537 DBG_PRINT(INFO_DBG,
538 "Malloc failed for list_info\n");
539 return -ENOMEM;
540 }
541 mem_allocated += list_holder_size;
542 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
543 }
544 for (i = 0; i < config->tx_fifo_num; i++) {
545 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
546 lst_per_page);
547 mac_control->fifos[i].tx_curr_put_info.offset = 0;
548 mac_control->fifos[i].tx_curr_put_info.fifo_len =
549 config->tx_cfg[i].fifo_len - 1;
550 mac_control->fifos[i].tx_curr_get_info.offset = 0;
551 mac_control->fifos[i].tx_curr_get_info.fifo_len =
552 config->tx_cfg[i].fifo_len - 1;
553 mac_control->fifos[i].fifo_no = i;
554 mac_control->fifos[i].nic = nic;
555 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
556
557 for (j = 0; j < page_num; j++) {
558 int k = 0;
559 dma_addr_t tmp_p;
560 void *tmp_v;
561 tmp_v = pci_alloc_consistent(nic->pdev,
562 PAGE_SIZE, &tmp_p);
563 if (!tmp_v) {
564 DBG_PRINT(INFO_DBG,
565 "pci_alloc_consistent ");
566 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
567 return -ENOMEM;
568 }
569 /* If we got a zero DMA address (this can happen on
570 * certain platforms such as PPC), reallocate.
571 * Store the virtual address of the page we don't
572 * want, to be freed later.
573 */
574 if (!tmp_p) {
575 mac_control->zerodma_virt_addr = tmp_v;
576 DBG_PRINT(INIT_DBG,
577 "%s: Zero DMA address for TxDL. ", dev->name);
578 DBG_PRINT(INIT_DBG,
579 "Virtual address %p\n", tmp_v);
580 tmp_v = pci_alloc_consistent(nic->pdev,
581 PAGE_SIZE, &tmp_p);
582 if (!tmp_v) {
583 DBG_PRINT(INFO_DBG,
584 "pci_alloc_consistent ");
585 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
586 return -ENOMEM;
587 }
588 mem_allocated += PAGE_SIZE;
589 }
590 while (k < lst_per_page) {
591 int l = (j * lst_per_page) + k;
592 if (l == config->tx_cfg[i].fifo_len)
593 break;
594 mac_control->fifos[i].list_info[l].list_virt_addr =
595 tmp_v + (k * lst_size);
596 mac_control->fifos[i].list_info[l].list_phy_addr =
597 tmp_p + (k * lst_size);
598 k++;
599 }
600 }
601 }
602
603 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
604 if (!nic->ufo_in_band_v)
605 return -ENOMEM;
606 mem_allocated += (size * sizeof(u64));
607
608 /* Allocation and initialization of RXDs in Rings */
609 size = 0;
610 for (i = 0; i < config->rx_ring_num; i++) {
611 if (config->rx_cfg[i].num_rxd %
612 (rxd_count[nic->rxd_mode] + 1)) {
613 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
614 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
615 i);
616 DBG_PRINT(ERR_DBG, "RxDs per Block");
617 return FAILURE;
618 }
619 size += config->rx_cfg[i].num_rxd;
620 mac_control->rings[i].block_count =
621 config->rx_cfg[i].num_rxd /
622 (rxd_count[nic->rxd_mode] + 1 );
623 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
624 mac_control->rings[i].block_count;
625 }
626 if (nic->rxd_mode == RXD_MODE_1)
627 size = (size * (sizeof(struct RxD1)));
628 else
629 size = (size * (sizeof(struct RxD3)));
630
631 for (i = 0; i < config->rx_ring_num; i++) {
632 mac_control->rings[i].rx_curr_get_info.block_index = 0;
633 mac_control->rings[i].rx_curr_get_info.offset = 0;
634 mac_control->rings[i].rx_curr_get_info.ring_len =
635 config->rx_cfg[i].num_rxd - 1;
636 mac_control->rings[i].rx_curr_put_info.block_index = 0;
637 mac_control->rings[i].rx_curr_put_info.offset = 0;
638 mac_control->rings[i].rx_curr_put_info.ring_len =
639 config->rx_cfg[i].num_rxd - 1;
640 mac_control->rings[i].nic = nic;
641 mac_control->rings[i].ring_no = i;
642
643 blk_cnt = config->rx_cfg[i].num_rxd /
644 (rxd_count[nic->rxd_mode] + 1);
645 /* Allocating all the Rx blocks */
646 for (j = 0; j < blk_cnt; j++) {
647 struct rx_block_info *rx_blocks;
648 int l;
649
650 rx_blocks = &mac_control->rings[i].rx_blocks[j];
651 size = SIZE_OF_BLOCK; /* size is always the page size */
652 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
653 &tmp_p_addr);
654 if (tmp_v_addr == NULL) {
655 /*
656 * In case of failure, free_shared_mem()
657 * is called, which should free any
658 * memory that was allocated before the
659 * failure happened.
660 */
661 rx_blocks->block_virt_addr = tmp_v_addr;
662 return -ENOMEM;
663 }
664 mem_allocated += size;
665 memset(tmp_v_addr, 0, size);
666 rx_blocks->block_virt_addr = tmp_v_addr;
667 rx_blocks->block_dma_addr = tmp_p_addr;
668 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
669 rxd_count[nic->rxd_mode],
670 GFP_KERNEL);
671 if (!rx_blocks->rxds)
672 return -ENOMEM;
673 mem_allocated +=
674 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
675 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
676 rx_blocks->rxds[l].virt_addr =
677 rx_blocks->block_virt_addr +
678 (rxd_size[nic->rxd_mode] * l);
679 rx_blocks->rxds[l].dma_addr =
680 rx_blocks->block_dma_addr +
681 (rxd_size[nic->rxd_mode] * l);
682 }
683 }
684 /* Interlinking all Rx Blocks */
685 for (j = 0; j < blk_cnt; j++) {
686 tmp_v_addr =
687 mac_control->rings[i].rx_blocks[j].block_virt_addr;
688 tmp_v_addr_next =
689 mac_control->rings[i].rx_blocks[(j + 1) %
690 blk_cnt].block_virt_addr;
691 tmp_p_addr =
692 mac_control->rings[i].rx_blocks[j].block_dma_addr;
693 tmp_p_addr_next =
694 mac_control->rings[i].rx_blocks[(j + 1) %
695 blk_cnt].block_dma_addr;
696
697 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
698 pre_rxd_blk->reserved_2_pNext_RxD_block =
699 (unsigned long) tmp_v_addr_next;
700 pre_rxd_blk->pNext_RxD_Blk_physical =
701 (u64) tmp_p_addr_next;
702 }
703 }
704 if (nic->rxd_mode >= RXD_MODE_3A) {
705 /*
706 * Allocation of Storages for buffer addresses in 2BUFF mode
707 * and the buffers as well.
708 */
709 for (i = 0; i < config->rx_ring_num; i++) {
710 blk_cnt = config->rx_cfg[i].num_rxd /
711 (rxd_count[nic->rxd_mode]+ 1);
712 mac_control->rings[i].ba =
713 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
714 GFP_KERNEL);
715 if (!mac_control->rings[i].ba)
716 return -ENOMEM;
717 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
718 for (j = 0; j < blk_cnt; j++) {
719 int k = 0;
720 mac_control->rings[i].ba[j] =
721 kmalloc((sizeof(struct buffAdd) *
722 (rxd_count[nic->rxd_mode] + 1)),
723 GFP_KERNEL);
724 if (!mac_control->rings[i].ba[j])
725 return -ENOMEM;
726 mem_allocated += (sizeof(struct buffAdd) * \
727 (rxd_count[nic->rxd_mode] + 1));
728 while (k != rxd_count[nic->rxd_mode]) {
729 ba = &mac_control->rings[i].ba[j][k];
730
731 ba->ba_0_org = (void *) kmalloc
732 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
733 if (!ba->ba_0_org)
734 return -ENOMEM;
735 mem_allocated +=
736 (BUF0_LEN + ALIGN_SIZE);
737 tmp = (unsigned long)ba->ba_0_org;
738 tmp += ALIGN_SIZE;
739 tmp &= ~((unsigned long) ALIGN_SIZE);
740 ba->ba_0 = (void *) tmp;
741
742 ba->ba_1_org = (void *) kmalloc
743 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
744 if (!ba->ba_1_org)
745 return -ENOMEM;
746 mem_allocated
747 += (BUF1_LEN + ALIGN_SIZE);
748 tmp = (unsigned long) ba->ba_1_org;
749 tmp += ALIGN_SIZE;
750 tmp &= ~((unsigned long) ALIGN_SIZE);
751 ba->ba_1 = (void *) tmp;
752 k++;
753 }
754 }
755 }
756 }
757
758 /* Allocation and initialization of Statistics block */
759 size = sizeof(struct stat_block);
760 mac_control->stats_mem = pci_alloc_consistent
761 (nic->pdev, size, &mac_control->stats_mem_phy);
762
763 if (!mac_control->stats_mem) {
764 /*
765 * In case of failure, free_shared_mem() is called, which
766 * should free any memory that was allocated before the
767 * failure happened.
768 */
769 return -ENOMEM;
770 }
771 mem_allocated += size;
772 mac_control->stats_mem_sz = size;
773
774 tmp_v_addr = mac_control->stats_mem;
775 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
776 memset(tmp_v_addr, 0, size);
777 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
778 (unsigned long long) tmp_p_addr);
779 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
780 return SUCCESS;
781 }
782
783 /**
784 * free_shared_mem - Free the allocated Memory
785 * @nic: Device private variable.
786 * Description: This function is to free all memory locations allocated by
787 * the init_shared_mem() function and return it to the kernel.
788 */
789
790 static void free_shared_mem(struct s2io_nic *nic)
791 {
792 int i, j, blk_cnt, size;
793 u32 ufo_size = 0;
794 void *tmp_v_addr;
795 dma_addr_t tmp_p_addr;
796 struct mac_info *mac_control;
797 struct config_param *config;
798 int lst_size, lst_per_page;
799 struct net_device *dev = nic->dev;
800 int page_num = 0;
801
802 if (!nic)
803 return;
804
805 mac_control = &nic->mac_control;
806 config = &nic->config;
807
808 lst_size = (sizeof(struct TxD) * config->max_txds);
809 lst_per_page = PAGE_SIZE / lst_size;
810
811 for (i = 0; i < config->tx_fifo_num; i++) {
812 ufo_size += config->tx_cfg[i].fifo_len;
813 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
814 lst_per_page);
815 for (j = 0; j < page_num; j++) {
816 int mem_blks = (j * lst_per_page);
817 if (!mac_control->fifos[i].list_info)
818 return;
819 if (!mac_control->fifos[i].list_info[mem_blks].
820 list_virt_addr)
821 break;
822 pci_free_consistent(nic->pdev, PAGE_SIZE,
823 mac_control->fifos[i].
824 list_info[mem_blks].
825 list_virt_addr,
826 mac_control->fifos[i].
827 list_info[mem_blks].
828 list_phy_addr);
829 nic->mac_control.stats_info->sw_stat.mem_freed
830 += PAGE_SIZE;
831 }
832 /* If we got a zero DMA address during allocation,
833 * free the page now
834 */
835 if (mac_control->zerodma_virt_addr) {
836 pci_free_consistent(nic->pdev, PAGE_SIZE,
837 mac_control->zerodma_virt_addr,
838 (dma_addr_t)0);
839 DBG_PRINT(INIT_DBG,
840 "%s: Freeing TxDL with zero DMA addr. ",
841 dev->name);
842 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
843 mac_control->zerodma_virt_addr);
844 nic->mac_control.stats_info->sw_stat.mem_freed
845 += PAGE_SIZE;
846 }
847 kfree(mac_control->fifos[i].list_info);
848 nic->mac_control.stats_info->sw_stat.mem_freed +=
849 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
850 }
851
852 size = SIZE_OF_BLOCK;
853 for (i = 0; i < config->rx_ring_num; i++) {
854 blk_cnt = mac_control->rings[i].block_count;
855 for (j = 0; j < blk_cnt; j++) {
856 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
857 block_virt_addr;
858 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
859 block_dma_addr;
860 if (tmp_v_addr == NULL)
861 break;
862 pci_free_consistent(nic->pdev, size,
863 tmp_v_addr, tmp_p_addr);
864 nic->mac_control.stats_info->sw_stat.mem_freed += size;
865 kfree(mac_control->rings[i].rx_blocks[j].rxds);
866 nic->mac_control.stats_info->sw_stat.mem_freed +=
867 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
868 }
869 }
870
871 if (nic->rxd_mode >= RXD_MODE_3A) {
872 /* Freeing buffer storage addresses in 2BUFF mode. */
873 for (i = 0; i < config->rx_ring_num; i++) {
874 blk_cnt = config->rx_cfg[i].num_rxd /
875 (rxd_count[nic->rxd_mode] + 1);
876 for (j = 0; j < blk_cnt; j++) {
877 int k = 0;
878 if (!mac_control->rings[i].ba[j])
879 continue;
880 while (k != rxd_count[nic->rxd_mode]) {
881 struct buffAdd *ba =
882 &mac_control->rings[i].ba[j][k];
883 kfree(ba->ba_0_org);
884 nic->mac_control.stats_info->sw_stat.\
885 mem_freed += (BUF0_LEN + ALIGN_SIZE);
886 kfree(ba->ba_1_org);
887 nic->mac_control.stats_info->sw_stat.\
888 mem_freed += (BUF1_LEN + ALIGN_SIZE);
889 k++;
890 }
891 kfree(mac_control->rings[i].ba[j]);
892 nic->mac_control.stats_info->sw_stat.mem_freed += (sizeof(struct buffAdd) *
893 (rxd_count[nic->rxd_mode] + 1));
894 }
895 kfree(mac_control->rings[i].ba);
896 nic->mac_control.stats_info->sw_stat.mem_freed +=
897 (sizeof(struct buffAdd *) * blk_cnt);
898 }
899 }
900
901 if (mac_control->stats_mem) {
902 pci_free_consistent(nic->pdev,
903 mac_control->stats_mem_sz,
904 mac_control->stats_mem,
905 mac_control->stats_mem_phy);
906 nic->mac_control.stats_info->sw_stat.mem_freed +=
907 mac_control->stats_mem_sz;
908 }
909 if (nic->ufo_in_band_v) {
910 kfree(nic->ufo_in_band_v);
911 nic->mac_control.stats_info->sw_stat.mem_freed
912 += (ufo_size * sizeof(u64));
913 }
914 }
915
916 /**
917 * s2io_verify_pci_mode - Verify the PCI bus mode of the adapter.
918 */
919
920 static int s2io_verify_pci_mode(struct s2io_nic *nic)
921 {
922 struct XENA_dev_config __iomem *bar0 = nic->bar0;
923 register u64 val64 = 0;
924 int mode;
925
926 val64 = readq(&bar0->pci_mode);
927 mode = (u8)GET_PCI_MODE(val64);
928
929 if (val64 & PCI_MODE_UNKNOWN_MODE)
930 return -1; /* Unknown PCI mode */
931 return mode;
932 }
933
934 #define NEC_VENID 0x1033
935 #define NEC_DEVID 0x0125
936 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
937 {
938 struct pci_dev *tdev = NULL;
939 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
940 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID &&
941 tdev->bus == s2io_pdev->bus->parent) {
942 pci_dev_put(tdev);
943 return 1;
944 }
945 }
946 return 0;
947 }
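/*
 * Note on reference counting: pci_get_device() drops the reference on
 * the previously returned device and takes one on the next, so the loop
 * itself stays balanced; the explicit pci_dev_put() is needed only when
 * we return early holding a matching bridge.
 */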
948
949 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
950 /**
951 * s2io_print_pci_mode - Print the bus width and speed the adapter runs at.
952 */
953 static int s2io_print_pci_mode(struct s2io_nic *nic)
954 {
955 struct XENA_dev_config __iomem *bar0 = nic->bar0;
956 register u64 val64 = 0;
957 int mode;
958 struct config_param *config = &nic->config;
959
960 val64 = readq(&bar0->pci_mode);
961 mode = (u8)GET_PCI_MODE(val64);
962
963 if (val64 & PCI_MODE_UNKNOWN_MODE)
964 return -1; /* Unknown PCI mode */
965
966 config->bus_speed = bus_speed[mode];
967
968 if (s2io_on_nec_bridge(nic->pdev)) {
969 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
970 nic->dev->name);
971 return mode;
972 }
973
974 if (val64 & PCI_MODE_32_BITS) {
975 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
976 } else {
977 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
978 }
979
980 switch(mode) {
981 case PCI_MODE_PCI_33:
982 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
983 break;
984 case PCI_MODE_PCI_66:
985 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
986 break;
987 case PCI_MODE_PCIX_M1_66:
988 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
989 break;
990 case PCI_MODE_PCIX_M1_100:
991 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
992 break;
993 case PCI_MODE_PCIX_M1_133:
994 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
995 break;
996 case PCI_MODE_PCIX_M2_66:
997 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
998 break;
999 case PCI_MODE_PCIX_M2_100:
1000 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1001 break;
1002 case PCI_MODE_PCIX_M2_133:
1003 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1004 break;
1005 default:
1006 return -1; /* Unsupported bus speed */
1007 }
1008
1009 return mode;
1010 }
1011
1012 /**
1013 * init_nic - Initialization of hardware
1014 * @nic: device private variable
1015 * Description: The function sequentially configures every block
1016 * of the H/W from their reset values.
1017 * Return Value: SUCCESS on success and
1018 * '-1' on failure (endian settings incorrect).
1019 */
1020
1021 static int init_nic(struct s2io_nic *nic)
1022 {
1023 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1024 struct net_device *dev = nic->dev;
1025 register u64 val64 = 0;
1026 void __iomem *add;
1027 u32 time;
1028 int i, j;
1029 struct mac_info *mac_control;
1030 struct config_param *config;
1031 int dtx_cnt = 0;
1032 unsigned long long mem_share;
1033 int mem_size;
1034
1035 mac_control = &nic->mac_control;
1036 config = &nic->config;
1037
1038 /* to set the swapper control on the card */
1039 if (s2io_set_swapper(nic)) {
1040 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1041 return -1;
1042 }
1043
1044 /*
1045 * Herc requires EOI to be removed from reset before XGXS, so..
1046 */
1047 if (nic->device_type & XFRAME_II_DEVICE) {
1048 val64 = 0xA500000000ULL;
1049 writeq(val64, &bar0->sw_reset);
1050 msleep(500);
1051 val64 = readq(&bar0->sw_reset);
1052 }
1053
1054 /* Remove XGXS from reset state */
1055 val64 = 0;
1056 writeq(val64, &bar0->sw_reset);
1057 msleep(500);
1058 val64 = readq(&bar0->sw_reset);
1059
1060 /* Enable Receiving broadcasts */
1061 add = &bar0->mac_cfg;
1062 val64 = readq(&bar0->mac_cfg);
1063 val64 |= MAC_RMAC_BCAST_ENABLE;
1064 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1065 writel((u32) val64, add);
1066 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1067 writel((u32) (val64 >> 32), (add + 4));
1068
1069 /* Read registers in all blocks */
1070 val64 = readq(&bar0->mac_int_mask);
1071 val64 = readq(&bar0->mc_int_mask);
1072 val64 = readq(&bar0->xgxs_int_mask);
1073
1074 /* Set MTU */
1075 val64 = dev->mtu;
1076 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1077
1078 if (nic->device_type & XFRAME_II_DEVICE) {
1079 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1080 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1081 &bar0->dtx_control, UF);
1082 if (dtx_cnt & 0x1)
1083 msleep(1); /* Necessary!! */
1084 dtx_cnt++;
1085 }
1086 } else {
1087 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1088 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1089 &bar0->dtx_control, UF);
1090 val64 = readq(&bar0->dtx_control);
1091 dtx_cnt++;
1092 }
1093 }
1094
1095 /* Tx DMA Initialization */
1096 val64 = 0;
1097 writeq(val64, &bar0->tx_fifo_partition_0);
1098 writeq(val64, &bar0->tx_fifo_partition_1);
1099 writeq(val64, &bar0->tx_fifo_partition_2);
1100 writeq(val64, &bar0->tx_fifo_partition_3);
1101
1102
1103 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1104 val64 |=
1105 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1106 13) | vBIT(config->tx_cfg[i].fifo_priority,
1107 ((i * 32) + 5), 3);
1108
1109 if (i == (config->tx_fifo_num - 1)) {
1110 if (i % 2 == 0)
1111 i++;
1112 }
1113
1114 switch (i) {
1115 case 1:
1116 writeq(val64, &bar0->tx_fifo_partition_0);
1117 val64 = 0;
1118 break;
1119 case 3:
1120 writeq(val64, &bar0->tx_fifo_partition_1);
1121 val64 = 0;
1122 break;
1123 case 5:
1124 writeq(val64, &bar0->tx_fifo_partition_2);
1125 val64 = 0;
1126 break;
1127 case 7:
1128 writeq(val64, &bar0->tx_fifo_partition_3);
1129 break;
1130 }
1131 }
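/*
 * A worked pass for tx_fifo_num = 3: FIFOs 0 and 1 fill
 * tx_fifo_partition_0 (written at i == 1); FIFO 2's bits are then
 * accumulated and, since it is the last FIFO and i is even, i is bumped
 * to 3 so the half-filled partition_1 register is still flushed at the
 * "case 3" label.
 */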
1132
1133 /*
1134 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1135 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1136 */
1137 if ((nic->device_type == XFRAME_I_DEVICE) &&
1138 (get_xena_rev_id(nic->pdev) < 4))
1139 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1140
1141 val64 = readq(&bar0->tx_fifo_partition_0);
1142 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1143 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1144
1145 /*
1146 * Initialization of Tx_PA_CONFIG register to ignore packet
1147 * integrity checking.
1148 */
1149 val64 = readq(&bar0->tx_pa_cfg);
1150 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1151 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1152 writeq(val64, &bar0->tx_pa_cfg);
1153
1154 /* Rx DMA initialization. */
1155 val64 = 0;
1156 for (i = 0; i < config->rx_ring_num; i++) {
1157 val64 |=
1158 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1159 3);
1160 }
1161 writeq(val64, &bar0->rx_queue_priority);
1162
1163 /*
1164 * Allocating equal share of memory to all the
1165 * configured Rings.
1166 */
1167 val64 = 0;
1168 if (nic->device_type & XFRAME_II_DEVICE)
1169 mem_size = 32;
1170 else
1171 mem_size = 64;
1172
1173 for (i = 0; i < config->rx_ring_num; i++) {
1174 switch (i) {
1175 case 0:
1176 mem_share = (mem_size / config->rx_ring_num +
1177 mem_size % config->rx_ring_num);
1178 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1179 continue;
1180 case 1:
1181 mem_share = (mem_size / config->rx_ring_num);
1182 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1183 continue;
1184 case 2:
1185 mem_share = (mem_size / config->rx_ring_num);
1186 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1187 continue;
1188 case 3:
1189 mem_share = (mem_size / config->rx_ring_num);
1190 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1191 continue;
1192 case 4:
1193 mem_share = (mem_size / config->rx_ring_num);
1194 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1195 continue;
1196 case 5:
1197 mem_share = (mem_size / config->rx_ring_num);
1198 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1199 continue;
1200 case 6:
1201 mem_share = (mem_size / config->rx_ring_num);
1202 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1203 continue;
1204 case 7:
1205 mem_share = (mem_size / config->rx_ring_num);
1206 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1207 continue;
1208 }
1209 }
1210 writeq(val64, &bar0->rx_queue_cfg);
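/*
 * Example of the split above: a Xframe I (mem_size = 64) with three
 * rings gives ring 0 a share of 64/3 + 64%3 = 21 + 1 = 22 units and
 * rings 1 and 2 get 21 each, i.e. the integer-division remainder always
 * lands on Q0.
 */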
1211
1212 /*
1213 * Filling Tx round robin registers
1214 * as per the number of FIFOs
1215 */
1216 switch (config->tx_fifo_num) {
1217 case 1:
1218 val64 = 0x0000000000000000ULL;
1219 writeq(val64, &bar0->tx_w_round_robin_0);
1220 writeq(val64, &bar0->tx_w_round_robin_1);
1221 writeq(val64, &bar0->tx_w_round_robin_2);
1222 writeq(val64, &bar0->tx_w_round_robin_3);
1223 writeq(val64, &bar0->tx_w_round_robin_4);
1224 break;
1225 case 2:
1226 val64 = 0x0000010000010000ULL;
1227 writeq(val64, &bar0->tx_w_round_robin_0);
1228 val64 = 0x0100000100000100ULL;
1229 writeq(val64, &bar0->tx_w_round_robin_1);
1230 val64 = 0x0001000001000001ULL;
1231 writeq(val64, &bar0->tx_w_round_robin_2);
1232 val64 = 0x0000010000010000ULL;
1233 writeq(val64, &bar0->tx_w_round_robin_3);
1234 val64 = 0x0100000000000000ULL;
1235 writeq(val64, &bar0->tx_w_round_robin_4);
1236 break;
1237 case 3:
1238 val64 = 0x0001000102000001ULL;
1239 writeq(val64, &bar0->tx_w_round_robin_0);
1240 val64 = 0x0001020000010001ULL;
1241 writeq(val64, &bar0->tx_w_round_robin_1);
1242 val64 = 0x0200000100010200ULL;
1243 writeq(val64, &bar0->tx_w_round_robin_2);
1244 val64 = 0x0001000102000001ULL;
1245 writeq(val64, &bar0->tx_w_round_robin_3);
1246 val64 = 0x0001020000000000ULL;
1247 writeq(val64, &bar0->tx_w_round_robin_4);
1248 break;
1249 case 4:
1250 val64 = 0x0001020300010200ULL;
1251 writeq(val64, &bar0->tx_w_round_robin_0);
1252 val64 = 0x0100000102030001ULL;
1253 writeq(val64, &bar0->tx_w_round_robin_1);
1254 val64 = 0x0200010000010203ULL;
1255 writeq(val64, &bar0->tx_w_round_robin_2);
1256 val64 = 0x0001020001000001ULL;
1257 writeq(val64, &bar0->tx_w_round_robin_3);
1258 val64 = 0x0203000100000000ULL;
1259 writeq(val64, &bar0->tx_w_round_robin_4);
1260 break;
1261 case 5:
1262 val64 = 0x0001000203000102ULL;
1263 writeq(val64, &bar0->tx_w_round_robin_0);
1264 val64 = 0x0001020001030004ULL;
1265 writeq(val64, &bar0->tx_w_round_robin_1);
1266 val64 = 0x0001000203000102ULL;
1267 writeq(val64, &bar0->tx_w_round_robin_2);
1268 val64 = 0x0001020001030004ULL;
1269 writeq(val64, &bar0->tx_w_round_robin_3);
1270 val64 = 0x0001000000000000ULL;
1271 writeq(val64, &bar0->tx_w_round_robin_4);
1272 break;
1273 case 6:
1274 val64 = 0x0001020304000102ULL;
1275 writeq(val64, &bar0->tx_w_round_robin_0);
1276 val64 = 0x0304050001020001ULL;
1277 writeq(val64, &bar0->tx_w_round_robin_1);
1278 val64 = 0x0203000100000102ULL;
1279 writeq(val64, &bar0->tx_w_round_robin_2);
1280 val64 = 0x0304000102030405ULL;
1281 writeq(val64, &bar0->tx_w_round_robin_3);
1282 val64 = 0x0001000200000000ULL;
1283 writeq(val64, &bar0->tx_w_round_robin_4);
1284 break;
1285 case 7:
1286 val64 = 0x0001020001020300ULL;
1287 writeq(val64, &bar0->tx_w_round_robin_0);
1288 val64 = 0x0102030400010203ULL;
1289 writeq(val64, &bar0->tx_w_round_robin_1);
1290 val64 = 0x0405060001020001ULL;
1291 writeq(val64, &bar0->tx_w_round_robin_2);
1292 val64 = 0x0304050000010200ULL;
1293 writeq(val64, &bar0->tx_w_round_robin_3);
1294 val64 = 0x0102030000000000ULL;
1295 writeq(val64, &bar0->tx_w_round_robin_4);
1296 break;
1297 case 8:
1298 val64 = 0x0001020300040105ULL;
1299 writeq(val64, &bar0->tx_w_round_robin_0);
1300 val64 = 0x0200030106000204ULL;
1301 writeq(val64, &bar0->tx_w_round_robin_1);
1302 val64 = 0x0103000502010007ULL;
1303 writeq(val64, &bar0->tx_w_round_robin_2);
1304 val64 = 0x0304010002060500ULL;
1305 writeq(val64, &bar0->tx_w_round_robin_3);
1306 val64 = 0x0103020400000000ULL;
1307 writeq(val64, &bar0->tx_w_round_robin_4);
1308 break;
1309 }
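/*
 * Each byte of the tx_w_round_robin_* values above appears to name the
 * FIFO owning one arbitration slot; e.g. the two-FIFO pattern
 * 0x0000010000010000 decodes to slots 0,0,1,0,0,1,0,0, giving FIFO 1
 * every third slot of the 40-entry calendar spread across the five
 * registers.
 */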
1310
1311 /* Enable all configured Tx FIFO partitions */
1312 val64 = readq(&bar0->tx_fifo_partition_0);
1313 val64 |= (TX_FIFO_PARTITION_EN);
1314 writeq(val64, &bar0->tx_fifo_partition_0);
1315
1316 /* Filling the Rx round robin registers as per the
1317 * number of Rings and steering based on QoS.
1318 */
1319 switch (config->rx_ring_num) {
1320 case 1:
1321 val64 = 0x8080808080808080ULL;
1322 writeq(val64, &bar0->rts_qos_steering);
1323 break;
1324 case 2:
1325 val64 = 0x0000010000010000ULL;
1326 writeq(val64, &bar0->rx_w_round_robin_0);
1327 val64 = 0x0100000100000100ULL;
1328 writeq(val64, &bar0->rx_w_round_robin_1);
1329 val64 = 0x0001000001000001ULL;
1330 writeq(val64, &bar0->rx_w_round_robin_2);
1331 val64 = 0x0000010000010000ULL;
1332 writeq(val64, &bar0->rx_w_round_robin_3);
1333 val64 = 0x0100000000000000ULL;
1334 writeq(val64, &bar0->rx_w_round_robin_4);
1335
1336 val64 = 0x8080808040404040ULL;
1337 writeq(val64, &bar0->rts_qos_steering);
1338 break;
1339 case 3:
1340 val64 = 0x0001000102000001ULL;
1341 writeq(val64, &bar0->rx_w_round_robin_0);
1342 val64 = 0x0001020000010001ULL;
1343 writeq(val64, &bar0->rx_w_round_robin_1);
1344 val64 = 0x0200000100010200ULL;
1345 writeq(val64, &bar0->rx_w_round_robin_2);
1346 val64 = 0x0001000102000001ULL;
1347 writeq(val64, &bar0->rx_w_round_robin_3);
1348 val64 = 0x0001020000000000ULL;
1349 writeq(val64, &bar0->rx_w_round_robin_4);
1350
1351 val64 = 0x8080804040402020ULL;
1352 writeq(val64, &bar0->rts_qos_steering);
1353 break;
1354 case 4:
1355 val64 = 0x0001020300010200ULL;
1356 writeq(val64, &bar0->rx_w_round_robin_0);
1357 val64 = 0x0100000102030001ULL;
1358 writeq(val64, &bar0->rx_w_round_robin_1);
1359 val64 = 0x0200010000010203ULL;
1360 writeq(val64, &bar0->rx_w_round_robin_2);
1361 val64 = 0x0001020001000001ULL;
1362 writeq(val64, &bar0->rx_w_round_robin_3);
1363 val64 = 0x0203000100000000ULL;
1364 writeq(val64, &bar0->rx_w_round_robin_4);
1365
1366 val64 = 0x8080404020201010ULL;
1367 writeq(val64, &bar0->rts_qos_steering);
1368 break;
1369 case 5:
1370 val64 = 0x0001000203000102ULL;
1371 writeq(val64, &bar0->rx_w_round_robin_0);
1372 val64 = 0x0001020001030004ULL;
1373 writeq(val64, &bar0->rx_w_round_robin_1);
1374 val64 = 0x0001000203000102ULL;
1375 writeq(val64, &bar0->rx_w_round_robin_2);
1376 val64 = 0x0001020001030004ULL;
1377 writeq(val64, &bar0->rx_w_round_robin_3);
1378 val64 = 0x0001000000000000ULL;
1379 writeq(val64, &bar0->rx_w_round_robin_4);
1380
1381 val64 = 0x8080404020201008ULL;
1382 writeq(val64, &bar0->rts_qos_steering);
1383 break;
1384 case 6:
1385 val64 = 0x0001020304000102ULL;
1386 writeq(val64, &bar0->rx_w_round_robin_0);
1387 val64 = 0x0304050001020001ULL;
1388 writeq(val64, &bar0->rx_w_round_robin_1);
1389 val64 = 0x0203000100000102ULL;
1390 writeq(val64, &bar0->rx_w_round_robin_2);
1391 val64 = 0x0304000102030405ULL;
1392 writeq(val64, &bar0->rx_w_round_robin_3);
1393 val64 = 0x0001000200000000ULL;
1394 writeq(val64, &bar0->rx_w_round_robin_4);
1395
1396 val64 = 0x8080404020100804ULL;
1397 writeq(val64, &bar0->rts_qos_steering);
1398 break;
1399 case 7:
1400 val64 = 0x0001020001020300ULL;
1401 writeq(val64, &bar0->rx_w_round_robin_0);
1402 val64 = 0x0102030400010203ULL;
1403 writeq(val64, &bar0->rx_w_round_robin_1);
1404 val64 = 0x0405060001020001ULL;
1405 writeq(val64, &bar0->rx_w_round_robin_2);
1406 val64 = 0x0304050000010200ULL;
1407 writeq(val64, &bar0->rx_w_round_robin_3);
1408 val64 = 0x0102030000000000ULL;
1409 writeq(val64, &bar0->rx_w_round_robin_4);
1410
1411 val64 = 0x8080402010080402ULL;
1412 writeq(val64, &bar0->rts_qos_steering);
1413 break;
1414 case 8:
1415 val64 = 0x0001020300040105ULL;
1416 writeq(val64, &bar0->rx_w_round_robin_0);
1417 val64 = 0x0200030106000204ULL;
1418 writeq(val64, &bar0->rx_w_round_robin_1);
1419 val64 = 0x0103000502010007ULL;
1420 writeq(val64, &bar0->rx_w_round_robin_2);
1421 val64 = 0x0304010002060500ULL;
1422 writeq(val64, &bar0->rx_w_round_robin_3);
1423 val64 = 0x0103020400000000ULL;
1424 writeq(val64, &bar0->rx_w_round_robin_4);
1425
1426 val64 = 0x8040201008040201ULL;
1427 writeq(val64, &bar0->rts_qos_steering);
1428 break;
1429 }
1430
1431 /* UDP Fix */
1432 val64 = 0;
1433 for (i = 0; i < 8; i++)
1434 writeq(val64, &bar0->rts_frm_len_n[i]);
1435
1436 /* Set the default rts frame length for the rings configured */
1437 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1438 for (i = 0 ; i < config->rx_ring_num ; i++)
1439 writeq(val64, &bar0->rts_frm_len_n[i]);
1440
1441 /* Set the frame length for the configured rings
1442 * desired by the user
1443 */
1444 for (i = 0; i < config->rx_ring_num; i++) {
1445 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1446 * not specified frame length steering.
1447 * If the user provides the frame length then program
1448 * the rts_frm_len register for those values or else
1449 * leave it as it is.
1450 */
1451 if (rts_frm_len[i] != 0) {
1452 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1453 &bar0->rts_frm_len_n[i]);
1454 }
1455 }
1456
1457 /* Disable differentiated services steering logic */
1458 for (i = 0; i < 64; i++) {
1459 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1460 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1461 dev->name);
1462 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1463 return FAILURE;
1464 }
1465 }
1466
1467 /* Program statistics memory */
1468 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1469
1470 if (nic->device_type == XFRAME_II_DEVICE) {
1471 val64 = STAT_BC(0x320);
1472 writeq(val64, &bar0->stat_byte_cnt);
1473 }
1474
1475 /*
1476 * Initializing the sampling rate for the device to calculate the
1477 * bandwidth utilization.
1478 */
1479 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1480 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1481 writeq(val64, &bar0->mac_link_util);
1482
1483
1484 /*
1485 * Initializing the Transmit and Receive Traffic Interrupt
1486 * Scheme.
1487 */
1488 /*
1489 * TTI Initialization. Default Tx timer gets us about
1490 * 250 interrupts per sec. Continuous interrupts are enabled
1491 * by default.
1492 */
1493 if (nic->device_type == XFRAME_II_DEVICE) {
1494 int count = (nic->config.bus_speed * 125)/2;
1495 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1496 } else {
1497
1498 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1499 }
1500 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1501 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1502 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1503 if (use_continuous_tx_intrs)
1504 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1505 writeq(val64, &bar0->tti_data1_mem);
1506
1507 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1508 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1509 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1510 writeq(val64, &bar0->tti_data2_mem);
1511
1512 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1513 writeq(val64, &bar0->tti_command_mem);
1514
1515 /*
1516 * Once the operation completes, the Strobe bit of the command
1517 * register will be reset. We poll for this particular condition.
1518 * We wait for a maximum of 500ms for the operation to complete;
1519 * if it's not complete by then we return an error.
1520 */
1521 time = 0;
1522 while (TRUE) {
1523 val64 = readq(&bar0->tti_command_mem);
1524 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1525 break;
1526 }
1527 if (time > 10) {
1528 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1529 dev->name);
1530 return -1;
1531 }
1532 msleep(50);
1533 time++;
1534 }
1535
1536 if (nic->config.bimodal) {
1537 int k = 0;
1538 for (k = 0; k < config->rx_ring_num; k++) {
1539 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1540 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1541 writeq(val64, &bar0->tti_command_mem);
1542
1543 /*
1544 * Once the operation completes, the Strobe bit of the command
1545 * register will be reset. We poll for this particular condition.
1546 * We wait for a maximum of 500ms for the operation to complete;
1547 * if it's not complete by then we return an error.
1548 */
1549 time = 0;
1550 while (TRUE) {
1551 val64 = readq(&bar0->tti_command_mem);
1552 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1553 break;
1554 }
1555 if (time > 10) {
1556 DBG_PRINT(ERR_DBG,
1557 "%s: TTI init Failed\n",
1558 dev->name);
1559 return -1;
1560 }
1561 time++;
1562 msleep(50);
1563 }
1564 }
1565 } else {
1566
1567 /* RTI Initialization */
1568 if (nic->device_type == XFRAME_II_DEVICE) {
1569 /*
1570 * Programmed to generate approx. 500 interrupts per
1571 * second
1572 */
1573 int count = (nic->config.bus_speed * 125)/4;
1574 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1575 } else {
1576 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1577 }
1578 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1579 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1580 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1581
1582 writeq(val64, &bar0->rti_data1_mem);
1583
1584 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1585 RTI_DATA2_MEM_RX_UFC_B(0x2);
1586 if (nic->intr_type == MSI_X)
1587 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1588 RTI_DATA2_MEM_RX_UFC_D(0x40));
1589 else
1590 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1591 RTI_DATA2_MEM_RX_UFC_D(0x80));
1592 writeq(val64, &bar0->rti_data2_mem);
1593
1594 for (i = 0; i < config->rx_ring_num; i++) {
1595 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1596 | RTI_CMD_MEM_OFFSET(i);
1597 writeq(val64, &bar0->rti_command_mem);
1598
1599 /*
1600 * Once the operation completes, the Strobe bit of the
1601 * command register will be reset. We poll for this
1602 * particular condition. We wait for a maximum of 500ms
1603 * for the operation to complete, if it's not complete
1604 * by then we return error.
1605 */
1606 time = 0;
1607 while (TRUE) {
1608 val64 = readq(&bar0->rti_command_mem);
1609 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1610 break;
1611 }
1612 if (time > 10) {
1613 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1614 dev->name);
1615 return -1;
1616 }
1617 time++;
1618 msleep(50);
1619 }
1620 }
1621 }
1622
1623 /*
1624 * Initializing proper values as Pause threshold into all
1625 * the 8 Queues on Rx side.
1626 */
1627 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1628 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1629
1630 /* Disable RMAC PAD STRIPPING */
1631 add = &bar0->mac_cfg;
1632 val64 = readq(&bar0->mac_cfg);
1633 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1634 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1635 writel((u32) (val64), add);
1636 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1637 writel((u32) (val64 >> 32), (add + 4));
1638 val64 = readq(&bar0->mac_cfg);
1639
1640 /* Enable FCS stripping by adapter */
1641 add = &bar0->mac_cfg;
1642 val64 = readq(&bar0->mac_cfg);
1643 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1644 if (nic->device_type == XFRAME_II_DEVICE)
1645 writeq(val64, &bar0->mac_cfg);
1646 else {
1647 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1648 writel((u32) (val64), add);
1649 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1650 writel((u32) (val64 >> 32), (add + 4));
1651 }
1652
1653 /*
1654 * Set the time value to be inserted in the pause frame
1655 * generated by xena.
1656 */
1657 val64 = readq(&bar0->rmac_pause_cfg);
1658 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1659 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1660 writeq(val64, &bar0->rmac_pause_cfg);
1661
1662 /*
1663 * Set the threshold limit for generating pause frames.
1664 * If the amount of data in any queue exceeds the ratio
1665 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1666 * a pause frame is generated.
1667 */
1668 val64 = 0;
1669 for (i = 0; i < 4; i++) {
1670 val64 |=
1671 (((u64) 0xFF00 | nic->mac_control.
1672 mc_pause_threshold_q0q3)
1673 << (i * 2 * 8));
1674 }
1675 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1676
1677 val64 = 0;
1678 for (i = 0; i < 4; i++) {
1679 val64 |=
1680 (((u64) 0xFF00 | nic->mac_control.
1681 mc_pause_threshold_q4q7)
1682 << (i * 2 * 8));
1683 }
1684 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1685
1686 /*
1687 * TxDMA will stop issuing read requests if the number of read
1688 * splits exceeds the limit set by shared_splits.
1689 */
1690 val64 = readq(&bar0->pic_control);
1691 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1692 writeq(val64, &bar0->pic_control);
1693
1694 if (nic->config.bus_speed == 266) {
1695 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1696 writeq(0x0, &bar0->read_retry_delay);
1697 writeq(0x0, &bar0->write_retry_delay);
1698 }
1699
1700 /*
1701 * Programming the Herc to split every write transaction
1702 * that does not start on an ADB to reduce disconnects.
1703 */
1704 if (nic->device_type == XFRAME_II_DEVICE) {
1705 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1706 MISC_LINK_STABILITY_PRD(3);
1707 writeq(val64, &bar0->misc_control);
1708 val64 = readq(&bar0->pic_control2);
1709 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1710 writeq(val64, &bar0->pic_control2);
1711 }
1712 if (strstr(nic->product_name, "CX4")) {
1713 val64 = TMAC_AVG_IPG(0x17);
1714 writeq(val64, &bar0->tmac_avg_ipg);
1715 }
1716
1717 return SUCCESS;
1718 }
1719 #define LINK_UP_DOWN_INTERRUPT 1
1720 #define MAC_RMAC_ERR_TIMER 2
1721
1722 static int s2io_link_fault_indication(struct s2io_nic *nic)
1723 {
1724 if (nic->intr_type != INTA)
1725 return MAC_RMAC_ERR_TIMER;
1726 if (nic->device_type == XFRAME_II_DEVICE)
1727 return LINK_UP_DOWN_INTERRUPT;
1728 else
1729 return MAC_RMAC_ERR_TIMER;
1730 }
1731
1732 /**
1733 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1734 * @nic: device private variable,
1735 * @mask: A mask indicating which Intr block must be modified, and
1736 * @flag: A flag indicating whether to enable or disable the Intrs.
1737 * Description: This function will either disable or enable the interrupts
1738 * depending on the flag argument. The mask argument can be used to
1739 * enable/disable any Intr block.
1740 * Return Value: NONE.
1741 */
1742
1743 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1744 {
1745 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1746 register u64 val64 = 0, temp64 = 0;
1747
1748 /* Top level interrupt classification */
1749 /* PIC Interrupts */
1750 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1751 /* Enable PIC Intrs in the general intr mask register */
1752 val64 = TXPIC_INT_M;
1753 if (flag == ENABLE_INTRS) {
1754 temp64 = readq(&bar0->general_int_mask);
1755 temp64 &= ~((u64) val64);
1756 writeq(temp64, &bar0->general_int_mask);
1757 /*
1758 * If this is a Hercules adapter, enable GPIO; otherwise
1759 * disable all PCIX, Flash, MDIO, IIC and GPIO
1760 * interrupts for now.
1761 * TODO
1762 */
1763 if (s2io_link_fault_indication(nic) ==
1764 LINK_UP_DOWN_INTERRUPT ) {
1765 temp64 = readq(&bar0->pic_int_mask);
1766 temp64 &= ~((u64) PIC_INT_GPIO);
1767 writeq(temp64, &bar0->pic_int_mask);
1768 temp64 = readq(&bar0->gpio_int_mask);
1769 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1770 writeq(temp64, &bar0->gpio_int_mask);
1771 } else {
1772 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1773 }
1774 /*
1775 * No MSI Support is available presently, so TTI and
1776 * RTI interrupts are also disabled.
1777 */
1778 } else if (flag == DISABLE_INTRS) {
1779 /*
1780 * Disable PIC Intrs in the general
1781 * intr mask register
1782 */
1783 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1784 temp64 = readq(&bar0->general_int_mask);
1785 val64 |= temp64;
1786 writeq(val64, &bar0->general_int_mask);
1787 }
1788 }
1789
1790 /* MAC Interrupts */
1791 /* Enabling/Disabling MAC interrupts */
1792 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1793 val64 = TXMAC_INT_M | RXMAC_INT_M;
1794 if (flag == ENABLE_INTRS) {
1795 temp64 = readq(&bar0->general_int_mask);
1796 temp64 &= ~((u64) val64);
1797 writeq(temp64, &bar0->general_int_mask);
1798 /*
1799 * All MAC block error interrupts are disabled for now
1800 * TODO
1801 */
1802 } else if (flag == DISABLE_INTRS) {
1803 /*
1804 * Disable MAC Intrs in the general intr mask register
1805 */
1806 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1807 writeq(DISABLE_ALL_INTRS,
1808 &bar0->mac_rmac_err_mask);
1809
1810 temp64 = readq(&bar0->general_int_mask);
1811 val64 |= temp64;
1812 writeq(val64, &bar0->general_int_mask);
1813 }
1814 }
1815
1816 /* Tx traffic interrupts */
1817 if (mask & TX_TRAFFIC_INTR) {
1818 val64 = TXTRAFFIC_INT_M;
1819 if (flag == ENABLE_INTRS) {
1820 temp64 = readq(&bar0->general_int_mask);
1821 temp64 &= ~((u64) val64);
1822 writeq(temp64, &bar0->general_int_mask);
1823 /*
1824 * Enable all the Tx side interrupts
1825 * writing 0 Enables all 64 TX interrupt levels
1826 */
1827 writeq(0x0, &bar0->tx_traffic_mask);
1828 } else if (flag == DISABLE_INTRS) {
1829 /*
1830 * Disable Tx Traffic Intrs in the general intr mask
1831 * register.
1832 */
1833 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1834 temp64 = readq(&bar0->general_int_mask);
1835 val64 |= temp64;
1836 writeq(val64, &bar0->general_int_mask);
1837 }
1838 }
1839
1840 /* Rx traffic interrupts */
1841 if (mask & RX_TRAFFIC_INTR) {
1842 val64 = RXTRAFFIC_INT_M;
1843 if (flag == ENABLE_INTRS) {
1844 temp64 = readq(&bar0->general_int_mask);
1845 temp64 &= ~((u64) val64);
1846 writeq(temp64, &bar0->general_int_mask);
1847 /* writing 0 Enables all 8 RX interrupt levels */
1848 writeq(0x0, &bar0->rx_traffic_mask);
1849 } else if (flag == DISABLE_INTRS) {
1850 /*
1851 * Disable Rx Traffic Intrs in the general intr mask
1852 * register.
1853 */
1854 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1855 temp64 = readq(&bar0->general_int_mask);
1856 val64 |= temp64;
1857 writeq(val64, &bar0->general_int_mask);
1858 }
1859 }
1860 }
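
/*
 * Usage sketch (illustrative only, compiled out): callers OR together the
 * per-block mask bits and pass ENABLE_INTRS or DISABLE_INTRS, exactly as
 * stop_nic() further below does for the disable case. The function name
 * here is hypothetical.
 */
#if 0
static void example_enable_traffic_intrs(struct s2io_nic *nic)
{
	u16 mask = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;

	/* Unmask Tx/Rx traffic interrupts in the general_int_mask register */
	en_dis_able_nic_intrs(nic, mask, ENABLE_INTRS);
}
#endif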
1861
1862 /**
1863  * verify_pcc_quiescent - Checks for PCC quiescent state
1864  * Return: 1 if the PCC is quiescent
1865  *         0 if the PCC is not quiescent
1866 */
1867 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1868 {
1869 int ret = 0, herc;
1870 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1871 u64 val64 = readq(&bar0->adapter_status);
1872
1873 herc = (sp->device_type == XFRAME_II_DEVICE);
1874
1875 if (flag == FALSE) {
1876 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1877 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1878 ret = 1;
1879 } else {
1880 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1881 ret = 1;
1882 }
1883 } else {
1884 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1885 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1886 ADAPTER_STATUS_RMAC_PCC_IDLE))
1887 ret = 1;
1888 } else {
1889 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1890 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1891 ret = 1;
1892 }
1893 }
1894
1895 return ret;
1896 }
1897 /**
1898 * verify_xena_quiescence - Checks whether the H/W is ready
1899  * Description: Returns whether the H/W is ready to go or not. Each
1900  * ready/quiescent bit of the adapter status register is checked and
1901  * the first block that is not ready is reported.
1902  *
1903  * Return: 1 if Xena is quiescent
1904  *         0 if Xena is not quiescent
1905 */
1906
1907 static int verify_xena_quiescence(struct s2io_nic *sp)
1908 {
1909 int mode;
1910 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1911 u64 val64 = readq(&bar0->adapter_status);
1912 mode = s2io_verify_pci_mode(sp);
1913
1914 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1915 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1916 return 0;
1917 }
1918 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1919 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1920 return 0;
1921 }
1922 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1923 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1924 return 0;
1925 }
1926 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1927 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1928 return 0;
1929 }
1930 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1931 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1932 return 0;
1933 }
1934 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1935 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1936 return 0;
1937 }
1938 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1939 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1940 return 0;
1941 }
1942 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1943 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1944 return 0;
1945 }
1946
1947 /*
1948 	 * In PCI 33 mode, the P_PLL is not used, and therefore
1949 	 * the P_PLL_LOCK bit in the adapter_status register will
1950 * not be asserted.
1951 */
1952 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1953 sp->device_type == XFRAME_II_DEVICE && mode !=
1954 PCI_MODE_PCI_33) {
1955 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1956 return 0;
1957 }
1958 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1959 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1960 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1961 return 0;
1962 }
1963 return 1;
1964 }
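
/*
 * Illustrative retry loop (an assumption, not driver code): start_nic()
 * below checks quiescence once and bails out on failure; a caller that
 * preferred to wait could poll with a delay, e.g.:
 */
#if 0
static int example_wait_for_quiescence(struct s2io_nic *sp)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (verify_xena_quiescence(sp))
			return SUCCESS;	/* all ready/quiescent bits asserted */
		msleep(100);		/* give the H/W time to settle */
	}
	return FAILURE;
}
#endif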
1965
1966 /**
1967  * fix_mac_address - Fix for MAC addr problem on Alpha platforms
1968  * @sp: Pointer to device specific structure
1969  * Description :
1970  * New procedure to clear MAC address reading problems on Alpha platforms
1971 *
1972 */
1973
1974 static void fix_mac_address(struct s2io_nic * sp)
1975 {
1976 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1977 u64 val64;
1978 int i = 0;
1979
1980 while (fix_mac[i] != END_SIGN) {
1981 writeq(fix_mac[i++], &bar0->gpio_control);
1982 udelay(10);
1983 val64 = readq(&bar0->gpio_control);
1984 }
1985 }
1986
1987 /**
1988 * start_nic - Turns the device on
1989 * @nic : device private variable.
1990 * Description:
1991 * This function actually turns the device on. Before this function is
1992  * called, all registers are configured from their reset states
1993 * and shared memory is allocated but the NIC is still quiescent. On
1994 * calling this function, the device interrupts are cleared and the NIC is
1995 * literally switched on by writing into the adapter control register.
1996 * Return Value:
1997 * SUCCESS on success and -1 on failure.
1998 */
1999
2000 static int start_nic(struct s2io_nic *nic)
2001 {
2002 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2003 struct net_device *dev = nic->dev;
2004 register u64 val64 = 0;
2005 u16 subid, i;
2006 struct mac_info *mac_control;
2007 struct config_param *config;
2008
2009 mac_control = &nic->mac_control;
2010 config = &nic->config;
2011
2012 /* PRC Initialization and configuration */
2013 for (i = 0; i < config->rx_ring_num; i++) {
2014 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2015 &bar0->prc_rxd0_n[i]);
2016
2017 val64 = readq(&bar0->prc_ctrl_n[i]);
2018 if (nic->config.bimodal)
2019 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
2020 if (nic->rxd_mode == RXD_MODE_1)
2021 val64 |= PRC_CTRL_RC_ENABLED;
2022 else
2023 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2024 if (nic->device_type == XFRAME_II_DEVICE)
2025 val64 |= PRC_CTRL_GROUP_READS;
2026 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2027 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2028 writeq(val64, &bar0->prc_ctrl_n[i]);
2029 }
2030
2031 if (nic->rxd_mode == RXD_MODE_3B) {
2032 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2033 val64 = readq(&bar0->rx_pa_cfg);
2034 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2035 writeq(val64, &bar0->rx_pa_cfg);
2036 }
2037
2038 if (vlan_tag_strip == 0) {
2039 val64 = readq(&bar0->rx_pa_cfg);
2040 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2041 writeq(val64, &bar0->rx_pa_cfg);
2042 vlan_strip_flag = 0;
2043 }
2044
2045 /*
2046 	 * Enabling MC-RLDRAM. After enabling the device, we wait
2047 * for around 100ms, which is approximately the time required
2048 * for the device to be ready for operation.
2049 */
2050 val64 = readq(&bar0->mc_rldram_mrs);
2051 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2052 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2053 val64 = readq(&bar0->mc_rldram_mrs);
2054
2055 msleep(100); /* Delay by around 100 ms. */
2056
2057 	/* Clearing the ADAPTER_ECC_EN bit of the adapter control register. */
2058 val64 = readq(&bar0->adapter_control);
2059 val64 &= ~ADAPTER_ECC_EN;
2060 writeq(val64, &bar0->adapter_control);
2061
2062 /*
2063 * Clearing any possible Link state change interrupts that
2064 * could have popped up just before Enabling the card.
2065 */
2066 val64 = readq(&bar0->mac_rmac_err_reg);
2067 if (val64)
2068 writeq(val64, &bar0->mac_rmac_err_reg);
2069
2070 /*
2071 * Verify if the device is ready to be enabled, if so enable
2072 * it.
2073 */
2074 val64 = readq(&bar0->adapter_status);
2075 if (!verify_xena_quiescence(nic)) {
2076 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2077 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2078 (unsigned long long) val64);
2079 return FAILURE;
2080 }
2081
2082 /*
2083 * With some switches, link might be already up at this point.
2084 * Because of this weird behavior, when we enable laser,
2085 * we may not get link. We need to handle this. We cannot
2086 * figure out which switch is misbehaving. So we are forced to
2087 * make a global change.
2088 */
2089
2090 /* Enabling Laser. */
2091 val64 = readq(&bar0->adapter_control);
2092 val64 |= ADAPTER_EOI_TX_ON;
2093 writeq(val64, &bar0->adapter_control);
2094
2095 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2096 /*
2097 		 * Don't see link state interrupts initially on some switches,
2098 * so directly scheduling the link state task here.
2099 */
2100 schedule_work(&nic->set_link_task);
2101 }
2102 /* SXE-002: Initialize link and activity LED */
2103 subid = nic->pdev->subsystem_device;
2104 if (((subid & 0xFF) >= 0x07) &&
2105 (nic->device_type == XFRAME_I_DEVICE)) {
2106 val64 = readq(&bar0->gpio_control);
2107 val64 |= 0x0000800000000000ULL;
2108 writeq(val64, &bar0->gpio_control);
2109 val64 = 0x0411040400000000ULL;
2110 writeq(val64, (void __iomem *)bar0 + 0x2700);
2111 }
2112
2113 return SUCCESS;
2114 }
2115 /**
2116 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2117 */
2118 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2119 					struct TxD *txdlp, int get_off)
2120 {
2121 struct s2io_nic *nic = fifo_data->nic;
2122 struct sk_buff *skb;
2123 struct TxD *txds;
2124 u16 j, frg_cnt;
2125
2126 txds = txdlp;
2127 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2128 pci_unmap_single(nic->pdev, (dma_addr_t)
2129 txds->Buffer_Pointer, sizeof(u64),
2130 PCI_DMA_TODEVICE);
2131 txds++;
2132 }
2133
2134 skb = (struct sk_buff *) ((unsigned long)
2135 txds->Host_Control);
2136 if (!skb) {
2137 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2138 return NULL;
2139 }
2140 pci_unmap_single(nic->pdev, (dma_addr_t)
2141 txds->Buffer_Pointer,
2142 skb->len - skb->data_len,
2143 PCI_DMA_TODEVICE);
2144 frg_cnt = skb_shinfo(skb)->nr_frags;
2145 if (frg_cnt) {
2146 txds++;
2147 for (j = 0; j < frg_cnt; j++, txds++) {
2148 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2149 if (!txds->Buffer_Pointer)
2150 break;
2151 pci_unmap_page(nic->pdev, (dma_addr_t)
2152 txds->Buffer_Pointer,
2153 frag->size, PCI_DMA_TODEVICE);
2154 }
2155 }
2156 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2157 	return skb;
2158 }
2159
2160 /**
2161 * free_tx_buffers - Free all queued Tx buffers
2162 * @nic : device private variable.
2163 * Description:
2164 * Free all queued Tx buffers.
2165 * Return Value: void
2166 */
2167
2168 static void free_tx_buffers(struct s2io_nic *nic)
2169 {
2170 struct net_device *dev = nic->dev;
2171 struct sk_buff *skb;
2172 struct TxD *txdp;
2173 int i, j;
2174 struct mac_info *mac_control;
2175 struct config_param *config;
2176 int cnt = 0;
2177
2178 mac_control = &nic->mac_control;
2179 config = &nic->config;
2180
2181 for (i = 0; i < config->tx_fifo_num; i++) {
2182 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2183 			txdp = (struct TxD *)
2184 				mac_control->fifos[i].list_info[j].list_virt_addr;
2185 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2186 if (skb) {
2187 nic->mac_control.stats_info->sw_stat.mem_freed
2188 += skb->truesize;
2189 dev_kfree_skb(skb);
2190 cnt++;
2191 }
2192 }
2193 DBG_PRINT(INTR_DBG,
2194 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2195 dev->name, cnt, i);
2196 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2197 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2198 }
2199 }
2200
2201 /**
2202 * stop_nic - To stop the nic
2203  * @nic : device private variable.
2204 * Description:
2205 * This function does exactly the opposite of what the start_nic()
2206 * function does. This function is called to stop the device.
2207 * Return Value:
2208 * void.
2209 */
2210
2211 static void stop_nic(struct s2io_nic *nic)
2212 {
2213 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2214 register u64 val64 = 0;
2215 u16 interruptible;
2216 struct mac_info *mac_control;
2217 struct config_param *config;
2218
2219 mac_control = &nic->mac_control;
2220 config = &nic->config;
2221
2222 /* Disable all interrupts */
2223 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2224 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2225 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2226 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2227
2228 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2229 val64 = readq(&bar0->adapter_control);
2230 val64 &= ~(ADAPTER_CNTL_EN);
2231 writeq(val64, &bar0->adapter_control);
2232 }
2233
2234 static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp,
2235 			 struct sk_buff *skb)
2236 {
2237 struct net_device *dev = nic->dev;
2238 struct sk_buff *frag_list;
2239 void *tmp;
2240
2241 /* Buffer-1 receives L3/L4 headers */
2242 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2243 (nic->pdev, skb->data, l3l4hdr_size + 4,
2244 PCI_DMA_FROMDEVICE);
2245
2246 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2247 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2248 if (skb_shinfo(skb)->frag_list == NULL) {
2249 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
2250 DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2251 		return -ENOMEM;
2252 }
2253 frag_list = skb_shinfo(skb)->frag_list;
2254 skb->truesize += frag_list->truesize;
2255 nic->mac_control.stats_info->sw_stat.mem_allocated
2256 += frag_list->truesize;
2257 frag_list->next = NULL;
2258 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2259 frag_list->data = tmp;
2260 skb_reset_tail_pointer(frag_list);
2261
2262 /* Buffer-2 receives L4 data payload */
2263 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2264 frag_list->data, dev->mtu,
2265 PCI_DMA_FROMDEVICE);
2266 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2267 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2268
2269 return SUCCESS;
2270 }
2271
2272 /**
2273 * fill_rx_buffers - Allocates the Rx side skbs
2274 * @nic: device private variable
2275 * @ring_no: ring number
2276 * Description:
2277 * The function allocates Rx side skbs and puts the physical
2278 * address of these buffers into the RxD buffer pointers, so that the NIC
2279 * can DMA the received frame into these locations.
2280 * The NIC supports 3 receive modes, viz
2281 * 1. single buffer,
2282 * 2. three buffer and
2283 * 3. Five buffer modes.
2284 * Each mode defines how many fragments the received frame will be split
2285 * up into by the NIC. The frame is split into L3 header, L4 Header,
2286 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2287  * is split into 3 fragments. The driver currently supports the
2288  * single, two and three buffer modes.
2289 * Return Value:
2290 * SUCCESS on success or an appropriate -ve value on failure.
2291 */
2292
2293 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2294 {
2295 struct net_device *dev = nic->dev;
2296 struct sk_buff *skb;
2297 struct RxD_t *rxdp;
2298 int off, off1, size, block_no, block_no1;
2299 u32 alloc_tab = 0;
2300 u32 alloc_cnt;
2301 struct mac_info *mac_control;
2302 struct config_param *config;
2303 u64 tmp;
2304 struct buffAdd *ba;
2305 unsigned long flags;
2306 struct RxD_t *first_rxdp = NULL;
2307 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2308
2309 mac_control = &nic->mac_control;
2310 config = &nic->config;
2311 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2312 atomic_read(&nic->rx_bufs_left[ring_no]);
2313
2314 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2315 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2316 while (alloc_tab < alloc_cnt) {
2317 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2318 block_index;
2319 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2320
2321 rxdp = mac_control->rings[ring_no].
2322 rx_blocks[block_no].rxds[off].virt_addr;
2323
2324 if ((block_no == block_no1) && (off == off1) &&
2325 (rxdp->Host_Control)) {
2326 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2327 dev->name);
2328 DBG_PRINT(INTR_DBG, " info equated\n");
2329 goto end;
2330 }
2331 if (off && (off == rxd_count[nic->rxd_mode])) {
2332 mac_control->rings[ring_no].rx_curr_put_info.
2333 block_index++;
2334 if (mac_control->rings[ring_no].rx_curr_put_info.
2335 block_index == mac_control->rings[ring_no].
2336 block_count)
2337 mac_control->rings[ring_no].rx_curr_put_info.
2338 block_index = 0;
2339 block_no = mac_control->rings[ring_no].
2340 rx_curr_put_info.block_index;
2341 if (off == rxd_count[nic->rxd_mode])
2342 off = 0;
2343 mac_control->rings[ring_no].rx_curr_put_info.
2344 offset = off;
2345 rxdp = mac_control->rings[ring_no].
2346 rx_blocks[block_no].block_virt_addr;
2347 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2348 dev->name, rxdp);
2349 }
2350 		if (!napi) {
2351 spin_lock_irqsave(&nic->put_lock, flags);
2352 mac_control->rings[ring_no].put_pos =
2353 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2354 spin_unlock_irqrestore(&nic->put_lock, flags);
2355 } else {
2356 mac_control->rings[ring_no].put_pos =
2357 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2358 }
2359 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2360 ((nic->rxd_mode >= RXD_MODE_3A) &&
2361 (rxdp->Control_2 & BIT(0)))) {
2362 mac_control->rings[ring_no].rx_curr_put_info.
2363 offset = off;
2364 goto end;
2365 }
2366 /* calculate size of skb based on ring mode */
2367 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2368 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2369 if (nic->rxd_mode == RXD_MODE_1)
2370 size += NET_IP_ALIGN;
2371 else if (nic->rxd_mode == RXD_MODE_3B)
2372 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2373 else
2374 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2375
2376 /* allocate skb */
2377 skb = dev_alloc_skb(size);
2378 		if (!skb) {
2379 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2380 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2381 if (first_rxdp) {
2382 wmb();
2383 first_rxdp->Control_1 |= RXD_OWN_XENA;
2384 }
2385 			nic->mac_control.stats_info->
2386 				sw_stat.mem_alloc_fail_cnt++;
2387 			return -ENOMEM;
2388 }
2389 nic->mac_control.stats_info->sw_stat.mem_allocated
2390 += skb->truesize;
2391 if (nic->rxd_mode == RXD_MODE_1) {
2392 /* 1 buffer mode - normal operation mode */
2393 memset(rxdp, 0, sizeof(struct RxD1));
2394 skb_reserve(skb, NET_IP_ALIGN);
2395 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2396 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2397 PCI_DMA_FROMDEVICE);
2398 rxdp->Control_2 =
2399 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2400
2401 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2402 /*
2403 * 2 or 3 buffer mode -
2404 			 * Both 2 buffer mode and 3 buffer mode provide 128
2405 			 * byte aligned receive buffers.
2406 			 *
2407 			 * 3 buffer mode provides header separation wherein
2408 			 * skb->data will have L3/L4 headers whereas
2409 * skb_shinfo(skb)->frag_list will have the L4 data
2410 * payload
2411 */
2412
2413 /* save buffer pointers to avoid frequent dma mapping */
2414 Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr;
2415 Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr;
2416 memset(rxdp, 0, sizeof(struct RxD3));
2417 /* restore the buffer pointers for dma sync*/
2418 ((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr;
2419 ((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr;
2420
2421 ba = &mac_control->rings[ring_no].ba[block_no][off];
2422 skb_reserve(skb, BUF0_LEN);
2423 tmp = (u64)(unsigned long) skb->data;
2424 tmp += ALIGN_SIZE;
2425 tmp &= ~ALIGN_SIZE;
2426 skb->data = (void *) (unsigned long)tmp;
2427 skb_reset_tail_pointer(skb);
2428
2429 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2430 ((struct RxD3*)rxdp)->Buffer0_ptr =
2431 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2432 PCI_DMA_FROMDEVICE);
2433 else
2434 pci_dma_sync_single_for_device(nic->pdev,
2435 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2436 BUF0_LEN, PCI_DMA_FROMDEVICE);
2437 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2438 if (nic->rxd_mode == RXD_MODE_3B) {
2439 /* Two buffer mode */
2440
2441 /*
2442 * Buffer2 will have L3/L4 header plus
2443 * L4 payload
2444 */
2445 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2446 (nic->pdev, skb->data, dev->mtu + 4,
2447 PCI_DMA_FROMDEVICE);
2448
2449 /* Buffer-1 will be dummy buffer. Not used */
2450 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2451 ((struct RxD3*)rxdp)->Buffer1_ptr =
2452 pci_map_single(nic->pdev,
2453 ba->ba_1, BUF1_LEN,
2454 PCI_DMA_FROMDEVICE);
2455 }
2456 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2457 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2458 (dev->mtu + 4);
2459 } else {
2460 /* 3 buffer mode */
2461 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2462 				nic->mac_control.stats_info->
2463 					sw_stat.mem_freed += skb->truesize;
2464 dev_kfree_skb_irq(skb);
2465 if (first_rxdp) {
2466 wmb();
2467 first_rxdp->Control_1 |=
2468 RXD_OWN_XENA;
2469 }
2470 				return -ENOMEM;
2471 }
2472 }
2473 rxdp->Control_2 |= BIT(0);
2474 }
2475 rxdp->Host_Control = (unsigned long) (skb);
2476 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2477 rxdp->Control_1 |= RXD_OWN_XENA;
2478 off++;
2479 if (off == (rxd_count[nic->rxd_mode] + 1))
2480 off = 0;
2481 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2482
2483 rxdp->Control_2 |= SET_RXD_MARKER;
2484 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2485 if (first_rxdp) {
2486 wmb();
2487 first_rxdp->Control_1 |= RXD_OWN_XENA;
2488 }
2489 first_rxdp = rxdp;
2490 }
2491 atomic_inc(&nic->rx_bufs_left[ring_no]);
2492 alloc_tab++;
2493 }
2494
2495 end:
2496 /* Transfer ownership of first descriptor to adapter just before
2497 * exiting. Before that, use memory barrier so that ownership
2498 * and other fields are seen by adapter correctly.
2499 */
2500 if (first_rxdp) {
2501 wmb();
2502 first_rxdp->Control_1 |= RXD_OWN_XENA;
2503 }
2504
2505 return SUCCESS;
2506 }
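
/*
 * The ownership hand-off above follows a fixed pattern: fill every field
 * of the descriptor first, issue wmb(), and only then set RXD_OWN_XENA so
 * the NIC never sees a half-written RxD. A minimal sketch of that pattern
 * (illustrative only, hypothetical helper name):
 */
#if 0
static void example_give_rxd_to_nic(struct RxD_t *rxdp, struct sk_buff *skb)
{
	rxdp->Host_Control = (unsigned long)skb;	/* fill all fields first */
	rxdp->Control_2 |= SET_RXD_MARKER;
	wmb();				/* order the fills before the ownership bit */
	rxdp->Control_1 |= RXD_OWN_XENA;	/* NIC may DMA into it from now on */
}
#endif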
2507
2508 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2509 {
2510 struct net_device *dev = sp->dev;
2511 int j;
2512 struct sk_buff *skb;
2513 struct RxD_t *rxdp;
2514 struct mac_info *mac_control;
2515 struct buffAdd *ba;
2516
2517 mac_control = &sp->mac_control;
2518 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2519 rxdp = mac_control->rings[ring_no].
2520 rx_blocks[blk].rxds[j].virt_addr;
2521 skb = (struct sk_buff *)
2522 ((unsigned long) rxdp->Host_Control);
2523 if (!skb) {
2524 continue;
2525 }
2526 if (sp->rxd_mode == RXD_MODE_1) {
2527 pci_unmap_single(sp->pdev, (dma_addr_t)
2528 ((struct RxD1*)rxdp)->Buffer0_ptr,
2529 dev->mtu +
2530 HEADER_ETHERNET_II_802_3_SIZE
2531 + HEADER_802_2_SIZE +
2532 HEADER_SNAP_SIZE,
2533 PCI_DMA_FROMDEVICE);
2534 memset(rxdp, 0, sizeof(struct RxD1));
2535 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2536 ba = &mac_control->rings[ring_no].
2537 ba[blk][j];
2538 pci_unmap_single(sp->pdev, (dma_addr_t)
2539 ((struct RxD3*)rxdp)->Buffer0_ptr,
2540 BUF0_LEN,
2541 PCI_DMA_FROMDEVICE);
2542 pci_unmap_single(sp->pdev, (dma_addr_t)
2543 ((struct RxD3*)rxdp)->Buffer1_ptr,
2544 BUF1_LEN,
2545 PCI_DMA_FROMDEVICE);
2546 pci_unmap_single(sp->pdev, (dma_addr_t)
2547 ((struct RxD3*)rxdp)->Buffer2_ptr,
2548 dev->mtu + 4,
2549 PCI_DMA_FROMDEVICE);
2550 memset(rxdp, 0, sizeof(struct RxD3));
2551 } else {
2552 pci_unmap_single(sp->pdev, (dma_addr_t)
2553 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2554 PCI_DMA_FROMDEVICE);
2555 pci_unmap_single(sp->pdev, (dma_addr_t)
2556 ((struct RxD3*)rxdp)->Buffer1_ptr,
2557 l3l4hdr_size + 4,
2558 PCI_DMA_FROMDEVICE);
2559 pci_unmap_single(sp->pdev, (dma_addr_t)
2560 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2561 PCI_DMA_FROMDEVICE);
2562 memset(rxdp, 0, sizeof(struct RxD3));
2563 }
2564 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2565 dev_kfree_skb(skb);
2566 atomic_dec(&sp->rx_bufs_left[ring_no]);
2567 }
2568 }
2569
2570 /**
2571 * free_rx_buffers - Frees all Rx buffers
2572 * @sp: device private variable.
2573 * Description:
2574 * This function will free all Rx buffers allocated by host.
2575 * Return Value:
2576 * NONE.
2577 */
2578
2579 static void free_rx_buffers(struct s2io_nic *sp)
2580 {
2581 struct net_device *dev = sp->dev;
2582 int i, blk = 0, buf_cnt = 0;
2583 struct mac_info *mac_control;
2584 struct config_param *config;
2585
2586 mac_control = &sp->mac_control;
2587 config = &sp->config;
2588
2589 for (i = 0; i < config->rx_ring_num; i++) {
2590 		buf_cnt = atomic_read(&sp->rx_bufs_left[i]);
2591 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2592 			free_rxd_blk(sp, i, blk);
2593 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2594 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2595 mac_control->rings[i].rx_curr_put_info.offset = 0;
2596 mac_control->rings[i].rx_curr_get_info.offset = 0;
2597 atomic_set(&sp->rx_bufs_left[i], 0);
2598 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2599 dev->name, buf_cnt, i);
2600 }
2601 }
2602
2603 /**
2604 * s2io_poll - Rx interrupt handler for NAPI support
2605 * @dev : pointer to the device structure.
2606 * @budget : The number of packets that were budgeted to be processed
2607  * during one pass through the 'poll' function.
2608  * Description:
2609  * Comes into the picture only if NAPI support has been incorporated. It
2610  * does the same thing that rx_intr_handler does, but not in an interrupt
2611  * context; also, it will process only a given number of packets.
2612  * Return value:
2613  * 0 on success and 1 if there are no Rx packets to be processed.
2614 */
2615
2616 static int s2io_poll(struct net_device *dev, int *budget)
2617 {
2618 struct s2io_nic *nic = dev->priv;
2619 int pkt_cnt = 0, org_pkts_to_process;
2620 struct mac_info *mac_control;
2621 struct config_param *config;
2622 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2623 int i;
2624
2625 atomic_inc(&nic->isr_cnt);
2626 mac_control = &nic->mac_control;
2627 config = &nic->config;
2628
2629 nic->pkts_to_process = *budget;
2630 if (nic->pkts_to_process > dev->quota)
2631 nic->pkts_to_process = dev->quota;
2632 org_pkts_to_process = nic->pkts_to_process;
2633
2634 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2635 readl(&bar0->rx_traffic_int);
2636
2637 for (i = 0; i < config->rx_ring_num; i++) {
2638 rx_intr_handler(&mac_control->rings[i]);
2639 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2640 if (!nic->pkts_to_process) {
2641 /* Quota for the current iteration has been met */
2642 goto no_rx;
2643 }
2644 }
2645 if (!pkt_cnt)
2646 pkt_cnt = 1;
2647
2648 dev->quota -= pkt_cnt;
2649 *budget -= pkt_cnt;
2650 netif_rx_complete(dev);
2651
2652 for (i = 0; i < config->rx_ring_num; i++) {
2653 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2654 			DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
2655 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2656 break;
2657 }
2658 }
2659 /* Re enable the Rx interrupts. */
2660 writeq(0x0, &bar0->rx_traffic_mask);
2661 readl(&bar0->rx_traffic_mask);
2662 atomic_dec(&nic->isr_cnt);
2663 return 0;
2664
2665 no_rx:
2666 dev->quota -= pkt_cnt;
2667 *budget -= pkt_cnt;
2668
2669 for (i = 0; i < config->rx_ring_num; i++) {
2670 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2671 			DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
2672 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2673 break;
2674 }
2675 }
2676 atomic_dec(&nic->isr_cnt);
2677 return 1;
2678 }
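
/*
 * Hook-up sketch for the pre-2.6.24 NAPI interface (an assumption -- the
 * real assignment happens at probe time elsewhere in the driver, and the
 * weight value shown is illustrative):
 */
#if 0
static void example_setup_napi(struct net_device *dev)
{
	dev->poll = s2io_poll;	/* invoked with a per-pass packet budget */
	dev->weight = 32;	/* quota granted to each poll round */
}
#endif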
2679
2680 #ifdef CONFIG_NET_POLL_CONTROLLER
2681 /**
2682 * s2io_netpoll - netpoll event handler entry point
2683 * @dev : pointer to the device structure.
2684 * Description:
2685 * This function will be called by upper layer to check for events on the
2686 * interface in situations where interrupts are disabled. It is used for
2687 * specific in-kernel networking tasks, such as remote consoles and kernel
2688  * debugging over the network (for example, netdump in Red Hat).
2689 */
2690 static void s2io_netpoll(struct net_device *dev)
2691 {
2692 struct s2io_nic *nic = dev->priv;
2693 struct mac_info *mac_control;
2694 struct config_param *config;
2695 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2696 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2697 int i;
2698
2699 if (pci_channel_offline(nic->pdev))
2700 return;
2701
2702 disable_irq(dev->irq);
2703
2704 atomic_inc(&nic->isr_cnt);
2705 mac_control = &nic->mac_control;
2706 config = &nic->config;
2707
2708 writeq(val64, &bar0->rx_traffic_int);
2709 writeq(val64, &bar0->tx_traffic_int);
2710
2711 	/* We need to free up the transmitted skbs, or else netpoll will
2712 	 * run out of skbs and eventually a netpoll application such
2713 	 * as netdump will fail.
2714 */
2715 for (i = 0; i < config->tx_fifo_num; i++)
2716 tx_intr_handler(&mac_control->fifos[i]);
2717
2718 /* check for received packet and indicate up to network */
2719 for (i = 0; i < config->rx_ring_num; i++)
2720 rx_intr_handler(&mac_control->rings[i]);
2721
2722 for (i = 0; i < config->rx_ring_num; i++) {
2723 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2724 			DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
2725 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2726 break;
2727 }
2728 }
2729 atomic_dec(&nic->isr_cnt);
2730 enable_irq(dev->irq);
2731 return;
2732 }
2733 #endif
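
/*
 * Hook-up sketch (an assumption -- the real assignment happens at probe
 * time): under CONFIG_NET_POLL_CONTROLLER the handler above is reached
 * through dev->poll_controller.
 */
#if 0
static void example_setup_netpoll(struct net_device *dev)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;	/* polled path for netconsole/netdump */
#endif
}
#endif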
2734
2735 /**
2736 * rx_intr_handler - Rx interrupt handler
2737 * @nic: device private variable.
2738 * Description:
2739 * If the interrupt is because of a received frame or if the
2740  * receive ring contains fresh, as yet un-processed frames, this function
2741  * is called. It picks out the RxD at which the last Rx processing had
2742  * stopped, sends the skb to the OSM's Rx handler and then increments
2743 * the offset.
2744 * Return Value:
2745 * NONE.
2746 */
2747 static void rx_intr_handler(struct ring_info *ring_data)
2748 {
2749 struct s2io_nic *nic = ring_data->nic;
2750 struct net_device *dev = (struct net_device *) nic->dev;
2751 int get_block, put_block, put_offset;
2752 struct rx_curr_get_info get_info, put_info;
2753 struct RxD_t *rxdp;
2754 struct sk_buff *skb;
2755 int pkt_cnt = 0;
2756 int i;
2757
2758 spin_lock(&nic->rx_lock);
2759 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2760 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2761 __FUNCTION__, dev->name);
2762 spin_unlock(&nic->rx_lock);
2763 return;
2764 }
2765
2766 get_info = ring_data->rx_curr_get_info;
2767 get_block = get_info.block_index;
2768 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2769 put_block = put_info.block_index;
2770 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2771 if (!napi) {
2772 spin_lock(&nic->put_lock);
2773 put_offset = ring_data->put_pos;
2774 spin_unlock(&nic->put_lock);
2775 } else
2776 put_offset = ring_data->put_pos;
2777
2778 while (RXD_IS_UP2DT(rxdp)) {
2779 /*
2780 		 * If you are next to the put index then it's a
2781 		 * FIFO full condition
2782 */
2783 if ((get_block == put_block) &&
2784 (get_info.offset + 1) == put_info.offset) {
2785 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n", dev->name);
2786 break;
2787 }
2788 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2789 if (skb == NULL) {
2790 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2791 dev->name);
2792 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2793 spin_unlock(&nic->rx_lock);
2794 return;
2795 }
2796 if (nic->rxd_mode == RXD_MODE_1) {
2797 pci_unmap_single(nic->pdev, (dma_addr_t)
2798 ((struct RxD1*)rxdp)->Buffer0_ptr,
2799 dev->mtu +
2800 HEADER_ETHERNET_II_802_3_SIZE +
2801 HEADER_802_2_SIZE +
2802 HEADER_SNAP_SIZE,
2803 PCI_DMA_FROMDEVICE);
2804 } else if (nic->rxd_mode == RXD_MODE_3B) {
2805 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2806 ((struct RxD3*)rxdp)->Buffer0_ptr,
2807 BUF0_LEN, PCI_DMA_FROMDEVICE);
2808 pci_unmap_single(nic->pdev, (dma_addr_t)
2809 ((struct RxD3*)rxdp)->Buffer2_ptr,
2810 dev->mtu + 4,
2811 PCI_DMA_FROMDEVICE);
2812 } else {
2813 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2814 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2815 PCI_DMA_FROMDEVICE);
2816 pci_unmap_single(nic->pdev, (dma_addr_t)
2817 ((struct RxD3*)rxdp)->Buffer1_ptr,
2818 l3l4hdr_size + 4,
2819 PCI_DMA_FROMDEVICE);
2820 pci_unmap_single(nic->pdev, (dma_addr_t)
2821 ((struct RxD3*)rxdp)->Buffer2_ptr,
2822 dev->mtu, PCI_DMA_FROMDEVICE);
2823 }
2824 prefetch(skb->data);
2825 rx_osm_handler(ring_data, rxdp);
2826 get_info.offset++;
2827 ring_data->rx_curr_get_info.offset = get_info.offset;
2828 rxdp = ring_data->rx_blocks[get_block].
2829 rxds[get_info.offset].virt_addr;
2830 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2831 get_info.offset = 0;
2832 ring_data->rx_curr_get_info.offset = get_info.offset;
2833 get_block++;
2834 if (get_block == ring_data->block_count)
2835 get_block = 0;
2836 ring_data->rx_curr_get_info.block_index = get_block;
2837 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2838 }
2839
2840 nic->pkts_to_process -= 1;
2841 if ((napi) && (!nic->pkts_to_process))
2842 break;
2843 pkt_cnt++;
2844 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2845 break;
2846 }
2847 if (nic->lro) {
2848 /* Clear all LRO sessions before exiting */
2849 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2850 struct lro *lro = &nic->lro0_n[i];
2851 if (lro->in_use) {
2852 update_L3L4_header(nic, lro);
2853 queue_rx_frame(lro->parent);
2854 clear_lro_session(lro);
2855 }
2856 }
2857 }
2858
2859 spin_unlock(&nic->rx_lock);
2860 }
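
/*
 * The get-side index arithmetic used above, shown in isolation
 * (illustrative only, hypothetical helper name): the offset wraps per
 * block and the block index wraps per ring.
 */
#if 0
static void example_advance_get_index(struct ring_info *ring, int rxd_cnt)
{
	struct rx_curr_get_info *get = &ring->rx_curr_get_info;

	get->offset++;
	if (get->offset == rxd_cnt) {		/* consumed the whole block */
		get->offset = 0;
		get->block_index++;
		if (get->block_index == ring->block_count)
			get->block_index = 0;	/* wrap around the ring */
	}
}
#endif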
2861
2862 /**
2863 * tx_intr_handler - Transmit interrupt handler
2864 * @nic : device private variable
2865 * Description:
2866 * If an interrupt was raised to indicate DMA complete of the
2867 * Tx packet, this function is called. It identifies the last TxD
2868  * whose buffer was freed and frees all skbs whose data has already
2869  * been DMA'ed into the NIC's internal memory.
2870 * Return Value:
2871 * NONE
2872 */
2873
2874 static void tx_intr_handler(struct fifo_info *fifo_data)
2875 {
2876 struct s2io_nic *nic = fifo_data->nic;
2877 struct net_device *dev = (struct net_device *) nic->dev;
2878 struct tx_curr_get_info get_info, put_info;
2879 struct sk_buff *skb;
2880 struct TxD *txdlp;
2881 u8 err_mask;
2882
2883 get_info = fifo_data->tx_curr_get_info;
2884 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2885 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2886 list_virt_addr;
2887 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2888 (get_info.offset != put_info.offset) &&
2889 (txdlp->Host_Control)) {
2890 /* Check for TxD errors */
2891 if (txdlp->Control_1 & TXD_T_CODE) {
2892 unsigned long long err;
2893 err = txdlp->Control_1 & TXD_T_CODE;
2894 if (err & 0x1) {
2895 nic->mac_control.stats_info->sw_stat.
2896 parity_err_cnt++;
2897 }
2898
2899 /* update t_code statistics */
2900 err_mask = err >> 48;
2901 switch(err_mask) {
2902 case 2:
2903 nic->mac_control.stats_info->sw_stat.
2904 tx_buf_abort_cnt++;
2905 break;
2906
2907 case 3:
2908 nic->mac_control.stats_info->sw_stat.
2909 tx_desc_abort_cnt++;
2910 break;
2911
2912 case 7:
2913 nic->mac_control.stats_info->sw_stat.
2914 tx_parity_err_cnt++;
2915 break;
2916
2917 case 10:
2918 nic->mac_control.stats_info->sw_stat.
2919 tx_link_loss_cnt++;
2920 break;
2921
2922 case 15:
2923 nic->mac_control.stats_info->sw_stat.
2924 tx_list_proc_err_cnt++;
2925 break;
2926 }
2927 }
2928
2929 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2930 if (skb == NULL) {
2931 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2932 __FUNCTION__);
2933 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2934 return;
2935 }
2936
2937 /* Updating the statistics block */
2938 nic->stats.tx_bytes += skb->len;
2939 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2940 dev_kfree_skb_irq(skb);
2941
2942 get_info.offset++;
2943 if (get_info.offset == get_info.fifo_len + 1)
2944 get_info.offset = 0;
2945 txdlp = (struct TxD *) fifo_data->list_info
2946 [get_info.offset].list_virt_addr;
2947 fifo_data->tx_curr_get_info.offset =
2948 get_info.offset;
2949 }
2950
2951 spin_lock(&nic->tx_lock);
2952 if (netif_queue_stopped(dev))
2953 netif_wake_queue(dev);
2954 spin_unlock(&nic->tx_lock);
2955 }
2956
2957 /**
2958  * s2io_mdio_write - Function to write into the MDIO registers
2959 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2960 * @addr : address value
2961 * @value : data value
2962 * @dev : pointer to net_device structure
2963 * Description:
2964  * This function is used to write values to the MDIO registers.
2965  * Return value: NONE
2966 */
2967 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2968 {
2969 u64 val64 = 0x0;
2970 struct s2io_nic *sp = dev->priv;
2971 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2972
2973 	/* Address transaction */
2974 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2975 | MDIO_MMD_DEV_ADDR(mmd_type)
2976 | MDIO_MMS_PRT_ADDR(0x0);
2977 writeq(val64, &bar0->mdio_control);
2978 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2979 writeq(val64, &bar0->mdio_control);
2980 udelay(100);
2981
2982 	/* Data transaction */
2983 val64 = 0x0;
2984 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2985 | MDIO_MMD_DEV_ADDR(mmd_type)
2986 | MDIO_MMS_PRT_ADDR(0x0)
2987 | MDIO_MDIO_DATA(value)
2988 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2989 writeq(val64, &bar0->mdio_control);
2990 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2991 writeq(val64, &bar0->mdio_control);
2992 udelay(100);
2993
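	/* Post-write read transaction; the result is not checked here */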
2994 val64 = 0x0;
2995 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2996 | MDIO_MMD_DEV_ADDR(mmd_type)
2997 | MDIO_MMS_PRT_ADDR(0x0)
2998 | MDIO_OP(MDIO_OP_READ_TRANS);
2999 writeq(val64, &bar0->mdio_control);
3000 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3001 writeq(val64, &bar0->mdio_control);
3002 udelay(100);
3003
3004 }
3005
3006 /**
3007  * s2io_mdio_read - Function to read from the MDIO registers
3008 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3009 * @addr : address value
3010 * @dev : pointer to net_device structure
3011 * Description:
3012  * This function is used to read values from the MDIO registers.
3013  * Return value: the 16-bit value read from the register.
3014 */
3015 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3016 {
3017 u64 val64 = 0x0;
3018 u64 rval64 = 0x0;
3019 struct s2io_nic *sp = dev->priv;
3020 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3021
3022 /* address transaction */
3023 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3024 | MDIO_MMD_DEV_ADDR(mmd_type)
3025 | MDIO_MMS_PRT_ADDR(0x0);
3026 writeq(val64, &bar0->mdio_control);
3027 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3028 writeq(val64, &bar0->mdio_control);
3029 udelay(100);
3030
3031 /* Data transaction */
3032 val64 = 0x0;
3033 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3034 | MDIO_MMD_DEV_ADDR(mmd_type)
3035 | MDIO_MMS_PRT_ADDR(0x0)
3036 | MDIO_OP(MDIO_OP_READ_TRANS);
3037 writeq(val64, &bar0->mdio_control);
3038 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3039 writeq(val64, &bar0->mdio_control);
3040 udelay(100);
3041
3042 /* Read the value from regs */
3043 rval64 = readq(&bar0->mdio_control);
3044 rval64 = rval64 & 0xFFFF0000;
3045 rval64 = rval64 >> 16;
3046 return rval64;
3047 }
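
/*
 * Usage sketch mirroring s2io_updt_xpak_counter() below (illustrative
 * only, hypothetical function name): read PMA/PMD register 0x0000, which
 * is expected to return the 0x2040 device identifier.
 */
#if 0
static void example_mdio_probe(struct net_device *dev)
{
	u64 id = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0x0000, dev);

	if (id != 0x2040)	/* MDIO slave absent or not responding */
		DBG_PRINT(ERR_DBG, "Unexpected PMA id: %llx\n",
			  (unsigned long long)id);
}
#endif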
3048 /**
3049 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3050  * @counter : counter value to be updated
3051  * @regs_stat : packed 2-bit status fields; @index selects the field
3052  * @flag : flag to indicate the status
3053  * @type : counter type
3054  * Description: This function checks the status of the xpak counter value.
3055  * Return value: NONE
3056 */
3057
3058 static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index, u16 flag, u16 type)
3059 {
3060 u64 mask = 0x3;
3061 u64 val64;
3062 int i;
3063 	for (i = 0; i < index; i++)
3064 		mask = mask << 0x2;
3065 
3066 	if (flag > 0)
3067 	{
3068 		*counter = *counter + 1;
3069 		val64 = *regs_stat & mask;
3070 		val64 = val64 >> (index * 0x2);
3071 		val64 = val64 + 1;
3072 		if (val64 == 3)
3073 		{
3074 			switch (type)
3075 			{
3076 case 1:
3077 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3078 "service. Excessive temperatures may "
3079 "result in premature transceiver "
3080 "failure \n");
3081 break;
3082 case 2:
3083 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3084 				"service. Excessive bias currents may "
3085 "indicate imminent laser diode "
3086 "failure \n");
3087 break;
3088 case 3:
3089 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3090 				"service. Excessive laser output "
3091 "power may saturate far-end "
3092 "receiver\n");
3093 break;
3094 default:
3095 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3096 "type \n");
3097 }
3098 val64 = 0x0;
3099 }
3100 val64 = val64 << (index * 0x2);
3101 *regs_stat = (*regs_stat & (~mask)) | (val64);
3102
3103 } else {
3104 *regs_stat = *regs_stat & (~mask);
3105 }
3106 }
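
/*
 * regs_stat packs one 2-bit saturating counter per alarm index, so the
 * field for index i lives at bits [2i+1:2i]. Extracting it (illustrative
 * only, hypothetical helper name):
 */
#if 0
static u64 example_get_alarm_field(u64 regs_stat, u32 index)
{
	u64 mask = 0x3ULL << (index * 2);	/* 2-bit field for this alarm */

	return (regs_stat & mask) >> (index * 2);
}
#endif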
3107
3108 /**
3109 * s2io_updt_xpak_counter - Function to update the xpak counters
3110 * @dev : pointer to net_device struct
3111 * Description:
3112  * This function updates the status of the xpak counter values.
3113  * Return value: NONE
3114 */
3115 static void s2io_updt_xpak_counter(struct net_device *dev)
3116 {
3117 u16 flag = 0x0;
3118 u16 type = 0x0;
3119 u16 val16 = 0x0;
3120 u64 val64 = 0x0;
3121 u64 addr = 0x0;
3122
3123 struct s2io_nic *sp = dev->priv;
3124 struct stat_block *stat_info = sp->mac_control.stats_info;
3125
3126 /* Check the communication with the MDIO slave */
3127 addr = 0x0000;
3128 val64 = 0x0;
3129 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3130 	if ((val64 == 0xFFFF) || (val64 == 0x0000))
3131 {
3132 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3133 "Returned %llx\n", (unsigned long long)val64);
3134 return;
3135 }
3136
3137 	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
3138 	if (val64 != 0x2040)
3139 {
3140 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3141 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3142 (unsigned long long)val64);
3143 return;
3144 }
3145
3146 /* Loading the DOM register to MDIO register */
3147 addr = 0xA100;
3148 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3149 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3150
3151 /* Reading the Alarm flags */
3152 addr = 0xA070;
3153 val64 = 0x0;
3154 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3155
3156 flag = CHECKBIT(val64, 0x7);
3157 type = 1;
3158 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3159 &stat_info->xpak_stat.xpak_regs_stat,
3160 0x0, flag, type);
3161
3162 if(CHECKBIT(val64, 0x6))
3163 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3164
3165 flag = CHECKBIT(val64, 0x3);
3166 type = 2;
3167 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3168 &stat_info->xpak_stat.xpak_regs_stat,
3169 0x2, flag, type);
3170
3171 if(CHECKBIT(val64, 0x2))
3172 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3173
3174 flag = CHECKBIT(val64, 0x1);
3175 type = 3;
3176 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3177 &stat_info->xpak_stat.xpak_regs_stat,
3178 0x4, flag, type);
3179
3180 if(CHECKBIT(val64, 0x0))
3181 stat_info->xpak_stat.alarm_laser_output_power_low++;
3182
3183 /* Reading the Warning flags */
3184 addr = 0xA074;
3185 val64 = 0x0;
3186 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3187
3188 if(CHECKBIT(val64, 0x7))
3189 stat_info->xpak_stat.warn_transceiver_temp_high++;
3190
3191 if(CHECKBIT(val64, 0x6))
3192 stat_info->xpak_stat.warn_transceiver_temp_low++;
3193
3194 if(CHECKBIT(val64, 0x3))
3195 stat_info->xpak_stat.warn_laser_bias_current_high++;
3196
3197 if(CHECKBIT(val64, 0x2))
3198 stat_info->xpak_stat.warn_laser_bias_current_low++;
3199
3200 if(CHECKBIT(val64, 0x1))
3201 stat_info->xpak_stat.warn_laser_output_power_high++;
3202
3203 if(CHECKBIT(val64, 0x0))
3204 stat_info->xpak_stat.warn_laser_output_power_low++;
3205 }
3206
3207 /**
3208  * alarm_intr_handler - Alarm Interrupt handler
3209  * @nic: device private variable
3210  * Description: If the interrupt was neither because of an Rx packet nor
3211  * a Tx completion, this function is called. If the interrupt was to
3212  * indicate a loss of link, the OSM link status handler is invoked; for
3213  * any other alarm interrupt, the block that raised the interrupt is
3214  * displayed and a H/W reset is issued.
3215 * Return Value:
3216 * NONE
3217 */
3218
3219 static void alarm_intr_handler(struct s2io_nic *nic)
3220 {
3221 struct net_device *dev = (struct net_device *) nic->dev;
3222 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3223 register u64 val64 = 0, err_reg = 0;
3224 u64 cnt;
3225 int i;
3226 if (atomic_read(&nic->card_state) == CARD_DOWN)
3227 return;
3228 if (pci_channel_offline(nic->pdev))
3229 return;
3230 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3231 /* Handling the XPAK counters update */
3232 	if (nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3233 /* waiting for an hour */
3234 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3235 } else {
3236 s2io_updt_xpak_counter(dev);
3237 /* reset the count to zero */
3238 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3239 }
3240
3241 /* Handling link status change error Intr */
3242 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3243 err_reg = readq(&bar0->mac_rmac_err_reg);
3244 writeq(err_reg, &bar0->mac_rmac_err_reg);
3245 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3246 schedule_work(&nic->set_link_task);
3247 }
3248 }
3249
3250 /* Handling Ecc errors */
3251 val64 = readq(&bar0->mc_err_reg);
3252 writeq(val64, &bar0->mc_err_reg);
3253 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3254 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3255 nic->mac_control.stats_info->sw_stat.
3256 double_ecc_errs++;
3257 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3258 dev->name);
3259 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3260 if (nic->device_type != XFRAME_II_DEVICE) {
3261 /* Reset XframeI only if critical error */
3262 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3263 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3264 netif_stop_queue(dev);
3265 schedule_work(&nic->rst_timer_task);
3266 nic->mac_control.stats_info->sw_stat.
3267 soft_reset_cnt++;
3268 }
3269 }
3270 } else {
3271 nic->mac_control.stats_info->sw_stat.
3272 single_ecc_errs++;
3273 }
3274 }
3275
3276 /* In case of a serious error, the device will be Reset. */
3277 val64 = readq(&bar0->serr_source);
3278 if (val64 & SERR_SOURCE_ANY) {
3279 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3280 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3281 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3282 (unsigned long long)val64);
3283 netif_stop_queue(dev);
3284 schedule_work(&nic->rst_timer_task);
3285 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3286 }
3287
3288 /*
3289 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3290 * Error occurs, the adapter will be recycled by disabling the
3291 * adapter enable bit and enabling it again after the device
3292 * becomes Quiescent.
3293 */
3294 val64 = readq(&bar0->pcc_err_reg);
3295 writeq(val64, &bar0->pcc_err_reg);
3296 if (val64 & PCC_FB_ECC_DB_ERR) {
3297 u64 ac = readq(&bar0->adapter_control);
3298 ac &= ~(ADAPTER_CNTL_EN);
3299 writeq(ac, &bar0->adapter_control);
3300 ac = readq(&bar0->adapter_control);
3301 schedule_work(&nic->set_link_task);
3302 }
3303 /* Check for data parity error */
3304 val64 = readq(&bar0->pic_int_status);
3305 if (val64 & PIC_INT_GPIO) {
3306 val64 = readq(&bar0->gpio_int_reg);
3307 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3308 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3309 schedule_work(&nic->rst_timer_task);
3310 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3311 }
3312 }
3313
3314 /* Check for ring full counter */
3315 if (nic->device_type & XFRAME_II_DEVICE) {
3316 val64 = readq(&bar0->ring_bump_counter1);
3317 for (i=0; i<4; i++) {
3318 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3319 cnt >>= 64 - ((i+1)*16);
3320 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3321 += cnt;
3322 }
3323
3324 val64 = readq(&bar0->ring_bump_counter2);
3325 for (i=0; i<4; i++) {
3326 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3327 cnt >>= 64 - ((i+1)*16);
3328 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3329 += cnt;
3330 }
3331 }
3332
3333 /* Other type of interrupts are not being handled now, TODO */
3334 }
3335
3336 /**
3337 * wait_for_cmd_complete - waits for a command to complete.
3338 * @sp : private member of the device structure, which is a pointer to the
3339 * s2io_nic structure.
3340  * Description: Function that waits for a command written into the RMAC
3341  * ADDR/DATA registers to be completed and returns either success or
3342  * error depending on whether the command completed or not.
3343 * Return value:
3344 * SUCCESS on success and FAILURE on failure.
3345 */
3346
3347 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3348 int bit_state)
3349 {
3350 int ret = FAILURE, cnt = 0, delay = 1;
3351 u64 val64;
3352
3353 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3354 return FAILURE;
3355
3356 do {
3357 val64 = readq(addr);
3358 if (bit_state == S2IO_BIT_RESET) {
3359 if (!(val64 & busy_bit)) {
3360 ret = SUCCESS;
3361 break;
3362 }
3363 		} else {
3364 			if (val64 & busy_bit) {	/* S2IO_BIT_SET: wait for set */
3365 ret = SUCCESS;
3366 break;
3367 }
3368 }
3369
3370 		if (in_interrupt())
3371 mdelay(delay);
3372 else
3373 msleep(delay);
3374
3375 if (++cnt >= 10)
3376 delay = 50;
3377 } while (cnt < 20);
3378 return ret;
3379 }
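
/*
 * Usage sketch (an assumption -- the register shown is a stand-in and the
 * busy-bit constant is supplied by the caller): poll a command-memory
 * register until its busy bit clears.
 */
#if 0
static int example_wait_rmac_cmd(struct s2io_nic *sp, u64 busy_bit)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Return SUCCESS once the busy bit is deasserted */
	return wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, busy_bit,
				     S2IO_BIT_RESET);
}
#endif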
3380 /*
3381 * check_pci_device_id - Checks if the device id is supported
3382 * @id : device id
3383  * Description: Function to check if the PCI device id is supported by the driver.
3384 * Return value: Actual device id if supported else PCI_ANY_ID
3385 */
3386 static u16 check_pci_device_id(u16 id)
3387 {
3388 switch (id) {
3389 case PCI_DEVICE_ID_HERC_WIN:
3390 case PCI_DEVICE_ID_HERC_UNI:
3391 return XFRAME_II_DEVICE;
3392 case PCI_DEVICE_ID_S2IO_UNI:
3393 case PCI_DEVICE_ID_S2IO_WIN:
3394 return XFRAME_I_DEVICE;
3395 default:
3396 return PCI_ANY_ID;
3397 }
3398 }
3399
3400 /**
3401 * s2io_reset - Resets the card.
3402 * @sp : private member of the device structure.
3403 * Description: Function to Reset the card. This function then also
3404 * restores the previously saved PCI configuration space registers as
3405 * the card reset also resets the configuration space.
3406 * Return value:
3407 * void.
3408 */
3409
3410 static void s2io_reset(struct s2io_nic * sp)
3411 {
3412 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3413 u64 val64;
3414 u16 subid, pci_cmd;
3415 int i;
3416 u16 val16;
3417 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3418 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3419
3420 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3421 __FUNCTION__, sp->dev->name);
3422
3423 	/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3424 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3425
3426 if (sp->device_type == XFRAME_II_DEVICE) {
3427 int ret;
3428 ret = pci_set_power_state(sp->pdev, 3);
3429 if (!ret)
3430 ret = pci_set_power_state(sp->pdev, 0);
3431 else {
3432 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3433 __FUNCTION__);
3434 goto old_way;
3435 }
3436 msleep(20);
3437 goto new_way;
3438 }
3439 old_way:
3440 val64 = SW_RESET_ALL;
3441 writeq(val64, &bar0->sw_reset);
3442 new_way:
3443 if (strstr(sp->product_name, "CX4")) {
3444 msleep(750);
3445 }
3446 msleep(250);
3447 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3448
3449 /* Restore the PCI state saved during initialization. */
3450 pci_restore_state(sp->pdev);
3451 pci_read_config_word(sp->pdev, 0x2, &val16);
3452 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3453 break;
3454 msleep(200);
3455 }
3456
3457 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3458 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3459 }
3460
3461 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3462
3463 s2io_init_pci(sp);
3464
3465 /* Set swapper to enable I/O register access */
3466 s2io_set_swapper(sp);
3467
3468 /* Restore the MSIX table entries from local variables */
3469 restore_xmsi_data(sp);
3470
3471 /* Clear certain PCI/PCI-X fields after reset */
3472 if (sp->device_type == XFRAME_II_DEVICE) {
3473 /* Clear "detected parity error" bit */
3474 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3475
3476 /* Clearing PCIX Ecc status register */
3477 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3478
3479 /* Clearing PCI_STATUS error reflected here */
3480 writeq(BIT(62), &bar0->txpic_int_reg);
3481 }
3482
3483 /* Reset device statistics maintained by OS */
3484 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3485
3486 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3487 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3488 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3489 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3490 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3491 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3492 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3493 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3494 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3495 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3496 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3497 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3498 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3499 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3500 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3501 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3502 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3503 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3504 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3505
3506 /* SXE-002: Configure link and activity LED to turn it off */
3507 subid = sp->pdev->subsystem_device;
3508 if (((subid & 0xFF) >= 0x07) &&
3509 (sp->device_type == XFRAME_I_DEVICE)) {
3510 val64 = readq(&bar0->gpio_control);
3511 val64 |= 0x0000800000000000ULL;
3512 writeq(val64, &bar0->gpio_control);
3513 val64 = 0x0411040400000000ULL;
3514 writeq(val64, (void __iomem *)bar0 + 0x2700);
3515 }
3516
3517 /*
3518 	 * Clear spurious ECC interrupts that would have occurred on
3519 * XFRAME II cards after reset.
3520 */
3521 if (sp->device_type == XFRAME_II_DEVICE) {
3522 val64 = readq(&bar0->pcc_err_reg);
3523 writeq(val64, &bar0->pcc_err_reg);
3524 }
3525
3526 /* restore the previously assigned mac address */
3527 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3528
3529 sp->device_enabled_once = FALSE;
3530 }
3531
3532 /**
3533  * s2io_set_swapper - to set the swapper control on the card
3534 * @sp : private member of the device structure,
3535 * pointer to the s2io_nic structure.
3536 * Description: Function to set the swapper control on the card
3537 * correctly depending on the 'endianness' of the system.
3538 * Return value:
3539 * SUCCESS on success and FAILURE on failure.
3540 */
3541
3542 static int s2io_set_swapper(struct s2io_nic * sp)
3543 {
3544 struct net_device *dev = sp->dev;
3545 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3546 u64 val64, valt, valr;
3547
3548 /*
3549 * Set proper endian settings and verify the same by reading
3550 * the PIF Feed-back register.
3551 */
3552
3553 val64 = readq(&bar0->pif_rd_swapper_fb);
3554 if (val64 != 0x0123456789ABCDEFULL) {
3555 int i = 0;
3556 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3557 0x8100008181000081ULL, /* FE=1, SE=0 */
3558 0x4200004242000042ULL, /* FE=0, SE=1 */
3559 0}; /* FE=0, SE=0 */
3560
3561 while(i<4) {
3562 writeq(value[i], &bar0->swapper_ctrl);
3563 val64 = readq(&bar0->pif_rd_swapper_fb);
3564 if (val64 == 0x0123456789ABCDEFULL)
3565 break;
3566 i++;
3567 }
3568 if (i == 4) {
3569 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3570 dev->name);
3571 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3572 (unsigned long long) val64);
3573 return FAILURE;
3574 }
3575 valr = value[i];
3576 } else {
3577 valr = readq(&bar0->swapper_ctrl);
3578 }
3579
3580 valt = 0x0123456789ABCDEFULL;
3581 writeq(valt, &bar0->xmsi_address);
3582 val64 = readq(&bar0->xmsi_address);
3583
3584 if(val64 != valt) {
3585 int i = 0;
3586 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3587 0x0081810000818100ULL, /* FE=1, SE=0 */
3588 0x0042420000424200ULL, /* FE=0, SE=1 */
3589 0}; /* FE=0, SE=0 */
3590
3591 while(i<4) {
3592 writeq((value[i] | valr), &bar0->swapper_ctrl);
3593 writeq(valt, &bar0->xmsi_address);
3594 val64 = readq(&bar0->xmsi_address);
3595 if(val64 == valt)
3596 break;
3597 i++;
3598 }
3599 if(i == 4) {
3600 unsigned long long x = val64;
3601 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3602 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3603 return FAILURE;
3604 }
3605 }
3606 val64 = readq(&bar0->swapper_ctrl);
3607 val64 &= 0xFFFF000000000000ULL;
3608
3609 #ifdef __BIG_ENDIAN
3610 /*
3611 	 * The device is set to a big endian format by default, so a
3612 	 * big endian driver need not change anything.
3613 */
3614 val64 |= (SWAPPER_CTRL_TXP_FE |
3615 SWAPPER_CTRL_TXP_SE |
3616 SWAPPER_CTRL_TXD_R_FE |
3617 SWAPPER_CTRL_TXD_W_FE |
3618 SWAPPER_CTRL_TXF_R_FE |
3619 SWAPPER_CTRL_RXD_R_FE |
3620 SWAPPER_CTRL_RXD_W_FE |
3621 SWAPPER_CTRL_RXF_W_FE |
3622 SWAPPER_CTRL_XMSI_FE |
3623 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3624 if (sp->intr_type == INTA)
3625 val64 |= SWAPPER_CTRL_XMSI_SE;
3626 writeq(val64, &bar0->swapper_ctrl);
3627 #else
3628 /*
3629 * Initially we enable all bits to make it accessible by the
3630 * driver, then we selectively enable only those bits that
3631 * we want to set.
3632 */
3633 val64 |= (SWAPPER_CTRL_TXP_FE |
3634 SWAPPER_CTRL_TXP_SE |
3635 SWAPPER_CTRL_TXD_R_FE |
3636 SWAPPER_CTRL_TXD_R_SE |
3637 SWAPPER_CTRL_TXD_W_FE |
3638 SWAPPER_CTRL_TXD_W_SE |
3639 SWAPPER_CTRL_TXF_R_FE |
3640 SWAPPER_CTRL_RXD_R_FE |
3641 SWAPPER_CTRL_RXD_R_SE |
3642 SWAPPER_CTRL_RXD_W_FE |
3643 SWAPPER_CTRL_RXD_W_SE |
3644 SWAPPER_CTRL_RXF_W_FE |
3645 SWAPPER_CTRL_XMSI_FE |
3646 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3647 if (sp->intr_type == INTA)
3648 val64 |= SWAPPER_CTRL_XMSI_SE;
3649 writeq(val64, &bar0->swapper_ctrl);
3650 #endif
3651 val64 = readq(&bar0->swapper_ctrl);
3652
3653 /*
3654 * Verifying if endian settings are accurate by reading a
3655 * feedback register.
3656 */
3657 val64 = readq(&bar0->pif_rd_swapper_fb);
3658 if (val64 != 0x0123456789ABCDEFULL) {
3659 		/* Endian settings are incorrect, take another look. */
3660 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3661 dev->name);
3662 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3663 (unsigned long long) val64);
3664 return FAILURE;
3665 }
3666
3667 return SUCCESS;
3668 }
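/*
 * A note on the search above: the candidate swapper_ctrl values differ
 * only in which of the two swap stages (FE and SE, per the inline
 * comments) they enable. A setting is accepted once the feedback
 * register reads back 0x0123456789ABCDEF; the same probe is then
 * repeated for the XMSI path, and finally the per-path enable bits
 * (TXP/TXD/RXD/XMSI/STATS) matching this host's endianness are written.
 */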
3669
3670 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3671 {
3672 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3673 u64 val64;
3674 int ret = 0, cnt = 0;
3675
3676 do {
3677 val64 = readq(&bar0->xmsi_access);
3678 if (!(val64 & BIT(15)))
3679 break;
3680 mdelay(1);
3681 cnt++;
3682 } while(cnt < 5);
3683 if (cnt == 5) {
3684 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3685 ret = 1;
3686 }
3687
3688 return ret;
3689 }
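/*
 * The two helpers below save and restore the per-vector MSI-X
 * address/data pairs through the indirect xmsi_access window: program
 * the vector index via vBIT(i, 26, 6), set the strobe bit BIT(15), and
 * poll for the strobe to clear with wait_for_msix_trans(). BIT(7)
 * appears to select the write (restore) direction, loading the values
 * staged in xmsi_address/xmsi_data into the vector.
 */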
3690
3691 static void restore_xmsi_data(struct s2io_nic *nic)
3692 {
3693 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3694 u64 val64;
3695 int i;
3696
3697 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3698 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3699 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3700 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3701 writeq(val64, &bar0->xmsi_access);
3702 if (wait_for_msix_trans(nic, i)) {
3703 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3704 continue;
3705 }
3706 }
3707 }
3708
3709 static void store_xmsi_data(struct s2io_nic *nic)
3710 {
3711 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3712 u64 val64, addr, data;
3713 int i;
3714
3715 /* Store and display */
3716 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3717 val64 = (BIT(15) | vBIT(i, 26, 6));
3718 writeq(val64, &bar0->xmsi_access);
3719 if (wait_for_msix_trans(nic, i)) {
3720 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3721 continue;
3722 }
3723 addr = readq(&bar0->xmsi_address);
3724 data = readq(&bar0->xmsi_data);
3725 if (addr && data) {
3726 nic->msix_info[i].addr = addr;
3727 nic->msix_info[i].data = data;
3728 }
3729 }
3730 }
3731
3732 int s2io_enable_msi(struct s2io_nic *nic)
3733 {
3734 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3735 u16 msi_ctrl, msg_val;
3736 struct config_param *config = &nic->config;
3737 struct net_device *dev = nic->dev;
3738 u64 val64, tx_mat, rx_mat;
3739 int i, err;
3740
3741 val64 = readq(&bar0->pic_control);
3742 val64 &= ~BIT(1);
3743 writeq(val64, &bar0->pic_control);
3744
3745 err = pci_enable_msi(nic->pdev);
3746 if (err) {
3747 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3748 nic->dev->name);
3749 return err;
3750 }
3751
3752 /*
3753 	 * Enable MSI and use MSI-1 instead of the standard MSI-0
3754 * for interrupt handling.
3755 */
3756 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3757 msg_val ^= 0x1;
3758 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3759 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3760
3761 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3762 msi_ctrl |= 0x10;
3763 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
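	/*
	 * The offsets used above are consistent with an MSI capability at
	 * config offset 0x40 (message control at +2, 64-bit message data
	 * at +0xC): flipping the low bit of the message data selects
	 * vector 1, and 0x10 in the control word sets the
	 * multiple-message-enable field so that vector 1 is usable. This
	 * is a reading of this adapter's config layout, not a general
	 * PCI rule.
	 */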
3764
3765 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3766 tx_mat = readq(&bar0->tx_mat0_n[0]);
3767 for (i=0; i<config->tx_fifo_num; i++) {
3768 tx_mat |= TX_MAT_SET(i, 1);
3769 }
3770 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3771
3772 rx_mat = readq(&bar0->rx_mat);
3773 for (i=0; i<config->rx_ring_num; i++) {
3774 rx_mat |= RX_MAT_SET(i, 1);
3775 }
3776 writeq(rx_mat, &bar0->rx_mat);
3777
3778 dev->irq = nic->pdev->irq;
3779 return 0;
3780 }
3781
3782 static int s2io_enable_msi_x(struct s2io_nic *nic)
3783 {
3784 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3785 u64 tx_mat, rx_mat;
3786 u16 msi_control; /* Temp variable */
3787 int ret, i, j, msix_indx = 1;
3788
3789 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3790 GFP_KERNEL);
3791 if (nic->entries == NULL) {
3792 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3793 			__FUNCTION__);
3794 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3795 return -ENOMEM;
3796 }
3797 nic->mac_control.stats_info->sw_stat.mem_allocated
3798 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3799 memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3800
3801 nic->s2io_entries =
3802 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3803 GFP_KERNEL);
3804 if (nic->s2io_entries == NULL) {
3805 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3806 __FUNCTION__);
3807 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3808 kfree(nic->entries);
3809 nic->mac_control.stats_info->sw_stat.mem_freed
3810 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3811 return -ENOMEM;
3812 }
3813 nic->mac_control.stats_info->sw_stat.mem_allocated
3814 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3815 memset(nic->s2io_entries, 0,
3816 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3817
3818 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3819 nic->entries[i].entry = i;
3820 nic->s2io_entries[i].entry = i;
3821 nic->s2io_entries[i].arg = NULL;
3822 nic->s2io_entries[i].in_use = 0;
3823 }
3824
3825 tx_mat = readq(&bar0->tx_mat0_n[0]);
3826 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3827 tx_mat |= TX_MAT_SET(i, msix_indx);
3828 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3829 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3830 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3831 }
3832 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3833
3834 if (!nic->config.bimodal) {
3835 rx_mat = readq(&bar0->rx_mat);
3836 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3837 rx_mat |= RX_MAT_SET(j, msix_indx);
3838 nic->s2io_entries[msix_indx].arg
3839 = &nic->mac_control.rings[j];
3840 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3841 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3842 }
3843 writeq(rx_mat, &bar0->rx_mat);
3844 } else {
3845 tx_mat = readq(&bar0->tx_mat0_n[7]);
3846 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3847 			tx_mat |= TX_MAT_SET(j, msix_indx);
3848 nic->s2io_entries[msix_indx].arg
3849 = &nic->mac_control.rings[j];
3850 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3851 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3852 }
3853 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3854 }
3855
3856 nic->avail_msix_vectors = 0;
3857 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3858 	/* Fail init on error or if we get fewer vectors than the minimum required */
3859 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3860 nic->avail_msix_vectors = ret;
3861 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3862 }
3863 if (ret) {
3864 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3865 kfree(nic->entries);
3866 nic->mac_control.stats_info->sw_stat.mem_freed
3867 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3868 kfree(nic->s2io_entries);
3869 nic->mac_control.stats_info->sw_stat.mem_freed
3870 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3871 nic->entries = NULL;
3872 nic->s2io_entries = NULL;
3873 nic->avail_msix_vectors = 0;
3874 return -ENOMEM;
3875 }
3876 if (!nic->avail_msix_vectors)
3877 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3878
3879 /*
3880 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3881 * in the herc NIC. (Temp change, needs to be removed later)
3882 */
3883 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3884 msi_control |= 0x1; /* Enable MSI */
3885 pci_write_config_word(nic->pdev, 0x42, msi_control);
3886
3887 return 0;
3888 }
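/*
 * Resulting vector layout (a summary of the mapping programmed above):
 * vector 0 is not assigned to any FIFO or ring here, vectors
 * 1..tx_fifo_num serve the Tx FIFOs, and the next rx_ring_num vectors
 * serve the Rx rings -- via rx_mat normally, or via tx_mat0_n[7] when
 * bimodal interrupts are configured.
 */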
3889
3890 /* ********************************************************* *
3891 * Functions defined below concern the OS part of the driver *
3892 * ********************************************************* */
3893
3894 /**
3895 * s2io_open - open entry point of the driver
3896 * @dev : pointer to the device structure.
3897 * Description:
3898 * This function is the open entry point of the driver. It mainly calls a
3899 * function to allocate Rx buffers and inserts them into the buffer
3900 * descriptors and then enables the Rx part of the NIC.
3901 * Return value:
3902 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3903 * file on failure.
3904 */
3905
3906 static int s2io_open(struct net_device *dev)
3907 {
3908 struct s2io_nic *sp = dev->priv;
3909 int err = 0;
3910
3911 /*
3912 * Make sure you have link off by default every time
3913 * Nic is initialized
3914 */
3915 netif_carrier_off(dev);
3916 sp->last_link_state = 0;
3917
3918 /* Initialize H/W and enable interrupts */
3919 err = s2io_card_up(sp);
3920 if (err) {
3921 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3922 dev->name);
3923 goto hw_init_failed;
3924 }
3925
3926 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3927 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3928 s2io_card_down(sp);
3929 err = -ENODEV;
3930 goto hw_init_failed;
3931 }
3932
3933 netif_start_queue(dev);
3934 return 0;
3935
3936 hw_init_failed:
3937 if (sp->intr_type == MSI_X) {
3938 if (sp->entries) {
3939 kfree(sp->entries);
3940 sp->mac_control.stats_info->sw_stat.mem_freed
3941 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3942 }
3943 if (sp->s2io_entries) {
3944 kfree(sp->s2io_entries);
3945 sp->mac_control.stats_info->sw_stat.mem_freed
3946 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3947 }
3948 }
3949 return err;
3950 }
3951
3952 /**
3953 * s2io_close -close entry point of the driver
3954 * @dev : device pointer.
3955 * Description:
3956 * This is the stop entry point of the driver. It needs to undo exactly
3957  * whatever was done by the open entry point, thus it's usually referred to
3958  * as the close function. Among other things this function mainly stops the
3959 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3960 * Return value:
3961 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3962 * file on failure.
3963 */
3964
3965 static int s2io_close(struct net_device *dev)
3966 {
3967 struct s2io_nic *sp = dev->priv;
3968
3969 netif_stop_queue(dev);
3970 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3971 s2io_card_down(sp);
3972
3973 sp->device_close_flag = TRUE; /* Device is shut down. */
3974 return 0;
3975 }
3976
3977 /**
3978  * s2io_xmit - Tx entry point of the driver
3979 * @skb : the socket buffer containing the Tx data.
3980 * @dev : device pointer.
3981 * Description :
3982 * This function is the Tx entry point of the driver. S2IO NIC supports
3983 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3984  * NOTE: when the device cannot queue the packet, the trans_start variable
3985  * will simply not be updated.
3986 * Return value:
3987 * 0 on success & 1 on failure.
3988 */
3989
3990 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3991 {
3992 struct s2io_nic *sp = dev->priv;
3993 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3994 register u64 val64;
3995 struct TxD *txdp;
3996 struct TxFIFO_element __iomem *tx_fifo;
3997 unsigned long flags;
3998 u16 vlan_tag = 0;
3999 int vlan_priority = 0;
4000 struct mac_info *mac_control;
4001 struct config_param *config;
4002 int offload_type;
4003
4004 mac_control = &sp->mac_control;
4005 config = &sp->config;
4006
4007 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4008
4009 if (unlikely(skb->len <= 0)) {
4010 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4011 dev_kfree_skb_any(skb);
4012 return 0;
4013 }
4014
4015 spin_lock_irqsave(&sp->tx_lock, flags);
4016 if (atomic_read(&sp->card_state) == CARD_DOWN) {
4017 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4018 dev->name);
4019 spin_unlock_irqrestore(&sp->tx_lock, flags);
4020 dev_kfree_skb(skb);
4021 return 0;
4022 }
4023
4024 queue = 0;
4025 /* Get Fifo number to Transmit based on vlan priority */
4026 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4027 vlan_tag = vlan_tx_tag_get(skb);
4028 vlan_priority = vlan_tag >> 13;
4029 queue = config->fifo_mapping[vlan_priority];
4030 }
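	/*
	 * The 802.1Q tag control word carries the 3-bit user priority in
	 * bits 15:13, hence the shift by 13; fifo_mapping[] then maps
	 * priority 0..7 onto one of the configured Tx FIFOs.
	 */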
4031
4032 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
4033 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
4034 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
4035 list_virt_addr;
4036
4037 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
4038 /* Avoid "put" pointer going beyond "get" pointer */
4039 if (txdp->Host_Control ||
4040 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4041 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4042 netif_stop_queue(dev);
4043 dev_kfree_skb(skb);
4044 spin_unlock_irqrestore(&sp->tx_lock, flags);
4045 return 0;
4046 }
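	/*
	 * Worked example of the wrap check above: with queue_len 8,
	 * put_off 7 and get_off 0, (put_off + 1) == queue_len wraps to 0,
	 * which equals get_off -- advancing "put" would collide with
	 * "get", so no descriptor is free and the queue is stopped.
	 */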
4047
4048 offload_type = s2io_offload_type(skb);
4049 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4050 txdp->Control_1 |= TXD_TCP_LSO_EN;
4051 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4052 }
4053 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4054 txdp->Control_2 |=
4055 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4056 TXD_TX_CKO_UDP_EN);
4057 }
4058 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4059 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4060 txdp->Control_2 |= config->tx_intr_type;
4061
4062 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4063 txdp->Control_2 |= TXD_VLAN_ENABLE;
4064 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4065 }
4066
4067 frg_len = skb->len - skb->data_len;
4068 if (offload_type == SKB_GSO_UDP) {
4069 int ufo_size;
4070
4071 ufo_size = s2io_udp_mss(skb);
4072 ufo_size &= ~7;
4073 txdp->Control_1 |= TXD_UFO_EN;
4074 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4075 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4076 #ifdef __BIG_ENDIAN
4077 sp->ufo_in_band_v[put_off] =
4078 (u64)skb_shinfo(skb)->ip6_frag_id;
4079 #else
4080 sp->ufo_in_band_v[put_off] =
4081 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4082 #endif
4083 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4084 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4085 sp->ufo_in_band_v,
4086 sizeof(u64), PCI_DMA_TODEVICE);
4087 txdp++;
4088 }
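	/*
	 * For UFO the first TxD carries no frame data: it points at an
	 * 8-byte in-band header holding the IPv6 fragment id (stored in
	 * the byte order the NIC expects, hence the #ifdef above), and
	 * the actual frame starts at the next descriptor -- which is why
	 * frg_cnt is incremented further below for the UFO case.
	 */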
4089
4090 txdp->Buffer_Pointer = pci_map_single
4091 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4092 txdp->Host_Control = (unsigned long) skb;
4093 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4094 if (offload_type == SKB_GSO_UDP)
4095 txdp->Control_1 |= TXD_UFO_EN;
4096
4097 frg_cnt = skb_shinfo(skb)->nr_frags;
4098 /* For fragmented SKB. */
4099 for (i = 0; i < frg_cnt; i++) {
4100 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4101 /* A '0' length fragment will be ignored */
4102 if (!frag->size)
4103 continue;
4104 txdp++;
4105 txdp->Buffer_Pointer = (u64) pci_map_page
4106 (sp->pdev, frag->page, frag->page_offset,
4107 frag->size, PCI_DMA_TODEVICE);
4108 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4109 if (offload_type == SKB_GSO_UDP)
4110 txdp->Control_1 |= TXD_UFO_EN;
4111 }
4112 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4113
4114 if (offload_type == SKB_GSO_UDP)
4115 frg_cnt++; /* as Txd0 was used for inband header */
4116
4117 tx_fifo = mac_control->tx_FIFO_start[queue];
4118 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4119 writeq(val64, &tx_fifo->TxDL_Pointer);
4120
4121 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4122 TX_FIFO_LAST_LIST);
4123 if (offload_type)
4124 val64 |= TX_FIFO_SPECIAL_FUNC;
4125
4126 writeq(val64, &tx_fifo->List_Control);
4127
4128 mmiowb();
4129
4130 put_off++;
4131 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4132 put_off = 0;
4133 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4134
4135 /* Avoid "put" pointer going beyond "get" pointer */
4136 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4137 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4138 DBG_PRINT(TX_DBG,
4139 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4140 put_off, get_off);
4141 netif_stop_queue(dev);
4142 }
4143 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4144 dev->trans_start = jiffies;
4145 spin_unlock_irqrestore(&sp->tx_lock, flags);
4146
4147 return 0;
4148 }
4149
4150 static void
4151 s2io_alarm_handle(unsigned long data)
4152 {
4153 struct s2io_nic *sp = (struct s2io_nic *)data;
4154
4155 alarm_intr_handler(sp);
4156 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4157 }
4158
4159 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4160 {
4161 int rxb_size, level;
4162
4163 if (!sp->lro) {
4164 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4165 level = rx_buffer_level(sp, rxb_size, rng_n);
4166
4167 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4168 int ret;
4169 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4170 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4171 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4172 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4173 __FUNCTION__);
4174 clear_bit(0, (&sp->tasklet_status));
4175 return -1;
4176 }
4177 clear_bit(0, (&sp->tasklet_status));
4178 } else if (level == LOW)
4179 tasklet_schedule(&sp->task);
4180
4181 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4182 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4183 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4184 }
4185 return 0;
4186 }
4187
4188 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4189 {
4190 struct net_device *dev = (struct net_device *) dev_id;
4191 struct s2io_nic *sp = dev->priv;
4192 int i;
4193 struct mac_info *mac_control;
4194 struct config_param *config;
4195
4196 atomic_inc(&sp->isr_cnt);
4197 mac_control = &sp->mac_control;
4198 config = &sp->config;
4199 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4200
4201 /* If Intr is because of Rx Traffic */
4202 for (i = 0; i < config->rx_ring_num; i++)
4203 rx_intr_handler(&mac_control->rings[i]);
4204
4205 /* If Intr is because of Tx Traffic */
4206 for (i = 0; i < config->tx_fifo_num; i++)
4207 tx_intr_handler(&mac_control->fifos[i]);
4208
4209 /*
4210 * If the Rx buffer count is below the panic threshold then
4211 * reallocate the buffers from the interrupt handler itself,
4212 * else schedule a tasklet to reallocate the buffers.
4213 */
4214 for (i = 0; i < config->rx_ring_num; i++)
4215 s2io_chk_rx_buffers(sp, i);
4216
4217 atomic_dec(&sp->isr_cnt);
4218 return IRQ_HANDLED;
4219 }
4220
4221 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4222 {
4223 struct ring_info *ring = (struct ring_info *)dev_id;
4224 struct s2io_nic *sp = ring->nic;
4225
4226 atomic_inc(&sp->isr_cnt);
4227
4228 rx_intr_handler(ring);
4229 s2io_chk_rx_buffers(sp, ring->ring_no);
4230
4231 atomic_dec(&sp->isr_cnt);
4232 return IRQ_HANDLED;
4233 }
4234
4235 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4236 {
4237 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4238 struct s2io_nic *sp = fifo->nic;
4239
4240 atomic_inc(&sp->isr_cnt);
4241 tx_intr_handler(fifo);
4242 atomic_dec(&sp->isr_cnt);
4243 return IRQ_HANDLED;
4244 }
4245 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4246 {
4247 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4248 u64 val64;
4249
4250 val64 = readq(&bar0->pic_int_status);
4251 if (val64 & PIC_INT_GPIO) {
4252 val64 = readq(&bar0->gpio_int_reg);
4253 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4254 (val64 & GPIO_INT_REG_LINK_UP)) {
4255 /*
4256 			 * This is an unstable state, so clear both up/down
4257 			 * interrupts and let the adapter re-evaluate the link state.
4258 */
4259 val64 |= GPIO_INT_REG_LINK_DOWN;
4260 val64 |= GPIO_INT_REG_LINK_UP;
4261 writeq(val64, &bar0->gpio_int_reg);
4262 val64 = readq(&bar0->gpio_int_mask);
4263 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4264 GPIO_INT_MASK_LINK_DOWN);
4265 writeq(val64, &bar0->gpio_int_mask);
4266 }
4267 else if (val64 & GPIO_INT_REG_LINK_UP) {
4268 val64 = readq(&bar0->adapter_status);
4269 /* Enable Adapter */
4270 val64 = readq(&bar0->adapter_control);
4271 val64 |= ADAPTER_CNTL_EN;
4272 writeq(val64, &bar0->adapter_control);
4273 val64 |= ADAPTER_LED_ON;
4274 writeq(val64, &bar0->adapter_control);
4275 if (!sp->device_enabled_once)
4276 sp->device_enabled_once = 1;
4277
4278 s2io_link(sp, LINK_UP);
4279 /*
4280 * unmask link down interrupt and mask link-up
4281 * intr
4282 */
4283 val64 = readq(&bar0->gpio_int_mask);
4284 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4285 val64 |= GPIO_INT_MASK_LINK_UP;
4286 writeq(val64, &bar0->gpio_int_mask);
4287
4288 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4289 val64 = readq(&bar0->adapter_status);
4290 s2io_link(sp, LINK_DOWN);
4291 			/* Link is down so unmask link up interrupt */
4292 val64 = readq(&bar0->gpio_int_mask);
4293 val64 &= ~GPIO_INT_MASK_LINK_UP;
4294 val64 |= GPIO_INT_MASK_LINK_DOWN;
4295 writeq(val64, &bar0->gpio_int_mask);
4296
4297 /* turn off LED */
4298 val64 = readq(&bar0->adapter_control);
4299 val64 = val64 &(~ADAPTER_LED_ON);
4300 writeq(val64, &bar0->adapter_control);
4301 }
4302 }
4303 val64 = readq(&bar0->gpio_int_mask);
4304 }
4305
4306 /**
4307  *  s2io_isr - ISR handler of the device.
4308 * @irq: the irq of the device.
4309 * @dev_id: a void pointer to the dev structure of the NIC.
4310 * Description: This function is the ISR handler of the device. It
4311 * identifies the reason for the interrupt and calls the relevant
4312  *  service routines. As a contingency measure, this ISR allocates the
4313 * recv buffers, if their numbers are below the panic value which is
4314 * presently set to 25% of the original number of rcv buffers allocated.
4315 * Return value:
4316 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4317 * IRQ_NONE: will be returned if interrupt is not from our device
4318 */
4319 static irqreturn_t s2io_isr(int irq, void *dev_id)
4320 {
4321 struct net_device *dev = (struct net_device *) dev_id;
4322 struct s2io_nic *sp = dev->priv;
4323 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4324 int i;
4325 u64 reason = 0;
4326 struct mac_info *mac_control;
4327 struct config_param *config;
4328
4329 /* Pretend we handled any irq's from a disconnected card */
4330 if (pci_channel_offline(sp->pdev))
4331 return IRQ_NONE;
4332
4333 atomic_inc(&sp->isr_cnt);
4334 mac_control = &sp->mac_control;
4335 config = &sp->config;
4336
4337 /*
4338 * Identify the cause for interrupt and call the appropriate
4339 * interrupt handler. Causes for the interrupt could be;
4340 * 1. Rx of packet.
4341 * 2. Tx complete.
4342 * 3. Link down.
4343 * 4. Error in any functional blocks of the NIC.
4344 */
4345 reason = readq(&bar0->general_int_status);
4346
4347 if (!reason) {
4348 /* The interrupt was not raised by us. */
4349 atomic_dec(&sp->isr_cnt);
4350 return IRQ_NONE;
4351 }
4352 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4353 /* Disable device and get out */
4354 atomic_dec(&sp->isr_cnt);
4355 return IRQ_NONE;
4356 }
4357
4358 if (napi) {
4359 if (reason & GEN_INTR_RXTRAFFIC) {
4360 if ( likely ( netif_rx_schedule_prep(dev)) ) {
4361 __netif_rx_schedule(dev);
4362 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4363 }
4364 else
4365 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4366 }
4367 } else {
4368 /*
4369 * Rx handler is called by default, without checking for the
4370 * cause of interrupt.
4371 * rx_traffic_int reg is an R1 register, writing all 1's
4372 		 * will ensure that the actual interrupt causing bit gets
4373 * cleared and hence a read can be avoided.
4374 */
4375 if (reason & GEN_INTR_RXTRAFFIC)
4376 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4377
4378 for (i = 0; i < config->rx_ring_num; i++) {
4379 rx_intr_handler(&mac_control->rings[i]);
4380 }
4381 }
4382
4383 /*
4384 * tx_traffic_int reg is an R1 register, writing all 1's
4385 	 * will ensure that the actual interrupt causing bit gets
4386 * cleared and hence a read can be avoided.
4387 */
4388 if (reason & GEN_INTR_TXTRAFFIC)
4389 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4390
4391 for (i = 0; i < config->tx_fifo_num; i++)
4392 tx_intr_handler(&mac_control->fifos[i]);
4393
4394 if (reason & GEN_INTR_TXPIC)
4395 s2io_txpic_intr_handle(sp);
4396 /*
4397 * If the Rx buffer count is below the panic threshold then
4398 * reallocate the buffers from the interrupt handler itself,
4399 * else schedule a tasklet to reallocate the buffers.
4400 */
4401 if (!napi) {
4402 for (i = 0; i < config->rx_ring_num; i++)
4403 s2io_chk_rx_buffers(sp, i);
4404 }
4405
4406 writeq(0, &bar0->general_int_mask);
4407 readl(&bar0->general_int_status);
4408
4409 atomic_dec(&sp->isr_cnt);
4410 return IRQ_HANDLED;
4411 }
4412
4413 /**
4414  * s2io_updt_stats - trigger a one-shot update of the hardware statistics.
4415  */
4416 static void s2io_updt_stats(struct s2io_nic *sp)
4417 {
4418 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4419 u64 val64;
4420 int cnt = 0;
4421
4422 if (atomic_read(&sp->card_state) == CARD_UP) {
4423 /* Apprx 30us on a 133 MHz bus */
4424 val64 = SET_UPDT_CLICKS(10) |
4425 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4426 writeq(val64, &bar0->stat_cfg);
4427 do {
4428 udelay(100);
4429 val64 = readq(&bar0->stat_cfg);
4430 if (!(val64 & BIT(0)))
4431 break;
4432 cnt++;
4433 if (cnt == 5)
4434 break; /* Updt failed */
4435 } while(1);
4436 }
4437 }
4438
4439 /**
4440 * s2io_get_stats - Updates the device statistics structure.
4441 * @dev : pointer to the device structure.
4442 * Description:
4443 * This function updates the device statistics structure in the s2io_nic
4444 * structure and returns a pointer to the same.
4445 * Return value:
4446 * pointer to the updated net_device_stats structure.
4447 */
4448
4449 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4450 {
4451 struct s2io_nic *sp = dev->priv;
4452 struct mac_info *mac_control;
4453 struct config_param *config;
4454
4455
4456 mac_control = &sp->mac_control;
4457 config = &sp->config;
4458
4459 /* Configure Stats for immediate updt */
4460 s2io_updt_stats(sp);
4461
4462 sp->stats.tx_packets =
4463 le32_to_cpu(mac_control->stats_info->tmac_frms);
4464 sp->stats.tx_errors =
4465 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4466 sp->stats.rx_errors =
4467 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4468 sp->stats.multicast =
4469 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4470 sp->stats.rx_length_errors =
4471 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4472
4473 return (&sp->stats);
4474 }
4475
4476 /**
4477 * s2io_set_multicast - entry point for multicast address enable/disable.
4478 * @dev : pointer to the device structure
4479 * Description:
4480 * This function is a driver entry point which gets called by the kernel
4481 * whenever multicast addresses must be enabled/disabled. This also gets
4482  *  called to set/reset promiscuous mode. Depending on the device flags, we
4483  *  determine whether multicast addresses must be enabled or if promiscuous
4484  *  mode is to be disabled, etc.
4485 * Return value:
4486 * void.
4487 */
4488
4489 static void s2io_set_multicast(struct net_device *dev)
4490 {
4491 int i, j, prev_cnt;
4492 struct dev_mc_list *mclist;
4493 struct s2io_nic *sp = dev->priv;
4494 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4495 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4496 0xfeffffffffffULL;
4497 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4498 void __iomem *add;
4499
4500 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4501 /* Enable all Multicast addresses */
4502 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4503 &bar0->rmac_addr_data0_mem);
4504 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4505 &bar0->rmac_addr_data1_mem);
4506 val64 = RMAC_ADDR_CMD_MEM_WE |
4507 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4508 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4509 writeq(val64, &bar0->rmac_addr_cmd_mem);
4510 /* Wait till command completes */
4511 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4512 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4513 S2IO_BIT_RESET);
4514
4515 sp->m_cast_flg = 1;
4516 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4517 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4518 /* Disable all Multicast addresses */
4519 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4520 &bar0->rmac_addr_data0_mem);
4521 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4522 &bar0->rmac_addr_data1_mem);
4523 val64 = RMAC_ADDR_CMD_MEM_WE |
4524 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4525 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4526 writeq(val64, &bar0->rmac_addr_cmd_mem);
4527 /* Wait till command completes */
4528 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4529 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4530 S2IO_BIT_RESET);
4531
4532 sp->m_cast_flg = 0;
4533 sp->all_multi_pos = 0;
4534 }
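	/*
	 * All RMAC filter updates in this function follow the same
	 * indirect pattern: load rmac_addr_data0_mem/data1_mem, kick the
	 * command memory with RMAC_ADDR_CMD_MEM_WE | STROBE_NEW_CMD plus
	 * the target offset, then poll with wait_for_cmd_complete()
	 * until the strobe clears.
	 */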
4535
4536 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4537 /* Put the NIC into promiscuous mode */
4538 add = &bar0->mac_cfg;
4539 val64 = readq(&bar0->mac_cfg);
4540 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4541
4542 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4543 writel((u32) val64, add);
4544 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4545 writel((u32) (val64 >> 32), (add + 4));
4546
4547 if (vlan_tag_strip != 1) {
4548 val64 = readq(&bar0->rx_pa_cfg);
4549 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4550 writeq(val64, &bar0->rx_pa_cfg);
4551 vlan_strip_flag = 0;
4552 }
4553
4554 val64 = readq(&bar0->mac_cfg);
4555 sp->promisc_flg = 1;
4556 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4557 dev->name);
4558 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4559 /* Remove the NIC from promiscuous mode */
4560 add = &bar0->mac_cfg;
4561 val64 = readq(&bar0->mac_cfg);
4562 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4563
4564 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4565 writel((u32) val64, add);
4566 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4567 writel((u32) (val64 >> 32), (add + 4));
4568
4569 if (vlan_tag_strip != 0) {
4570 val64 = readq(&bar0->rx_pa_cfg);
4571 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4572 writeq(val64, &bar0->rx_pa_cfg);
4573 vlan_strip_flag = 1;
4574 }
4575
4576 val64 = readq(&bar0->mac_cfg);
4577 sp->promisc_flg = 0;
4578 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4579 dev->name);
4580 }
4581
4582 /* Update individual M_CAST address list */
4583 if ((!sp->m_cast_flg) && dev->mc_count) {
4584 if (dev->mc_count >
4585 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4586 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4587 dev->name);
4588 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4589 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4590 return;
4591 }
4592
4593 prev_cnt = sp->mc_addr_count;
4594 sp->mc_addr_count = dev->mc_count;
4595
4596 /* Clear out the previous list of Mc in the H/W. */
4597 for (i = 0; i < prev_cnt; i++) {
4598 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4599 &bar0->rmac_addr_data0_mem);
4600 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4601 &bar0->rmac_addr_data1_mem);
4602 val64 = RMAC_ADDR_CMD_MEM_WE |
4603 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4604 RMAC_ADDR_CMD_MEM_OFFSET
4605 (MAC_MC_ADDR_START_OFFSET + i);
4606 writeq(val64, &bar0->rmac_addr_cmd_mem);
4607
4608 			/* Wait till command completes */
4609 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4610 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4611 S2IO_BIT_RESET)) {
4612 DBG_PRINT(ERR_DBG, "%s: Adding ",
4613 dev->name);
4614 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4615 return;
4616 }
4617 }
4618
4619 /* Create the new Rx filter list and update the same in H/W. */
4620 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4621 i++, mclist = mclist->next) {
4622 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4623 ETH_ALEN);
4624 mac_addr = 0;
4625 for (j = 0; j < ETH_ALEN; j++) {
4626 mac_addr |= mclist->dmi_addr[j];
4627 mac_addr <<= 8;
4628 }
4629 mac_addr >>= 8;
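			/*
			 * Example: for the address 00:11:22:33:44:55 the
			 * loop above yields mac_addr == 0x001122334455ULL --
			 * the first octet lands in the most significant used
			 * byte, matching the RMAC_ADDR_DATA0_MEM layout.
			 */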
4630 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4631 &bar0->rmac_addr_data0_mem);
4632 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4633 &bar0->rmac_addr_data1_mem);
4634 val64 = RMAC_ADDR_CMD_MEM_WE |
4635 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4636 RMAC_ADDR_CMD_MEM_OFFSET
4637 (i + MAC_MC_ADDR_START_OFFSET);
4638 writeq(val64, &bar0->rmac_addr_cmd_mem);
4639
4640 			/* Wait till command completes */
4641 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4642 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4643 S2IO_BIT_RESET)) {
4644 DBG_PRINT(ERR_DBG, "%s: Adding ",
4645 dev->name);
4646 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4647 return;
4648 }
4649 }
4650 }
4651 }
4652
4653 /**
4654 * s2io_set_mac_addr - Programs the Xframe mac address
4655 * @dev : pointer to the device structure.
4656 * @addr: a uchar pointer to the new mac address which is to be set.
4657 * Description : This procedure will program the Xframe to receive
4658 * frames with new Mac Address
4659 * Return value: SUCCESS on success and an appropriate (-)ve integer
4660 * as defined in errno.h file on failure.
4661 */
4662
4663 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4664 {
4665 struct s2io_nic *sp = dev->priv;
4666 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4667 register u64 val64, mac_addr = 0;
4668 int i;
4669 u64 old_mac_addr = 0;
4670
4671 /*
4672 * Set the new MAC address as the new unicast filter and reflect this
4673 * change on the device address registered with the OS. It will be
4674 * at offset 0.
4675 */
4676 for (i = 0; i < ETH_ALEN; i++) {
4677 mac_addr <<= 8;
4678 mac_addr |= addr[i];
4679 old_mac_addr <<= 8;
4680 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4681 }
4682
4683 if(0 == mac_addr)
4684 return SUCCESS;
4685
4686 /* Update the internal structure with this new mac address */
4687 if(mac_addr != old_mac_addr) {
4688 		memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4689 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4690 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4691 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4692 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4693 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4694 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4695 }
4696
4697 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4698 &bar0->rmac_addr_data0_mem);
4699
4700 val64 =
4701 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4702 RMAC_ADDR_CMD_MEM_OFFSET(0);
4703 writeq(val64, &bar0->rmac_addr_cmd_mem);
4704 /* Wait till command completes */
4705 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4706 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4707 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4708 return FAILURE;
4709 }
4710
4711 return SUCCESS;
4712 }
4713
4714 /**
4715 * s2io_ethtool_sset - Sets different link parameters.
4716  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4717 * @info: pointer to the structure with parameters given by ethtool to set
4718 * link information.
4719 * Description:
4720 * The function sets different link parameters provided by the user onto
4721 * the NIC.
4722 * Return value:
4723 * 0 on success.
4724 */
4725
4726 static int s2io_ethtool_sset(struct net_device *dev,
4727 struct ethtool_cmd *info)
4728 {
4729 struct s2io_nic *sp = dev->priv;
4730 if ((info->autoneg == AUTONEG_ENABLE) ||
4731 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4732 return -EINVAL;
4733 else {
4734 s2io_close(sp->dev);
4735 s2io_open(sp->dev);
4736 }
4737
4738 return 0;
4739 }
4740
4741 /**
4742  * s2io_ethtool_gset - Return link specific information.
4743 * @sp : private member of the device structure, pointer to the
4744 * s2io_nic structure.
4745 * @info : pointer to the structure with parameters given by ethtool
4746 * to return link information.
4747 * Description:
4748 * Returns link specific information like speed, duplex etc.. to ethtool.
4749 * Return value :
4750 * return 0 on success.
4751 */
4752
4753 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4754 {
4755 struct s2io_nic *sp = dev->priv;
4756 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4757 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4758 info->port = PORT_FIBRE;
4759 /* info->transceiver?? TODO */
4760
4761 if (netif_carrier_ok(sp->dev)) {
4762 info->speed = 10000;
4763 info->duplex = DUPLEX_FULL;
4764 } else {
4765 info->speed = -1;
4766 info->duplex = -1;
4767 }
4768
4769 info->autoneg = AUTONEG_DISABLE;
4770 return 0;
4771 }
4772
4773 /**
4774 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4775 * @sp : private member of the device structure, which is a pointer to the
4776 * s2io_nic structure.
4777 * @info : pointer to the structure with parameters given by ethtool to
4778 * return driver information.
4779 * Description:
4780  *  Returns driver specific information like name, version, etc. to ethtool.
4781 * Return value:
4782 * void
4783 */
4784
4785 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4786 struct ethtool_drvinfo *info)
4787 {
4788 struct s2io_nic *sp = dev->priv;
4789
4790 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4791 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4792 strncpy(info->fw_version, "", sizeof(info->fw_version));
4793 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4794 info->regdump_len = XENA_REG_SPACE;
4795 info->eedump_len = XENA_EEPROM_SPACE;
4796 info->testinfo_len = S2IO_TEST_LEN;
4797
4798 if (sp->device_type == XFRAME_I_DEVICE)
4799 info->n_stats = XFRAME_I_STAT_LEN;
4800 else
4801 info->n_stats = XFRAME_II_STAT_LEN;
4802 }
4803
4804 /**
4805  *  s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
4806 * @sp: private member of the device structure, which is a pointer to the
4807 * s2io_nic structure.
4808 * @regs : pointer to the structure with parameters given by ethtool for
4809 * dumping the registers.
4810  *  @reg_space: The input argument into which all the registers are dumped.
4811 * Description:
4812 * Dumps the entire register space of xFrame NIC into the user given
4813 * buffer area.
4814 * Return value :
4815 * void .
4816 */
4817
4818 static void s2io_ethtool_gregs(struct net_device *dev,
4819 struct ethtool_regs *regs, void *space)
4820 {
4821 int i;
4822 u64 reg;
4823 u8 *reg_space = (u8 *) space;
4824 struct s2io_nic *sp = dev->priv;
4825
4826 regs->len = XENA_REG_SPACE;
4827 regs->version = sp->pdev->subsystem_device;
4828
4829 for (i = 0; i < regs->len; i += 8) {
4830 reg = readq(sp->bar0 + i);
4831 memcpy((reg_space + i), &reg, 8);
4832 }
4833 }
4834
4835 /**
4836 * s2io_phy_id - timer function that alternates adapter LED.
4837 * @data : address of the private member of the device structure, which
4838 * is a pointer to the s2io_nic structure, provided as an u32.
4839 * Description: This is actually the timer function that alternates the
4840 * adapter LED bit of the adapter control bit to set/reset every time on
4841  * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4842 * once every second.
4843 */
4844 static void s2io_phy_id(unsigned long data)
4845 {
4846 struct s2io_nic *sp = (struct s2io_nic *) data;
4847 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4848 u64 val64 = 0;
4849 u16 subid;
4850
4851 subid = sp->pdev->subsystem_device;
4852 if ((sp->device_type == XFRAME_II_DEVICE) ||
4853 ((subid & 0xFF) >= 0x07)) {
4854 val64 = readq(&bar0->gpio_control);
4855 val64 ^= GPIO_CTRL_GPIO_0;
4856 writeq(val64, &bar0->gpio_control);
4857 } else {
4858 val64 = readq(&bar0->adapter_control);
4859 val64 ^= ADAPTER_LED_ON;
4860 writeq(val64, &bar0->adapter_control);
4861 }
4862
4863 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4864 }
4865
4866 /**
4867 * s2io_ethtool_idnic - To physically identify the nic on the system.
4868 * @sp : private member of the device structure, which is a pointer to the
4869 * s2io_nic structure.
4870 * @id : pointer to the structure with identification parameters given by
4871 * ethtool.
4872 * Description: Used to physically identify the NIC on the system.
4873 * The Link LED will blink for a time specified by the user for
4874 * identification.
4875 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4876  * identification is possible only if its link is up.
4877 * Return value:
4878 * int , returns 0 on success
4879 */
4880
4881 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4882 {
4883 u64 val64 = 0, last_gpio_ctrl_val;
4884 struct s2io_nic *sp = dev->priv;
4885 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4886 u16 subid;
4887
4888 subid = sp->pdev->subsystem_device;
4889 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4890 if ((sp->device_type == XFRAME_I_DEVICE) &&
4891 ((subid & 0xFF) < 0x07)) {
4892 val64 = readq(&bar0->adapter_control);
4893 if (!(val64 & ADAPTER_CNTL_EN)) {
4894 printk(KERN_ERR
4895 "Adapter Link down, cannot blink LED\n");
4896 return -EFAULT;
4897 }
4898 }
4899 if (sp->id_timer.function == NULL) {
4900 init_timer(&sp->id_timer);
4901 sp->id_timer.function = s2io_phy_id;
4902 sp->id_timer.data = (unsigned long) sp;
4903 }
4904 mod_timer(&sp->id_timer, jiffies);
4905 if (data)
4906 msleep_interruptible(data * HZ);
4907 else
4908 msleep_interruptible(MAX_FLICKER_TIME);
4909 del_timer_sync(&sp->id_timer);
4910
4911 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4912 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4913 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4914 }
4915
4916 return 0;
4917 }
4918
4919 static void s2io_ethtool_gringparam(struct net_device *dev,
4920 struct ethtool_ringparam *ering)
4921 {
4922 struct s2io_nic *sp = dev->priv;
4923 int i,tx_desc_count=0,rx_desc_count=0;
4924
4925 if (sp->rxd_mode == RXD_MODE_1)
4926 ering->rx_max_pending = MAX_RX_DESC_1;
4927 else if (sp->rxd_mode == RXD_MODE_3B)
4928 ering->rx_max_pending = MAX_RX_DESC_2;
4929 else if (sp->rxd_mode == RXD_MODE_3A)
4930 ering->rx_max_pending = MAX_RX_DESC_3;
4931
4932 ering->tx_max_pending = MAX_TX_DESC;
4933 for (i = 0 ; i < sp->config.tx_fifo_num ; i++) {
4934 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4935 }
4936 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4937 ering->tx_pending = tx_desc_count;
4938 rx_desc_count = 0;
4939 for (i = 0 ; i < sp->config.rx_ring_num ; i++) {
4940 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
4941 }
4942 ering->rx_pending = rx_desc_count;
4943
4944 ering->rx_mini_max_pending = 0;
4945 ering->rx_mini_pending = 0;
4946 if(sp->rxd_mode == RXD_MODE_1)
4947 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
4948 else if (sp->rxd_mode == RXD_MODE_3B)
4949 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
4950 ering->rx_jumbo_pending = rx_desc_count;
4951 }
4952
4953 /**
4954  * s2io_ethtool_getpause_data - Pause frame generation and reception.
4955 * @sp : private member of the device structure, which is a pointer to the
4956 * s2io_nic structure.
4957 * @ep : pointer to the structure with pause parameters given by ethtool.
4958 * Description:
4959 * Returns the Pause frame generation and reception capability of the NIC.
4960 * Return value:
4961 * void
4962 */
4963 static void s2io_ethtool_getpause_data(struct net_device *dev,
4964 struct ethtool_pauseparam *ep)
4965 {
4966 u64 val64;
4967 struct s2io_nic *sp = dev->priv;
4968 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4969
4970 val64 = readq(&bar0->rmac_pause_cfg);
4971 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4972 ep->tx_pause = TRUE;
4973 if (val64 & RMAC_PAUSE_RX_ENABLE)
4974 ep->rx_pause = TRUE;
4975 ep->autoneg = FALSE;
4976 }
4977
4978 /**
4979 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4980 * @sp : private member of the device structure, which is a pointer to the
4981 * s2io_nic structure.
4982 * @ep : pointer to the structure with pause parameters given by ethtool.
4983 * Description:
4984 * It can be used to set or reset Pause frame generation or reception
4985 * support of the NIC.
4986 * Return value:
4987 * int, returns 0 on Success
4988 */
4989
4990 static int s2io_ethtool_setpause_data(struct net_device *dev,
4991 struct ethtool_pauseparam *ep)
4992 {
4993 u64 val64;
4994 struct s2io_nic *sp = dev->priv;
4995 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4996
4997 val64 = readq(&bar0->rmac_pause_cfg);
4998 if (ep->tx_pause)
4999 val64 |= RMAC_PAUSE_GEN_ENABLE;
5000 else
5001 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5002 if (ep->rx_pause)
5003 val64 |= RMAC_PAUSE_RX_ENABLE;
5004 else
5005 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5006 writeq(val64, &bar0->rmac_pause_cfg);
5007 return 0;
5008 }
5009
5010 /**
5011 * read_eeprom - reads 4 bytes of data from user given offset.
5012 * @sp : private member of the device structure, which is a pointer to the
5013 * s2io_nic structure.
5014  *  @off : offset from which the data must be read
5015  *  @data : an output parameter where the data read at the given
5016  *	offset is stored.
5017 * Description:
5018 * Will read 4 bytes of data from the user given offset and return the
5019 * read data.
5020  *  NOTE: Allows reading only the part of the EEPROM visible through the
5021  *  I2C bus.
5022 * Return value:
5023 * -1 on failure and 0 on success.
5024 */
5025
5026 #define S2IO_DEV_ID 5
5027 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5028 {
5029 int ret = -1;
5030 u32 exit_cnt = 0;
5031 u64 val64;
5032 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5033
5034 if (sp->device_type == XFRAME_I_DEVICE) {
5035 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5036 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5037 I2C_CONTROL_CNTL_START;
5038 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5039
5040 while (exit_cnt < 5) {
5041 val64 = readq(&bar0->i2c_control);
5042 if (I2C_CONTROL_CNTL_END(val64)) {
5043 *data = I2C_CONTROL_GET_DATA(val64);
5044 ret = 0;
5045 break;
5046 }
5047 msleep(50);
5048 exit_cnt++;
5049 }
5050 }
5051
5052 if (sp->device_type == XFRAME_II_DEVICE) {
5053 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5054 SPI_CONTROL_BYTECNT(0x3) |
5055 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5056 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5057 val64 |= SPI_CONTROL_REQ;
5058 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5059 while (exit_cnt < 5) {
5060 val64 = readq(&bar0->spi_control);
5061 if (val64 & SPI_CONTROL_NACK) {
5062 ret = 1;
5063 break;
5064 } else if (val64 & SPI_CONTROL_DONE) {
5065 *data = readq(&bar0->spi_data);
5066 *data &= 0xffffff;
5067 ret = 0;
5068 break;
5069 }
5070 msleep(50);
5071 exit_cnt++;
5072 }
5073 }
5074 return ret;
5075 }
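/*
 * Example usage (a sketch): reading one word from offset 0 --
 *
 *	u64 data;
 *	if (read_eeprom(sp, 0, &data) == 0)
 *		... use data (masked to 24 bits on Xframe II) ...
 *
 * as done by the EEPROM self-test and the ethtool geeprom path below.
 */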
5076
5077 /**
5078 * write_eeprom - actually writes the relevant part of the data value.
5079 * @sp : private member of the device structure, which is a pointer to the
5080 * s2io_nic structure.
5081 * @off : offset at which the data must be written
5082 * @data : The data that is to be written
5083 * @cnt : Number of bytes of the data that are actually to be written into
5084 * the Eeprom. (max of 3)
5085 * Description:
5086 * Actually writes the relevant part of the data value into the Eeprom
5087 * through the I2C bus.
5088 * Return value:
5089 * 0 on success, -1 on failure.
5090 */
5091
5092 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5093 {
5094 int exit_cnt = 0, ret = -1;
5095 u64 val64;
5096 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5097
5098 if (sp->device_type == XFRAME_I_DEVICE) {
5099 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5100 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5101 I2C_CONTROL_CNTL_START;
5102 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5103
5104 while (exit_cnt < 5) {
5105 val64 = readq(&bar0->i2c_control);
5106 if (I2C_CONTROL_CNTL_END(val64)) {
5107 if (!(val64 & I2C_CONTROL_NACK))
5108 ret = 0;
5109 break;
5110 }
5111 msleep(50);
5112 exit_cnt++;
5113 }
5114 }
5115
5116 if (sp->device_type == XFRAME_II_DEVICE) {
5117 int write_cnt = (cnt == 8) ? 0 : cnt;
5118 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5119
5120 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5121 SPI_CONTROL_BYTECNT(write_cnt) |
5122 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5123 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5124 val64 |= SPI_CONTROL_REQ;
5125 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5126 while (exit_cnt < 5) {
5127 val64 = readq(&bar0->spi_control);
5128 if (val64 & SPI_CONTROL_NACK) {
5129 ret = 1;
5130 break;
5131 } else if (val64 & SPI_CONTROL_DONE) {
5132 ret = 0;
5133 break;
5134 }
5135 msleep(50);
5136 exit_cnt++;
5137 }
5138 }
5139 return ret;
5140 }
5141 static void s2io_vpd_read(struct s2io_nic *nic)
5142 {
5143 u8 *vpd_data;
5144 u8 data;
5145 int i=0, cnt, fail = 0;
5146 int vpd_addr = 0x80;
5147
5148 if (nic->device_type == XFRAME_II_DEVICE) {
5149 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5150 vpd_addr = 0x80;
5151 }
5152 else {
5153 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5154 vpd_addr = 0x50;
5155 }
5156 strcpy(nic->serial_num, "NOT AVAILABLE");
5157
5158 vpd_data = kmalloc(256, GFP_KERNEL);
5159 if (!vpd_data) {
5160 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5161 return;
5162 }
5163 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5164
5165 for (i = 0; i < 256; i +=4 ) {
5166 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5167 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5168 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5169 for (cnt = 0; cnt <5; cnt++) {
5170 msleep(2);
5171 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5172 if (data == 0x80)
5173 break;
5174 }
5175 if (cnt >= 5) {
5176 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5177 fail = 1;
5178 break;
5179 }
5180 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5181 (u32 *)&vpd_data[i]);
5182 }
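	/*
	 * Each pass above is a standard PCI VPD read: the offset is
	 * written to the VPD address register with the flag byte cleared,
	 * the device sets the flag (0x80 in the upper address byte) once
	 * the read completes, and the 32-bit result is then fetched from
	 * the VPD data register at vpd_addr + 4.
	 */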
5183
5184 if(!fail) {
5185 /* read serial number of adapter */
5186 for (cnt = 0; cnt < 256; cnt++) {
5187 if ((vpd_data[cnt] == 'S') &&
5188 (vpd_data[cnt+1] == 'N') &&
5189 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5190 memset(nic->serial_num, 0, VPD_STRING_LEN);
5191 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5192 vpd_data[cnt+2]);
5193 break;
5194 }
5195 }
5196 }
5197
5198 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5199 memset(nic->product_name, 0, vpd_data[1]);
5200 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5201 }
5202 kfree(vpd_data);
5203 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5204 }
5205
5206 /**
5207 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5208  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5209 * @eeprom : pointer to the user level structure provided by ethtool,
5210 * containing all relevant information.
5211 * @data_buf : user defined value to be written into Eeprom.
5212  * Description: Reads the values stored in the Eeprom at the given offset
5213  * for a given length. Stores these values in the input argument data
5214  * buffer 'data_buf' and returns them to the caller (ethtool).
5215 * Return value:
5216 * int 0 on success
5217 */
5218
5219 static int s2io_ethtool_geeprom(struct net_device *dev,
5220 struct ethtool_eeprom *eeprom, u8 * data_buf)
5221 {
5222 u32 i, valid;
5223 u64 data;
5224 struct s2io_nic *sp = dev->priv;
5225
5226 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5227
5228 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5229 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5230
5231 for (i = 0; i < eeprom->len; i += 4) {
5232 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5233 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5234 return -EFAULT;
5235 }
5236 valid = INV(data);
5237 memcpy((data_buf + i), &valid, 4);
5238 }
5239 return 0;
5240 }
5241
5242 /**
5243 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5244 * @sp : private member of the device structure, which is a pointer to the
5245 * s2io_nic structure.
5246 * @eeprom : pointer to the user level structure provided by ethtool,
5247 * containing all relevant information.
5248  * @data_buf : user defined value to be written into Eeprom.
5249 * Description:
5250 * Tries to write the user provided value in the Eeprom, at the offset
5251 * given by the user.
5252 * Return value:
5253 * 0 on success, -EFAULT on failure.
5254 */
5255
5256 static int s2io_ethtool_seeprom(struct net_device *dev,
5257 struct ethtool_eeprom *eeprom,
5258 u8 * data_buf)
5259 {
5260 int len = eeprom->len, cnt = 0;
5261 u64 valid = 0, data;
5262 struct s2io_nic *sp = dev->priv;
5263
5264 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5265 DBG_PRINT(ERR_DBG,
5266 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5267 		DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
5268 eeprom->magic);
5269 return -EFAULT;
5270 }
5271
5272 while (len) {
5273 data = (u32) data_buf[cnt] & 0x000000FF;
5274 if (data) {
5275 valid = (u32) (data << 24);
5276 } else
5277 valid = data;
5278
5279 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5280 DBG_PRINT(ERR_DBG,
5281 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5282 DBG_PRINT(ERR_DBG,
5283 "write into the specified offset\n");
5284 return -EFAULT;
5285 }
5286 cnt++;
5287 len--;
5288 }
5289
5290 return 0;
5291 }
5292
5293 /**
5294 * s2io_register_test - reads and writes into all clock domains.
5295 * @sp : private member of the device structure, which is a pointer to the
5296 * s2io_nic structure.
5297  * @data : variable that returns the result of each of the tests conducted
5298  * by the driver.
5299 * Description:
5300  * Read and write into all clock domains. The NIC has 3 clock domains;
5301  * the test verifies that registers in all three regions are accessible.
5302 * Return value:
5303 * 0 on success.
5304 */
5305
5306 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5307 {
5308 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5309 u64 val64 = 0, exp_val;
5310 int fail = 0;
5311
5312 val64 = readq(&bar0->pif_rd_swapper_fb);
5313 if (val64 != 0x123456789abcdefULL) {
5314 fail = 1;
5315 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5316 }
5317
5318 val64 = readq(&bar0->rmac_pause_cfg);
5319 if (val64 != 0xc000ffff00000000ULL) {
5320 fail = 1;
5321 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5322 }
5323
5324 val64 = readq(&bar0->rx_queue_cfg);
5325 if (sp->device_type == XFRAME_II_DEVICE)
5326 exp_val = 0x0404040404040404ULL;
5327 else
5328 exp_val = 0x0808080808080808ULL;
5329 if (val64 != exp_val) {
5330 fail = 1;
5331 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5332 }
5333
5334 val64 = readq(&bar0->xgxs_efifo_cfg);
5335 if (val64 != 0x000000001923141EULL) {
5336 fail = 1;
5337 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5338 }
5339
5340 val64 = 0x5A5A5A5A5A5A5A5AULL;
5341 writeq(val64, &bar0->xmsi_data);
5342 val64 = readq(&bar0->xmsi_data);
5343 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5344 fail = 1;
5345 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5346 }
5347
5348 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5349 writeq(val64, &bar0->xmsi_data);
5350 val64 = readq(&bar0->xmsi_data);
5351 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5352 fail = 1;
5353 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5354 }
5355
5356 *data = fail;
5357 return fail;
5358 }
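
/*
 * Sketch: 0x5A5A... and 0xA5A5... are bitwise complements, so writing both
 * patterns drives every data line both high and low. A generic helper for
 * such a write/read-back check might look like this (illustrative only,
 * not part of the driver):
 */
#if 0
static int s2io_check_pattern(void __iomem *reg, u64 pattern)
{
	writeq(pattern, reg);
	return (readq(reg) == pattern) ? 0 : -EIO;
}
#endif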
5359
5360 /**
5361 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5362 * @sp : private member of the device structure, which is a pointer to the
5363 * s2io_nic structure.
5364 * @data : variable that returns the result of each of the tests
5365 * conducted by the driver.
5366 * Description:
5367 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
5368 * register.
5369 * Return value:
5370 * 0 on success.
5371 */
5372
5373 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5374 {
5375 int fail = 0;
5376 u64 ret_data, org_4F0, org_7F0;
5377 u8 saved_4F0 = 0, saved_7F0 = 0;
5378 struct net_device *dev = sp->dev;
5379
5380 /* Test Write Error at offset 0 */
5381 /* Note that SPI interface allows write access to all areas
5382 * of EEPROM. Hence doing all negative testing only for Xframe I.
5383 */
5384 if (sp->device_type == XFRAME_I_DEVICE)
5385 if (!write_eeprom(sp, 0, 0, 3))
5386 fail = 1;
5387
5388 /* Save current values at offsets 0x4F0 and 0x7F0 */
5389 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5390 saved_4F0 = 1;
5391 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5392 saved_7F0 = 1;
5393
5394 /* Test Write at offset 4f0 */
5395 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5396 fail = 1;
5397 if (read_eeprom(sp, 0x4F0, &ret_data))
5398 fail = 1;
5399
5400 if (ret_data != 0x012345) {
5401 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5402 "Data written %llx Data read %llx\n",
5403 dev->name, (unsigned long long)0x12345,
5404 (unsigned long long)ret_data);
5405 fail = 1;
5406 }
5407
5408 /* Reset the EEPROM data back to 0xFFFFFF */
5409 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5410
5411 /* Test Write Request Error at offset 0x7c */
5412 if (sp->device_type == XFRAME_I_DEVICE)
5413 if (!write_eeprom(sp, 0x07C, 0, 3))
5414 fail = 1;
5415
5416 /* Test Write Request at offset 0x7f0 */
5417 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5418 fail = 1;
5419 if (read_eeprom(sp, 0x7F0, &ret_data))
5420 fail = 1;
5421
5422 if (ret_data != 0x012345) {
5423 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5424 "Data written %llx Data read %llx\n",
5425 dev->name, (unsigned long long)0x12345,
5426 (unsigned long long)ret_data);
5427 fail = 1;
5428 }
5429
5430 /* Reset the EEPROM data back to 0xFFFFFF */
5431 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5432
5433 if (sp->device_type == XFRAME_I_DEVICE) {
5434 /* Test Write Error at offset 0x80 */
5435 if (!write_eeprom(sp, 0x080, 0, 3))
5436 fail = 1;
5437
5438 /* Test Write Error at offset 0xfc */
5439 if (!write_eeprom(sp, 0x0FC, 0, 3))
5440 fail = 1;
5441
5442 /* Test Write Error at offset 0x100 */
5443 if (!write_eeprom(sp, 0x100, 0, 3))
5444 fail = 1;
5445
5446 /* Test Write Error at offset 4ec */
5447 if (!write_eeprom(sp, 0x4EC, 0, 3))
5448 fail = 1;
5449 }
5450
5451 /* Restore values at offsets 0x4F0 and 0x7F0 */
5452 if (saved_4F0)
5453 write_eeprom(sp, 0x4F0, org_4F0, 3);
5454 if (saved_7F0)
5455 write_eeprom(sp, 0x7F0, org_7F0, 3);
5456
5457 *data = fail;
5458 return fail;
5459 }
5460
5461 /**
5462 * s2io_bist_test - invokes the MemBist test of the card .
5463 * @sp : private member of the device structure, which is a pointer to the
5464 * s2io_nic structure.
5465 * @data : variable that returns the result of each of the tests
5466 * conducted by the driver.
5467 * Description:
5468 * This invokes the MemBist test of the card. We give around
5469 * 2 secs for the test to complete. If it's still not complete
5470 * within this period, we consider that the test failed.
5471 * Return value:
5472 * 0 on success and -1 on failure.
5473 */
5474
5475 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5476 {
5477 u8 bist = 0;
5478 int cnt = 0, ret = -1;
5479
5480 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5481 bist |= PCI_BIST_START;
5482 pci_write_config_byte(sp->pdev, PCI_BIST, bist); /* PCI_BIST is a byte-wide register */
5483
5484 while (cnt < 20) {
5485 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5486 if (!(bist & PCI_BIST_START)) {
5487 *data = (bist & PCI_BIST_CODE_MASK);
5488 ret = 0;
5489 break;
5490 }
5491 msleep(100);
5492 cnt++;
5493 }
5494
5495 return ret;
5496 }
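
/*
 * Note: per the PCI spec a device advertises BIST support via the
 * PCI_BIST_CAPABLE bit, and BIST must complete within 2 seconds, which is
 * why the loop above polls for at most 20 x 100ms. A sketch of a
 * capability check that could precede the test (illustrative only):
 */
#if 0
	u8 bist;
	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
	if (!(bist & PCI_BIST_CAPABLE))
		return -1;	/* device implements no BIST */
#endif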
5497
5498 /**
5499 * s2io_link_test - verifies the link state of the NIC
5500 * @sp : private member of the device structure, which is a pointer to the
5501 * s2io_nic structure.
5502 * @data: variable that returns the result of each of the tests conducted by
5503 * the driver.
5504 * Description:
5505 * The function verifies the link state of the NIC and updates the input
5506 * argument 'data' appropriately.
5507 * Return value:
5508 * 0 on success.
5509 */
5510
5511 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5512 {
5513 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5514 u64 val64;
5515
5516 val64 = readq(&bar0->adapter_status);
5517 if(!(LINK_IS_UP(val64)))
5518 *data = 1;
5519 else
5520 *data = 0;
5521
5522 return *data;
5523 }
5524
5525 /**
5526 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5527 * @sp : private member of the device structure, which is a pointer to the
5528 * s2io_nic structure.
5529 * @data : variable that returns the result of each of the tests
5530 * conducted by the driver.
5531 * Description:
5532 * This is one of the offline tests; it verifies read and write
5533 * access to the RldRam chip on the NIC.
5534 * Return value:
5535 * 0 on success.
5536 */
5537
5538 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5539 {
5540 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5541 u64 val64;
5542 int cnt, iteration = 0, test_fail = 0;
5543
5544 val64 = readq(&bar0->adapter_control);
5545 val64 &= ~ADAPTER_ECC_EN;
5546 writeq(val64, &bar0->adapter_control);
5547
5548 val64 = readq(&bar0->mc_rldram_test_ctrl);
5549 val64 |= MC_RLDRAM_TEST_MODE;
5550 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5551
5552 val64 = readq(&bar0->mc_rldram_mrs);
5553 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5554 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5555
5556 val64 |= MC_RLDRAM_MRS_ENABLE;
5557 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5558
5559 while (iteration < 2) {
5560 val64 = 0x55555555aaaa0000ULL;
5561 if (iteration == 1) {
5562 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5563 }
5564 writeq(val64, &bar0->mc_rldram_test_d0);
5565
5566 val64 = 0xaaaa5a5555550000ULL;
5567 if (iteration == 1) {
5568 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5569 }
5570 writeq(val64, &bar0->mc_rldram_test_d1);
5571
5572 val64 = 0x55aaaaaaaa5a0000ULL;
5573 if (iteration == 1) {
5574 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5575 }
5576 writeq(val64, &bar0->mc_rldram_test_d2);
5577
5578 val64 = (u64) (0x0000003ffffe0100ULL);
5579 writeq(val64, &bar0->mc_rldram_test_add);
5580
5581 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5582 MC_RLDRAM_TEST_GO;
5583 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5584
5585 for (cnt = 0; cnt < 5; cnt++) {
5586 val64 = readq(&bar0->mc_rldram_test_ctrl);
5587 if (val64 & MC_RLDRAM_TEST_DONE)
5588 break;
5589 msleep(200);
5590 }
5591
5592 if (cnt == 5)
5593 break;
5594
5595 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5596 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5597
5598 for (cnt = 0; cnt < 5; cnt++) {
5599 val64 = readq(&bar0->mc_rldram_test_ctrl);
5600 if (val64 & MC_RLDRAM_TEST_DONE)
5601 break;
5602 msleep(500);
5603 }
5604
5605 if (cnt == 5)
5606 break;
5607
5608 val64 = readq(&bar0->mc_rldram_test_ctrl);
5609 if (!(val64 & MC_RLDRAM_TEST_PASS))
5610 test_fail = 1;
5611
5612 iteration++;
5613 }
5614
5615 *data = test_fail;
5616
5617 /* Bring the adapter out of test mode */
5618 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5619
5620 return test_fail;
5621 }
5622
5623 /**
5624 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
5625 * @sp : private member of the device structure, which is a pointer to the
5626 * s2io_nic structure.
5627 * @ethtest : pointer to an ethtool command specific structure that will be
5628 * returned to the user.
5629 * @data : variable that returns the result of each of the tests
5630 * conducted by the driver.
5631 * Description:
5632 * This function conducts 6 tests (4 offline and 2 online) to determine
5633 * the health of the card.
5634 * Return value:
5635 * void
5636 */
5637
5638 static void s2io_ethtool_test(struct net_device *dev,
5639 struct ethtool_test *ethtest,
5640 uint64_t * data)
5641 {
5642 struct s2io_nic *sp = dev->priv;
5643 int orig_state = netif_running(sp->dev);
5644
5645 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5646 /* Offline Tests. */
5647 if (orig_state)
5648 s2io_close(sp->dev);
5649
5650 if (s2io_register_test(sp, &data[0]))
5651 ethtest->flags |= ETH_TEST_FL_FAILED;
5652
5653 s2io_reset(sp);
5654
5655 if (s2io_rldram_test(sp, &data[3]))
5656 ethtest->flags |= ETH_TEST_FL_FAILED;
5657
5658 s2io_reset(sp);
5659
5660 if (s2io_eeprom_test(sp, &data[1]))
5661 ethtest->flags |= ETH_TEST_FL_FAILED;
5662
5663 if (s2io_bist_test(sp, &data[4]))
5664 ethtest->flags |= ETH_TEST_FL_FAILED;
5665
5666 if (orig_state)
5667 s2io_open(sp->dev);
5668
5669 data[2] = 0;
5670 } else {
5671 /* Online Tests. */
5672 if (!orig_state) {
5673 DBG_PRINT(ERR_DBG,
5674 "%s: is not up, cannot run test\n",
5675 dev->name);
5676 data[0] = -1;
5677 data[1] = -1;
5678 data[2] = -1;
5679 data[3] = -1;
5680 data[4] = -1;
5681 }
5682
5683 if (s2io_link_test(sp, &data[2]))
5684 ethtest->flags |= ETH_TEST_FL_FAILED;
5685
5686 data[0] = 0;
5687 data[1] = 0;
5688 data[3] = 0;
5689 data[4] = 0;
5690 }
5691 }
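
/*
 * Usage note: these tests are reached through the standard ethtool
 * self-test interface, e.g. "ethtool -t eth0 offline" runs the offline
 * tests above plus the link test, while "ethtool -t eth0 online" runs
 * only the link test (the interface name eth0 is illustrative).
 */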
5692
5693 static void s2io_get_ethtool_stats(struct net_device *dev,
5694 struct ethtool_stats *estats,
5695 u64 * tmp_stats)
5696 {
5697 int i = 0;
5698 struct s2io_nic *sp = dev->priv;
5699 struct stat_block *stat_info = sp->mac_control.stats_info;
5700
5701 s2io_updt_stats(sp);
5702 tmp_stats[i++] =
5703 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5704 le32_to_cpu(stat_info->tmac_frms);
5705 tmp_stats[i++] =
5706 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5707 le32_to_cpu(stat_info->tmac_data_octets);
5708 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5709 tmp_stats[i++] =
5710 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5711 le32_to_cpu(stat_info->tmac_mcst_frms);
5712 tmp_stats[i++] =
5713 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5714 le32_to_cpu(stat_info->tmac_bcst_frms);
5715 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5716 tmp_stats[i++] =
5717 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5718 le32_to_cpu(stat_info->tmac_ttl_octets);
5719 tmp_stats[i++] =
5720 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5721 le32_to_cpu(stat_info->tmac_ucst_frms);
5722 tmp_stats[i++] =
5723 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5724 le32_to_cpu(stat_info->tmac_nucst_frms);
5725 tmp_stats[i++] =
5726 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5727 le32_to_cpu(stat_info->tmac_any_err_frms);
5728 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5729 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5730 tmp_stats[i++] =
5731 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5732 le32_to_cpu(stat_info->tmac_vld_ip);
5733 tmp_stats[i++] =
5734 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5735 le32_to_cpu(stat_info->tmac_drop_ip);
5736 tmp_stats[i++] =
5737 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5738 le32_to_cpu(stat_info->tmac_icmp);
5739 tmp_stats[i++] =
5740 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5741 le32_to_cpu(stat_info->tmac_rst_tcp);
5742 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5743 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5744 le32_to_cpu(stat_info->tmac_udp);
5745 tmp_stats[i++] =
5746 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5747 le32_to_cpu(stat_info->rmac_vld_frms);
5748 tmp_stats[i++] =
5749 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5750 le32_to_cpu(stat_info->rmac_data_octets);
5751 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5752 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5753 tmp_stats[i++] =
5754 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5755 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5756 tmp_stats[i++] =
5757 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5758 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5759 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5760 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5761 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5762 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5763 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5764 tmp_stats[i++] =
5765 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5766 le32_to_cpu(stat_info->rmac_ttl_octets);
5767 tmp_stats[i++] =
5768 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5769 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5770 tmp_stats[i++] =
5771 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5772 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5773 tmp_stats[i++] =
5774 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5775 le32_to_cpu(stat_info->rmac_discarded_frms);
5776 tmp_stats[i++] =
5777 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5778 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5779 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5780 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5781 tmp_stats[i++] =
5782 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5783 le32_to_cpu(stat_info->rmac_usized_frms);
5784 tmp_stats[i++] =
5785 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5786 le32_to_cpu(stat_info->rmac_osized_frms);
5787 tmp_stats[i++] =
5788 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5789 le32_to_cpu(stat_info->rmac_frag_frms);
5790 tmp_stats[i++] =
5791 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5792 le32_to_cpu(stat_info->rmac_jabber_frms);
5793 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5794 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5795 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5796 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5797 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5798 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5799 tmp_stats[i++] =
5800 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5801 le32_to_cpu(stat_info->rmac_ip);
5802 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5803 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5804 tmp_stats[i++] =
5805 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5806 le32_to_cpu(stat_info->rmac_drop_ip);
5807 tmp_stats[i++] =
5808 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5809 le32_to_cpu(stat_info->rmac_icmp);
5810 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5811 tmp_stats[i++] =
5812 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5813 le32_to_cpu(stat_info->rmac_udp);
5814 tmp_stats[i++] =
5815 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5816 le32_to_cpu(stat_info->rmac_err_drp_udp);
5817 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5818 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5819 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5820 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5821 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5822 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5823 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5824 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5825 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5826 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5827 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5828 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5829 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5830 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5831 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5832 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5833 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5834 tmp_stats[i++] =
5835 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5836 le32_to_cpu(stat_info->rmac_pause_cnt);
5837 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5838 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5839 tmp_stats[i++] =
5840 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5841 le32_to_cpu(stat_info->rmac_accepted_ip);
5842 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5843 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5844 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5845 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5846 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5847 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5848 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5849 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5850 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5851 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5852 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5853 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5854 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5855 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5856 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5857 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5858 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5859 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5860 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5861
5862 /* Enhanced statistics exist only for Hercules */
5863 if(sp->device_type == XFRAME_II_DEVICE) {
5864 tmp_stats[i++] =
5865 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5866 tmp_stats[i++] =
5867 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5868 tmp_stats[i++] =
5869 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5870 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5871 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5872 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5873 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5874 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5875 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5876 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5877 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5878 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5879 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5880 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5881 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5882 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5883 }
5884
5885 tmp_stats[i++] = 0;
5886 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5887 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5888 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5889 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5890 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5891 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5892 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5893 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5894 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5895 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5896 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5897 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5898 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5899 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5900 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5901 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5902 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5903 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5904 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5905 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5906 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5907 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5908 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5909 if (stat_info->sw_stat.num_aggregations) {
5910 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5911 int count = 0;
5912 /*
5913 * Since 64-bit divide does not work on all platforms,
5914 * do repeated subtraction.
5915 */
5916 while (tmp >= stat_info->sw_stat.num_aggregations) {
5917 tmp -= stat_info->sw_stat.num_aggregations;
5918 count++;
5919 }
5920 tmp_stats[i++] = count;
5921 }
5922 else
5923 tmp_stats[i++] = 0;
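/*
 * Sketch: the repeated subtraction above can equivalently use the kernel's
 * do_div() helper, which implements a 64-by-32 bit divide on all platforms,
 * assuming the aggregation count fits in 32 bits (illustrative only):
 */
#if 0
	u64 quot = stat_info->sw_stat.sum_avg_pkts_aggregated;
	u32 base = (u32)stat_info->sw_stat.num_aggregations;
	do_div(quot, base);	/* quot now holds the quotient */
	tmp_stats[i++] = quot;
#endif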
5924 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
5925 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
5926 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
5927 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
5928 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
5929 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
5930 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
5931 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
5932
5933 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
5934 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
5935 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
5936 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
5937 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
5938
5939 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
5940 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
5941 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
5942 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
5943 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
5944 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
5945 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
5946 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
5947 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
5948 }
5949
5950 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5951 {
5952 return (XENA_REG_SPACE);
5953 }
5954
5955
5956 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5957 {
5958 struct s2io_nic *sp = dev->priv;
5959
5960 return (sp->rx_csum);
5961 }
5962
5963 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5964 {
5965 struct s2io_nic *sp = dev->priv;
5966
5967 if (data)
5968 sp->rx_csum = 1;
5969 else
5970 sp->rx_csum = 0;
5971
5972 return 0;
5973 }
5974
5975 static int s2io_get_eeprom_len(struct net_device *dev)
5976 {
5977 return (XENA_EEPROM_SPACE);
5978 }
5979
5980 static int s2io_ethtool_self_test_count(struct net_device *dev)
5981 {
5982 return (S2IO_TEST_LEN);
5983 }
5984
5985 static void s2io_ethtool_get_strings(struct net_device *dev,
5986 u32 stringset, u8 * data)
5987 {
5988 int stat_size = 0;
5989 struct s2io_nic *sp = dev->priv;
5990
5991 switch (stringset) {
5992 case ETH_SS_TEST:
5993 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5994 break;
5995 case ETH_SS_STATS:
5996 stat_size = sizeof(ethtool_xena_stats_keys);
5997 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5998 if(sp->device_type == XFRAME_II_DEVICE) {
5999 memcpy(data + stat_size,
6000 &ethtool_enhanced_stats_keys,
6001 sizeof(ethtool_enhanced_stats_keys));
6002 stat_size += sizeof(ethtool_enhanced_stats_keys);
6003 }
6004
6005 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6006 sizeof(ethtool_driver_stats_keys));
6007 }
6008 }
6009 static int s2io_ethtool_get_stats_count(struct net_device *dev)
6010 {
6011 struct s2io_nic *sp = dev->priv;
6012 int stat_count = 0;
6013 switch(sp->device_type) {
6014 case XFRAME_I_DEVICE:
6015 stat_count = XFRAME_I_STAT_LEN;
6016 break;
6017
6018 case XFRAME_II_DEVICE:
6019 stat_count = XFRAME_II_STAT_LEN;
6020 break;
6021 }
6022
6023 return stat_count;
6024 }
6025
6026 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6027 {
6028 if (data)
6029 dev->features |= NETIF_F_IP_CSUM;
6030 else
6031 dev->features &= ~NETIF_F_IP_CSUM;
6032
6033 return 0;
6034 }
6035
6036 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6037 {
6038 return (dev->features & NETIF_F_TSO) != 0;
6039 }
6040 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6041 {
6042 if (data)
6043 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6044 else
6045 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6046
6047 return 0;
6048 }
6049
6050 static const struct ethtool_ops netdev_ethtool_ops = {
6051 .get_settings = s2io_ethtool_gset,
6052 .set_settings = s2io_ethtool_sset,
6053 .get_drvinfo = s2io_ethtool_gdrvinfo,
6054 .get_regs_len = s2io_ethtool_get_regs_len,
6055 .get_regs = s2io_ethtool_gregs,
6056 .get_link = ethtool_op_get_link,
6057 .get_eeprom_len = s2io_get_eeprom_len,
6058 .get_eeprom = s2io_ethtool_geeprom,
6059 .set_eeprom = s2io_ethtool_seeprom,
6060 .get_ringparam = s2io_ethtool_gringparam,
6061 .get_pauseparam = s2io_ethtool_getpause_data,
6062 .set_pauseparam = s2io_ethtool_setpause_data,
6063 .get_rx_csum = s2io_ethtool_get_rx_csum,
6064 .set_rx_csum = s2io_ethtool_set_rx_csum,
6065 .get_tx_csum = ethtool_op_get_tx_csum,
6066 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6067 .get_sg = ethtool_op_get_sg,
6068 .set_sg = ethtool_op_set_sg,
6069 .get_tso = s2io_ethtool_op_get_tso,
6070 .set_tso = s2io_ethtool_op_set_tso,
6071 .get_ufo = ethtool_op_get_ufo,
6072 .set_ufo = ethtool_op_set_ufo,
6073 .self_test_count = s2io_ethtool_self_test_count,
6074 .self_test = s2io_ethtool_test,
6075 .get_strings = s2io_ethtool_get_strings,
6076 .phys_id = s2io_ethtool_idnic,
6077 .get_stats_count = s2io_ethtool_get_stats_count,
6078 .get_ethtool_stats = s2io_get_ethtool_stats
6079 };
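
/*
 * Sketch: this ops table is attached to the net_device during probe,
 * conventionally via the SET_ETHTOOL_OPS() helper (illustrative only):
 */
#if 0
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
#endif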
6080
6081 /**
6082 * s2io_ioctl - Entry point for the Ioctl
6083 * @dev : Device pointer.
6084 * @ifr : An IOCTL specific structure that can contain a pointer to
6085 * a proprietary structure used to pass information to the driver.
6086 * @cmd : This is used to distinguish between the different commands that
6087 * can be passed to the IOCTL functions.
6088 * Description:
6089 * Currently no special functionality is supported in IOCTL, hence the
6090 * function always returns -EOPNOTSUPP.
6091 */
6092
6093 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6094 {
6095 return -EOPNOTSUPP;
6096 }
6097
6098 /**
6099 * s2io_change_mtu - entry point to change MTU size for the device.
6100 * @dev : device pointer.
6101 * @new_mtu : the new MTU size for the device.
6102 * Description: A driver entry point to change MTU size for the device.
6103 * Before changing the MTU the device must be stopped.
6104 * Return value:
6105 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6106 * file on failure.
6107 */
6108
6109 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6110 {
6111 struct s2io_nic *sp = dev->priv;
6112
6113 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6114 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6115 dev->name);
6116 return -EPERM;
6117 }
6118
6119 dev->mtu = new_mtu;
6120 if (netif_running(dev)) {
6121 s2io_card_down(sp);
6122 netif_stop_queue(dev);
6123 if (s2io_card_up(sp)) {
6124 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6125 __FUNCTION__);
6126 }
6127 if (netif_queue_stopped(dev))
6128 netif_wake_queue(dev);
6129 } else { /* Device is down */
6130 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6131 u64 val64 = new_mtu;
6132
6133 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6134 }
6135
6136 return 0;
6137 }
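
/*
 * Note on the rmac_max_pyld_len write above: vBIT(val, loc, sz) places a
 * sz-bit field loc bits from the MSB of the 64-bit register (assuming the
 * driver's usual definition of vBIT as val << (64 - loc - sz)), so
 * vBIT(new_mtu, 2, 14) is equivalent to the sketch below:
 */
#if 0
	writeq((u64)new_mtu << 48, &bar0->rmac_max_pyld_len);
#endif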
6138
6139 /**
6140 * s2io_tasklet - Bottom half of the ISR.
6141 * @dev_addr : address of the net_device structure, passed as an unsigned long.
6142 * Description:
6143 * This is the tasklet or the bottom half of the ISR. This is
6144 * an extension of the ISR which is scheduled by the scheduler to be run
6145 * when the load on the CPU is low. All low priority tasks of the ISR can
6146 * be pushed into the tasklet. For now the tasklet is used only to
6147 * replenish the Rx buffers in the Rx buffer descriptors.
6148 * Return value:
6149 * void.
6150 */
6151
6152 static void s2io_tasklet(unsigned long dev_addr)
6153 {
6154 struct net_device *dev = (struct net_device *) dev_addr;
6155 struct s2io_nic *sp = dev->priv;
6156 int i, ret;
6157 struct mac_info *mac_control;
6158 struct config_param *config;
6159
6160 mac_control = &sp->mac_control;
6161 config = &sp->config;
6162
6163 if (!TASKLET_IN_USE) {
6164 for (i = 0; i < config->rx_ring_num; i++) {
6165 ret = fill_rx_buffers(sp, i);
6166 if (ret == -ENOMEM) {
6167 DBG_PRINT(INFO_DBG, "%s: Out of ",
6168 dev->name);
6169 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6170 break;
6171 } else if (ret == -EFILL) {
6172 DBG_PRINT(INFO_DBG,
6173 "%s: Rx Ring %d is full\n",
6174 dev->name, i);
6175 break;
6176 }
6177 }
6178 clear_bit(0, (&sp->tasklet_status));
6179 }
6180 }
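
/*
 * Sketch: how this tasklet is wired up and triggered elsewhere in the
 * driver (the tasklet_init() call matches the one in s2io_card_up() below;
 * the schedule call is illustrative):
 */
#if 0
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long)dev);
	tasklet_schedule(&sp->task);	/* typically from interrupt context */
#endif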
6181
6182 /**
6183 * s2io_set_link - Set the link status
6184 * @work: work_struct embedded in the s2io_nic structure
6185 * Description: Sets the link status for the adapter
6186 */
6187
6188 static void s2io_set_link(struct work_struct *work)
6189 {
6190 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6191 struct net_device *dev = nic->dev;
6192 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6193 register u64 val64;
6194 u16 subid;
6195
6196 rtnl_lock();
6197
6198 if (!netif_running(dev))
6199 goto out_unlock;
6200
6201 if (test_and_set_bit(0, &(nic->link_state))) {
6202 /* The card is being reset, no point doing anything */
6203 goto out_unlock;
6204 }
6205
6206 subid = nic->pdev->subsystem_device;
6207 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6208 /*
6209 * Allow a small delay for the NIC's self-initiated
6210 * cleanup to complete.
6211 */
6212 msleep(100);
6213 }
6214
6215 val64 = readq(&bar0->adapter_status);
6216 if (LINK_IS_UP(val64)) {
6217 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6218 if (verify_xena_quiescence(nic)) {
6219 val64 = readq(&bar0->adapter_control);
6220 val64 |= ADAPTER_CNTL_EN;
6221 writeq(val64, &bar0->adapter_control);
6222 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6223 nic->device_type, subid)) {
6224 val64 = readq(&bar0->gpio_control);
6225 val64 |= GPIO_CTRL_GPIO_0;
6226 writeq(val64, &bar0->gpio_control);
6227 val64 = readq(&bar0->gpio_control);
6228 } else {
6229 val64 |= ADAPTER_LED_ON;
6230 writeq(val64, &bar0->adapter_control);
6231 }
6232 nic->device_enabled_once = TRUE;
6233 } else {
6234 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6235 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6236 netif_stop_queue(dev);
6237 }
6238 }
6239 val64 = readq(&bar0->adapter_status);
6240 if (!LINK_IS_UP(val64)) {
6241 DBG_PRINT(ERR_DBG, "%s:", dev->name);
6242 DBG_PRINT(ERR_DBG, " Link down after enabling ");
6243 DBG_PRINT(ERR_DBG, "device\n");
6244 } else
6245 s2io_link(nic, LINK_UP);
6246 } else {
6247 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6248 subid)) {
6249 val64 = readq(&bar0->gpio_control);
6250 val64 &= ~GPIO_CTRL_GPIO_0;
6251 writeq(val64, &bar0->gpio_control);
6252 val64 = readq(&bar0->gpio_control);
6253 }
6254 s2io_link(nic, LINK_DOWN);
6255 }
6256 clear_bit(0, &(nic->link_state));
6257
6258 out_unlock:
6259 rtnl_unlock();
6260 }
6261
6262 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6263 struct buffAdd *ba,
6264 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6265 u64 *temp2, int size)
6266 {
6267 struct net_device *dev = sp->dev;
6268 struct sk_buff *frag_list;
6269
6270 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6271 /* allocate skb */
6272 if (*skb) {
6273 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6274 /*
6275 * As Rx frames are not going to be processed,
6276 * reuse the same mapped address for the RxD
6277 * buffer pointer.
6278 */
6279 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
6280 } else {
6281 *skb = dev_alloc_skb(size);
6282 if (!(*skb)) {
6283 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6284 DBG_PRINT(INFO_DBG, "memory to allocate ");
6285 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6286 sp->mac_control.stats_info->sw_stat. \
6287 mem_alloc_fail_cnt++;
6288 return -ENOMEM ;
6289 }
6290 sp->mac_control.stats_info->sw_stat.mem_allocated
6291 += (*skb)->truesize;
6292 /* store the mapped addr in a temp variable
6293 * so that it can be used for the next RxD whose
6294 * Host_Control is NULL
6295 */
6296 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
6297 pci_map_single( sp->pdev, (*skb)->data,
6298 size - NET_IP_ALIGN,
6299 PCI_DMA_FROMDEVICE);
6300 rxdp->Host_Control = (unsigned long) (*skb);
6301 }
6302 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6303 /* Two buffer Mode */
6304 if (*skb) {
6305 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6306 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6307 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6308 } else {
6309 *skb = dev_alloc_skb(size);
6310 if (!(*skb)) {
6311 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6312 DBG_PRINT(INFO_DBG, "memory to allocate ");
6313 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6314 sp->mac_control.stats_info->sw_stat. \
6315 mem_alloc_fail_cnt++;
6316 return -ENOMEM;
6317 }
6318 sp->mac_control.stats_info->sw_stat.mem_allocated
6319 += (*skb)->truesize;
6320 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6321 pci_map_single(sp->pdev, (*skb)->data,
6322 dev->mtu + 4,
6323 PCI_DMA_FROMDEVICE);
6324 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6325 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6326 PCI_DMA_FROMDEVICE);
6327 rxdp->Host_Control = (unsigned long) (*skb);
6328
6329 /* Buffer-1 will be a dummy buffer, not used */
6330 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6331 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6332 PCI_DMA_FROMDEVICE);
6333 }
6334 } else if ((rxdp->Host_Control == 0)) {
6335 /* Three buffer mode */
6336 if (*skb) {
6337 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6338 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6339 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6340 } else {
6341 *skb = dev_alloc_skb(size);
6342 if (!(*skb)) {
6343 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6344 DBG_PRINT(INFO_DBG, "memory to allocate ");
6345 DBG_PRINT(INFO_DBG, "3 buf mode SKBs\n");
6346 sp->mac_control.stats_info->sw_stat. \
6347 mem_alloc_fail_cnt++;
6348 return -ENOMEM;
6349 }
6350 sp->mac_control.stats_info->sw_stat.mem_allocated
6351 += (*skb)->truesize;
6352 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6353 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6354 PCI_DMA_FROMDEVICE);
6355 /* Buffer-1 receives L3/L4 headers */
6356 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6357 pci_map_single( sp->pdev, (*skb)->data,
6358 l3l4hdr_size + 4,
6359 PCI_DMA_FROMDEVICE);
6360 /*
6361 * skb_shinfo(skb)->frag_list will have L4
6362 * data payload
6363 */
6364 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6365 ALIGN_SIZE);
6366 if (skb_shinfo(*skb)->frag_list == NULL) {
6367 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6368 dev->name);
6369 sp->mac_control.stats_info->sw_stat. \
6370 mem_alloc_fail_cnt++;
6371 return -ENOMEM ;
6372 }
6373 frag_list = skb_shinfo(*skb)->frag_list;
6374 frag_list->next = NULL;
6375 sp->mac_control.stats_info->sw_stat.mem_allocated
6376 += frag_list->truesize;
6377 /*
6378 * Buffer-2 receives L4 data payload
6379 */
6380 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6381 pci_map_single( sp->pdev, frag_list->data,
6382 dev->mtu, PCI_DMA_FROMDEVICE);
6383 }
6384 }
6385 return 0;
6386 }
6387 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6388 int size)
6389 {
6390 struct net_device *dev = sp->dev;
6391 if (sp->rxd_mode == RXD_MODE_1) {
6392 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6393 } else if (sp->rxd_mode == RXD_MODE_3B) {
6394 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6395 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6396 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6397 } else {
6398 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6399 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6400 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6401 }
6402 }
6403
6404 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6405 {
6406 int i, j, k, blk_cnt = 0, size;
6407 struct mac_info * mac_control = &sp->mac_control;
6408 struct config_param *config = &sp->config;
6409 struct net_device *dev = sp->dev;
6410 struct RxD_t *rxdp = NULL;
6411 struct sk_buff *skb = NULL;
6412 struct buffAdd *ba = NULL;
6413 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6414
6415 /* Calculate the size based on ring mode */
6416 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6417 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6418 if (sp->rxd_mode == RXD_MODE_1)
6419 size += NET_IP_ALIGN;
6420 else if (sp->rxd_mode == RXD_MODE_3B)
6421 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6422 else
6423 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6424
6425 for (i = 0; i < config->rx_ring_num; i++) {
6426 blk_cnt = config->rx_cfg[i].num_rxd /
6427 (rxd_count[sp->rxd_mode] +1);
6428
6429 for (j = 0; j < blk_cnt; j++) {
6430 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6431 rxdp = mac_control->rings[i].
6432 rx_blocks[j].rxds[k].virt_addr;
6433 if(sp->rxd_mode >= RXD_MODE_3A)
6434 ba = &mac_control->rings[i].ba[j][k];
6435 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6436 &skb,(u64 *)&temp0_64,
6437 (u64 *)&temp1_64,
6438 (u64 *)&temp2_64,
6439 size) == -ENOMEM) {
6440 return 0;
6441 }
6442
6443 set_rxd_buffer_size(sp, rxdp, size);
6444 wmb();
6445 /* flip the Ownership bit to Hardware */
6446 rxdp->Control_1 |= RXD_OWN_XENA;
6447 }
6448 }
6449 }
6450 return 0;
6451
6452 }
6453
6454 static int s2io_add_isr(struct s2io_nic * sp)
6455 {
6456 int ret = 0;
6457 struct net_device *dev = sp->dev;
6458 int err = 0;
6459
6460 if (sp->intr_type == MSI)
6461 ret = s2io_enable_msi(sp);
6462 else if (sp->intr_type == MSI_X)
6463 ret = s2io_enable_msi_x(sp);
6464 if (ret) {
6465 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6466 sp->intr_type = INTA;
6467 }
6468
6469 /* Store the values of the MSIX table in the struct s2io_nic structure */
6470 store_xmsi_data(sp);
6471
6472 /* After proper initialization of H/W, register ISR */
6473 if (sp->intr_type == MSI) {
6474 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6475 IRQF_SHARED, sp->name, dev);
6476 if (err) {
6477 pci_disable_msi(sp->pdev);
6478 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6479 dev->name);
6480 return -1;
6481 }
6482 }
6483 if (sp->intr_type == MSI_X) {
6484 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6485
6486 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6487 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6488 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6489 dev->name, i);
6490 err = request_irq(sp->entries[i].vector,
6491 s2io_msix_fifo_handle, 0, sp->desc[i],
6492 sp->s2io_entries[i].arg);
6493 /* If either data or addr is zero print it */
6494 if(!(sp->msix_info[i].addr &&
6495 sp->msix_info[i].data)) {
6496 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6497 "Data:0x%lx\n",sp->desc[i],
6498 (unsigned long long)
6499 sp->msix_info[i].addr,
6500 (unsigned long)
6501 ntohl(sp->msix_info[i].data));
6502 } else {
6503 msix_tx_cnt++;
6504 }
6505 } else {
6506 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6507 dev->name, i);
6508 err = request_irq(sp->entries[i].vector,
6509 s2io_msix_ring_handle, 0, sp->desc[i],
6510 sp->s2io_entries[i].arg);
6511 /* If either data or addr is zero print it */
6512 if(!(sp->msix_info[i].addr &&
6513 sp->msix_info[i].data)) {
6514 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6515 "Data:0x%lx\n",sp->desc[i],
6516 (unsigned long long)
6517 sp->msix_info[i].addr,
6518 (unsigned long)
6519 ntohl(sp->msix_info[i].data));
6520 } else {
6521 msix_rx_cnt++;
6522 }
6523 }
6524 if (err) {
6525 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6526 "failed\n", dev->name, i);
6527 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6528 return -1;
6529 }
6530 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6531 }
6532 printk(KERN_INFO "MSI-X-TX %d entries enabled\n", msix_tx_cnt);
6533 printk(KERN_INFO "MSI-X-RX %d entries enabled\n", msix_rx_cnt);
6534 }
6535 if (sp->intr_type == INTA) {
6536 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6537 sp->name, dev);
6538 if (err) {
6539 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6540 dev->name);
6541 return -1;
6542 }
6543 }
6544 return 0;
6545 }
6546 static void s2io_rem_isr(struct s2io_nic * sp)
6547 {
6548 int cnt = 0;
6549 struct net_device *dev = sp->dev;
6550
6551 if (sp->intr_type == MSI_X) {
6552 int i;
6553 u16 msi_control;
6554
6555 for (i=1; (sp->s2io_entries[i].in_use ==
6556 MSIX_REGISTERED_SUCCESS); i++) {
6557 int vector = sp->entries[i].vector;
6558 void *arg = sp->s2io_entries[i].arg;
6559
6560 free_irq(vector, arg);
6561 }
6562 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6563 msi_control &= 0xFFFE; /* Disable MSI */
6564 pci_write_config_word(sp->pdev, 0x42, msi_control);
6565
6566 pci_disable_msix(sp->pdev);
6567 } else {
6568 free_irq(sp->pdev->irq, dev);
6569 if (sp->intr_type == MSI) {
6570 u16 val;
6571
6572 pci_disable_msi(sp->pdev);
6573 pci_read_config_word(sp->pdev, 0x4c, &val);
6574 val ^= 0x1;
6575 pci_write_config_word(sp->pdev, 0x4c, val);
6576 }
6577 }
6578 /* Waiting till all Interrupt handlers are complete */
6579 cnt = 0;
6580 do {
6581 msleep(10);
6582 if (!atomic_read(&sp->isr_cnt))
6583 break;
6584 cnt++;
6585 } while(cnt < 5);
6586 }
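
/*
 * Sketch: the hard-coded config offsets 0x42/0x4c above appear to point
 * into this device's MSI-X/MSI capability structures; a portable
 * alternative would locate them through the capability list
 * (illustrative only):
 */
#if 0
	int pos = pci_find_capability(sp->pdev, PCI_CAP_ID_MSIX);
	if (pos)
		pci_read_config_word(sp->pdev, pos + PCI_MSIX_FLAGS,
				     &msi_control);
#endif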
6587
6588 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6589 {
6590 int cnt = 0;
6591 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6592 unsigned long flags;
6593 register u64 val64 = 0;
6594
6595 del_timer_sync(&sp->alarm_timer);
6596 /* If s2io_set_link task is executing, wait till it completes. */
6597 while (test_and_set_bit(0, &(sp->link_state))) {
6598 msleep(50);
6599 }
6600 atomic_set(&sp->card_state, CARD_DOWN);
6601
6602 /* disable Tx and Rx traffic on the NIC */
6603 if (do_io)
6604 stop_nic(sp);
6605
6606 s2io_rem_isr(sp);
6607
6608 /* Kill tasklet. */
6609 tasklet_kill(&sp->task);
6610
6611 /* Check if the device is Quiescent and then Reset the NIC */
6612 while(do_io) {
6613 /* As per the HW requirement we need to replenish the
6614 * receive buffers to avoid a ring bump. Since there is
6615 * no intention of processing the Rx frames at this point,
6616 * we just set the ownership bit of the RxDs in each Rx
6617 * ring back to HW and set the appropriate buffer size
6618 * based on the ring mode.
6619 */
6620 rxd_owner_bit_reset(sp);
6621
6622 val64 = readq(&bar0->adapter_status);
6623 if (verify_xena_quiescence(sp)) {
6624 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6625 break;
6626 }
6627
6628 msleep(50);
6629 cnt++;
6630 if (cnt == 10) {
6631 DBG_PRINT(ERR_DBG,
6632 "s2io_close:Device not Quiescent ");
6633 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6634 (unsigned long long) val64);
6635 break;
6636 }
6637 }
6638 if (do_io)
6639 s2io_reset(sp);
6640
6641 spin_lock_irqsave(&sp->tx_lock, flags);
6642 /* Free all Tx buffers */
6643 free_tx_buffers(sp);
6644 spin_unlock_irqrestore(&sp->tx_lock, flags);
6645
6646 /* Free all Rx buffers */
6647 spin_lock_irqsave(&sp->rx_lock, flags);
6648 free_rx_buffers(sp);
6649 spin_unlock_irqrestore(&sp->rx_lock, flags);
6650
6651 clear_bit(0, &(sp->link_state));
6652 }
6653
6654 static void s2io_card_down(struct s2io_nic * sp)
6655 {
6656 do_s2io_card_down(sp, 1);
6657 }
6658
6659 static int s2io_card_up(struct s2io_nic * sp)
6660 {
6661 int i, ret = 0;
6662 struct mac_info *mac_control;
6663 struct config_param *config;
6664 struct net_device *dev = (struct net_device *) sp->dev;
6665 u16 interruptible;
6666
6667 /* Initialize the H/W I/O registers */
6668 if (init_nic(sp) != 0) {
6669 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6670 dev->name);
6671 s2io_reset(sp);
6672 return -ENODEV;
6673 }
6674
6675 /*
6676 * Initialize the Rx buffers by filling the Rx blocks of each
6677 * configured Rx ring with fresh buffers.
6678 */
6679 mac_control = &sp->mac_control;
6680 config = &sp->config;
6681
6682 for (i = 0; i < config->rx_ring_num; i++) {
6683 if ((ret = fill_rx_buffers(sp, i))) {
6684 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6685 dev->name);
6686 s2io_reset(sp);
6687 free_rx_buffers(sp);
6688 return -ENOMEM;
6689 }
6690 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6691 atomic_read(&sp->rx_bufs_left[i]));
6692 }
6693 /* Maintain the state prior to the open */
6694 if (sp->promisc_flg)
6695 sp->promisc_flg = 0;
6696 if (sp->m_cast_flg) {
6697 sp->m_cast_flg = 0;
6698 sp->all_multi_pos= 0;
6699 }
6700
6701 /* Setting its receive mode */
6702 s2io_set_multicast(dev);
6703
6704 if (sp->lro) {
6705 /* Initialize max aggregatable pkts per session based on MTU */
6706 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6707 /* Check if we can use (if specified) the user provided value */
6708 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6709 sp->lro_max_aggr_per_sess = lro_max_pkts;
6710 }
6711
6712 /* Enable Rx Traffic and interrupts on the NIC */
6713 if (start_nic(sp)) {
6714 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6715 s2io_reset(sp);
6716 free_rx_buffers(sp);
6717 return -ENODEV;
6718 }
6719
6720 /* Add interrupt service routine */
6721 if (s2io_add_isr(sp) != 0) {
6722 if (sp->intr_type == MSI_X)
6723 s2io_rem_isr(sp);
6724 s2io_reset(sp);
6725 free_rx_buffers(sp);
6726 return -ENODEV;
6727 }
6728
6729 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6730
6731 /* Enable tasklet for the device */
6732 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6733
6734 /* Enable select interrupts */
6735 if (sp->intr_type != INTA)
6736 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6737 else {
6738 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6739 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6740 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6741 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6742 }
6743
6744
6745 atomic_set(&sp->card_state, CARD_UP);
6746 return 0;
6747 }
6748
6749 /**
6750 * s2io_restart_nic - Resets the NIC.
6751 * @data : long pointer to the device private structure
6752 * Description:
6753 * This function is scheduled to be run by the s2io_tx_watchdog
6754 * function after 0.5 secs to reset the NIC. The idea is to reduce
6755 * the run time of the watch dog routine which is run holding a
6756 * spin lock.
6757 */
6758
6759 static void s2io_restart_nic(struct work_struct *work)
6760 {
6761 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6762 struct net_device *dev = sp->dev;
6763
6764 rtnl_lock();
6765
6766 if (!netif_running(dev))
6767 goto out_unlock;
6768
6769 s2io_card_down(sp);
6770 if (s2io_card_up(sp)) {
6771 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6772 dev->name);
6773 }
6774 netif_wake_queue(dev);
6775 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6776 dev->name);
6777 out_unlock:
6778 rtnl_unlock();
6779 }
6780
6781 /**
6782 * s2io_tx_watchdog - Watchdog for transmit side.
6783 * @dev : Pointer to net device structure
6784 * Description:
6785 * This function is triggered if the Tx Queue is stopped
6786 * for a pre-defined amount of time when the Interface is still up.
6787 * If the Interface is jammed in such a situation, the hardware is
6788 * reset (by s2io_close) and restarted again (by s2io_open) to
6789 * overcome any problem that might have been caused in the hardware.
6790 * Return value:
6791 * void
6792 */
6793
6794 static void s2io_tx_watchdog(struct net_device *dev)
6795 {
6796 struct s2io_nic *sp = dev->priv;
6797
6798 if (netif_carrier_ok(dev)) {
6799 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6800 schedule_work(&sp->rst_timer_task);
6801 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6802 }
6803 }
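
/*
 * Sketch: the watchdog above is invoked by the networking core once the
 * device has been registered with a timeout handler (illustrative,
 * assuming the usual net_device setup done in the probe routine):
 */
#if 0
	dev->tx_timeout = s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;	/* driver-defined */
#endif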
6804
6805 /**
6806 * rx_osm_handler - To perform some OS related operations on SKB.
6807 * @sp: private member of the device structure,pointer to s2io_nic structure.
6808 * @skb : the socket buffer pointer.
6809 * @len : length of the packet
6810 * @cksum : FCS checksum of the frame.
6811 * @ring_no : the ring from which this RxD was extracted.
6812 * Description:
6813 * This function is called by the Rx interrupt service routine to perform
6814 * some OS related operations on the SKB before passing it to the upper
6815 * layers. It mainly checks if the checksum is OK, if so adds it to the
6816 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6817 * to the upper layer. If the checksum is wrong, it increments the Rx
6818 * packet error count, frees the SKB and returns error.
6819 * Return value:
6820 * SUCCESS on success and -1 on failure.
6821 */
6822 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6823 {
6824 struct s2io_nic *sp = ring_data->nic;
6825 struct net_device *dev = (struct net_device *) sp->dev;
6826 struct sk_buff *skb = (struct sk_buff *)
6827 ((unsigned long) rxdp->Host_Control);
6828 int ring_no = ring_data->ring_no;
6829 u16 l3_csum, l4_csum;
6830 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6831 struct lro *lro;
6832 u8 err_mask;
6833
6834 skb->dev = dev;
6835
6836 if (err) {
6837 /* Check for parity error */
6838 if (err & 0x1) {
6839 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6840 }
6841 err_mask = err >> 48;
6842 switch(err_mask) {
6843 case 1:
6844 sp->mac_control.stats_info->sw_stat.
6845 rx_parity_err_cnt++;
6846 break;
6847
6848 case 2:
6849 sp->mac_control.stats_info->sw_stat.
6850 rx_abort_cnt++;
6851 break;
6852
6853 case 3:
6854 sp->mac_control.stats_info->sw_stat.
6855 rx_parity_abort_cnt++;
6856 break;
6857
6858 case 4:
6859 sp->mac_control.stats_info->sw_stat.
6860 rx_rda_fail_cnt++;
6861 break;
6862
6863 case 5:
6864 sp->mac_control.stats_info->sw_stat.
6865 rx_unkn_prot_cnt++;
6866 break;
6867
6868 case 6:
6869 sp->mac_control.stats_info->sw_stat.
6870 rx_fcs_err_cnt++;
6871 break;
6872
6873 case 7:
6874 sp->mac_control.stats_info->sw_stat.
6875 rx_buf_size_err_cnt++;
6876 break;
6877
6878 case 8:
6879 sp->mac_control.stats_info->sw_stat.
6880 rx_rxd_corrupt_cnt++;
6881 break;
6882
6883 case 15:
6884 sp->mac_control.stats_info->sw_stat.
6885 rx_unkn_err_cnt++;
6886 break;
6887 }
6888 /*
6889 * Drop the packet if bad transfer code. Exception being
6890 * 0x5, which could be due to unsupported IPv6 extension header.
6891 * In this case, we let stack handle the packet.
6892 * Note that in this case, since checksum will be incorrect,
6893 * stack will validate the same.
6894 */
6895 if (err_mask != 0x5) {
6896 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
6897 dev->name, err_mask);
6898 sp->stats.rx_crc_errors++;
6899 sp->mac_control.stats_info->sw_stat.mem_freed
6900 += skb->truesize;
6901 dev_kfree_skb(skb);
6902 atomic_dec(&sp->rx_bufs_left[ring_no]);
6903 rxdp->Host_Control = 0;
6904 return 0;
6905 }
6906 }
6907
6908 /* Updating statistics */
6909 rxdp->Host_Control = 0;
6910 if (sp->rxd_mode == RXD_MODE_1) {
6911 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6912
6913 sp->stats.rx_bytes += len;
6914 skb_put(skb, len);
6915
6916 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6917 int get_block = ring_data->rx_curr_get_info.block_index;
6918 int get_off = ring_data->rx_curr_get_info.offset;
6919 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6920 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6921 unsigned char *buff = skb_push(skb, buf0_len);
6922
6923 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6924 sp->stats.rx_bytes += buf0_len + buf2_len;
6925 memcpy(buff, ba->ba_0, buf0_len);
6926
6927 if (sp->rxd_mode == RXD_MODE_3A) {
6928 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6929
6930 skb_put(skb, buf1_len);
6931 skb->len += buf2_len;
6932 skb->data_len += buf2_len;
6933 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6934 sp->stats.rx_bytes += buf1_len;
6935
6936 } else
6937 skb_put(skb, buf2_len);
6938 }
6939
6940 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6941 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6942 (sp->rx_csum)) {
6943 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6944 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6945 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6946 /*
6947 * NIC verifies if the Checksum of the received
6948 * frame is Ok or not and accordingly returns
6949 * a flag in the RxD.
6950 */
6951 skb->ip_summed = CHECKSUM_UNNECESSARY;
6952 if (sp->lro) {
6953 u32 tcp_len;
6954 u8 *tcp;
6955 int ret = 0;
6956
6957 ret = s2io_club_tcp_session(skb->data, &tcp,
6958 &tcp_len, &lro, rxdp, sp);
6959 switch (ret) {
6960 case 3: /* Begin anew */
6961 lro->parent = skb;
6962 goto aggregate;
6963 case 1: /* Aggregate */
6964 {
6965 lro_append_pkt(sp, lro,
6966 skb, tcp_len);
6967 goto aggregate;
6968 }
6969 case 4: /* Flush session */
6970 {
6971 lro_append_pkt(sp, lro,
6972 skb, tcp_len);
6973 queue_rx_frame(lro->parent);
6974 clear_lro_session(lro);
6975 sp->mac_control.stats_info->
6976 sw_stat.flush_max_pkts++;
6977 goto aggregate;
6978 }
6979 case 2: /* Flush both */
6980 lro->parent->data_len =
6981 lro->frags_len;
6982 sp->mac_control.stats_info->
6983 sw_stat.sending_both++;
6984 queue_rx_frame(lro->parent);
6985 clear_lro_session(lro);
6986 goto send_up;
6987 case 0: /* sessions exceeded */
6988 case -1: /* non-TCP or not
6989 * L2 aggregatable
6990 */
6991 case 5: /*
6992 * First pkt in session not
6993 * L3/L4 aggregatable
6994 */
6995 break;
6996 default:
6997 DBG_PRINT(ERR_DBG,
6998 "%s: Samadhana!!\n",
6999 __FUNCTION__);
7000 BUG();
7001 }
7002 }
7003 } else {
7004 /*
7005 * Packet with erroneous checksum, let the
7006 * upper layers deal with it.
7007 */
7008 skb->ip_summed = CHECKSUM_NONE;
7009 }
7010 } else {
7011 skb->ip_summed = CHECKSUM_NONE;
7012 }
7013 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7014 if (!sp->lro) {
7015 skb->protocol = eth_type_trans(skb, dev);
7016 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7017 vlan_strip_flag)) {
7018 /* Queueing the vlan frame to the upper layer */
7019 if (napi)
7020 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7021 RXD_GET_VLAN_TAG(rxdp->Control_2));
7022 else
7023 vlan_hwaccel_rx(skb, sp->vlgrp,
7024 RXD_GET_VLAN_TAG(rxdp->Control_2));
7025 } else {
7026 if (napi)
7027 netif_receive_skb(skb);
7028 else
7029 netif_rx(skb);
7030 }
7031 } else {
7032 send_up:
7033 queue_rx_frame(skb);
7034 }
7035 dev->last_rx = jiffies;
7036 aggregate:
7037 atomic_dec(&sp->rx_bufs_left[ring_no]);
7038 return SUCCESS;
7039 }
7040
7041 /**
7042 * s2io_link - stops/starts the Tx queue.
7043 * @sp : private member of the device structure, which is a pointer to the
7044 * s2io_nic structure.
7045 * @link : indicates whether the link is UP/DOWN.
7046 * Description:
7047 * This function stops/starts the Tx queue depending on whether the link
7048 * status of the NIC is down or up. This is called by the Alarm
7049 * interrupt handler whenever a link change interrupt comes up.
7050 * Return value:
7051 * void.
7052 */
7053
7054 static void s2io_link(struct s2io_nic * sp, int link)
7055 {
7056 struct net_device *dev = (struct net_device *) sp->dev;
7057
7058 if (link != sp->last_link_state) {
7059 if (link == LINK_DOWN) {
7060 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7061 netif_carrier_off(dev);
7062 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7063 sp->mac_control.stats_info->sw_stat.link_up_time =
7064 jiffies - sp->start_time;
7065 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7066 } else {
7067 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7068 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7069 sp->mac_control.stats_info->sw_stat.link_down_time =
7070 jiffies - sp->start_time;
7071 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7072 netif_carrier_on(dev);
7073 }
7074 }
7075 sp->last_link_state = link;
7076 sp->start_time = jiffies;
7077 }
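/*
 * Illustrative helper (a sketch, not wired into the driver): the
 * sw_stat link_up_time/link_down_time fields set above hold raw
 * jiffies deltas, so converting one for display could look like this.
 */
static unsigned int s2io_link_time_ms(unsigned long jiffies_delta)
{
	return jiffies_to_msecs(jiffies_delta);
}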
7078
7079 /**
7080 * get_xena_rev_id - to identify revision ID of xena.
7081 * @pdev : PCI Dev structure
7082 * Description:
7083 * Function to identify the Revision ID of xena.
7084 * Return value:
7085 * returns the revision ID of the device.
7086 */
7087
7088 static int get_xena_rev_id(struct pci_dev *pdev)
7089 {
7090 u8 id = 0;
7091 pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
7093 return id;
7094 }
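/*
 * Note: on kernels where struct pci_dev caches the revision in a
 * 'revision' member, the config-space read above could simply become
 * 'return pdev->revision;'. The explicit read is kept here for
 * compatibility with older kernels.
 */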
7095
7096 /**
7097 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7098 * @sp : private member of the device structure, which is a pointer to the
7099 * s2io_nic structure.
7100 * Description:
7101 * This function initializes a few of the PCI and PCI-X configuration registers
7102 * with recommended values.
7103 * Return value:
7104 * void
7105 */
7106
7107 static void s2io_init_pci(struct s2io_nic * sp)
7108 {
7109 u16 pci_cmd = 0, pcix_cmd = 0;
7110
7111 /* Enable Data Parity Error Recovery in PCI-X command register. */
7112 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7113 &(pcix_cmd));
7114 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7115 (pcix_cmd | 1));
7116 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7117 &(pcix_cmd));
7118
7119 /* Set the PErr Response bit in PCI command register. */
7120 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7121 pci_write_config_word(sp->pdev, PCI_COMMAND,
7122 (pci_cmd | PCI_COMMAND_PARITY));
7123 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7124 }
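/*
 * Illustrative alternative (a sketch, not used by the driver): the
 * PCI-X command register can also be located through the capability
 * list instead of the fixed PCIX_COMMAND_REGISTER offset.
 * PCI_CAP_ID_PCIX, PCI_X_CMD and PCI_X_CMD_DPERR_E are the standard
 * definitions from <linux/pci_regs.h>.
 */
static void s2io_enable_pcix_dper(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	u16 cmd;

	if (!pos)
		return;
	/* Set the Data Parity Error Recovery Enable bit */
	pci_read_config_word(pdev, pos + PCI_X_CMD, &cmd);
	pci_write_config_word(pdev, pos + PCI_X_CMD, cmd | PCI_X_CMD_DPERR_E);
}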
7125
7126 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7127 {
7128 if (tx_fifo_num > 8) {
7129 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7130 "supported\n");
7131 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7132 tx_fifo_num = 8;
7133 }
7134 if (rx_ring_num > 8) {
7135 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7136 "supported\n");
7137 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7138 rx_ring_num = 8;
7139 }
7140 if (*dev_intr_type != INTA)
7141 napi = 0;
7142
7143 #ifndef CONFIG_PCI_MSI
7144 if (*dev_intr_type != INTA) {
7145 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
7146 "MSI/MSI-X. Defaulting to INTA\n");
7147 *dev_intr_type = INTA;
7148 }
7149 #else
7150 if (*dev_intr_type > MSI_X) {
7151 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7152 "Defaulting to INTA\n");
7153 *dev_intr_type = INTA;
7154 }
7155 #endif
7156 if ((*dev_intr_type == MSI_X) &&
7157 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7158 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7159 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7160 "Defaulting to INTA\n");
7161 *dev_intr_type = INTA;
7162 }
7163
7164 if (rx_ring_mode > 3) {
7165 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7166 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
7167 rx_ring_mode = 3;
7168 }
7169 return SUCCESS;
7170 }
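/*
 * Example module load (illustrative values): four Tx FIFOs, two Rx
 * rings and MSI-X interrupts:
 *
 *	modprobe s2io tx_fifo_num=4 rx_ring_num=2 intr_type=2
 *
 * Out-of-range values are clamped back to safe defaults above rather
 * than failing the load, and any non-INTA interrupt type also forces
 * NAPI off, as coded above.
 */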
7171
7172 /**
7173 * rts_ds_steer - Receive traffic steering based on IPv4 TOS or IPv6
7174 * Traffic Class, respectively.
7175 * @nic: device private variable
7176 * @ds_codepoint: DS codepoint (0-63) to match; @ring: destination ring
7177 * Description: The function configures the receive steering of the
7178 * given DS codepoint to the desired receive ring.
7179 * Return Value: SUCCESS on success, FAILURE otherwise.
7180 */
7181 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7182 {
7183 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7184 register u64 val64 = 0;
7185
7186 if (ds_codepoint > 63)
7187 return FAILURE;
7188
7189 val64 = RTS_DS_MEM_DATA(ring);
7190 writeq(val64, &bar0->rts_ds_mem_data);
7191
7192 val64 = RTS_DS_MEM_CTRL_WE |
7193 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7194 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7195
7196 writeq(val64, &bar0->rts_ds_mem_ctrl);
7197
7198 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7199 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7200 S2IO_BIT_RESET);
7201 }
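/*
 * Usage sketch (illustrative, not wired into the driver): steer all
 * traffic carrying the Expedited Forwarding DSCP (codepoint 46) to
 * receive ring 1.
 */
static int s2io_steer_ef_example(struct s2io_nic *nic)
{
	return rts_ds_steer(nic, 46, 1);
}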
7202
7203 /**
7204 * s2io_init_nic - Initialization of the adapter.
7205 * @pdev : structure containing the PCI related information of the device.
7206 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7207 * Description:
7208 * The function initializes an adapter identified by the pci_dev structure.
7209 * All OS related initialization including memory and device structure and
7210 * initialization of the device private variable is done. Also the swapper
7211 * control register is initialized to enable read and write into the I/O
7212 * registers of the device.
7213 * Return value:
7214 * returns 0 on success and negative on failure.
7215 */
7216
7217 static int __devinit
7218 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7219 {
7220 struct s2io_nic *sp;
7221 struct net_device *dev;
7222 int i, j, ret;
7223 int dma_flag = FALSE;
7224 u32 mac_up, mac_down;
7225 u64 val64 = 0, tmp64 = 0;
7226 struct XENA_dev_config __iomem *bar0 = NULL;
7227 u16 subid;
7228 struct mac_info *mac_control;
7229 struct config_param *config;
7230 int mode;
7231 u8 dev_intr_type = intr_type;
7232
7233 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7234 return ret;
7235
7236 if ((ret = pci_enable_device(pdev))) {
7237 DBG_PRINT(ERR_DBG,
7238 "s2io_init_nic: pci_enable_device failed\n");
7239 return ret;
7240 }
7241
7242 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7243 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7244 dma_flag = TRUE;
7245 if (pci_set_consistent_dma_mask
7246 (pdev, DMA_64BIT_MASK)) {
7247 DBG_PRINT(ERR_DBG,
7248 "Unable to obtain 64bit DMA for \
7249 consistent allocations\n");
7250 pci_disable_device(pdev);
7251 return -ENOMEM;
7252 }
7253 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7254 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7255 } else {
7256 pci_disable_device(pdev);
7257 return -ENOMEM;
7258 }
7259 if (dev_intr_type != MSI_X) {
7260 if (pci_request_regions(pdev, s2io_driver_name)) {
7261 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
7262 pci_disable_device(pdev);
7263 return -ENODEV;
7264 }
7265 }
7266 else {
7267 if (!(request_mem_region(pci_resource_start(pdev, 0),
7268 pci_resource_len(pdev, 0), s2io_driver_name))) {
7269 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
7270 pci_disable_device(pdev);
7271 return -ENODEV;
7272 }
7273 if (!(request_mem_region(pci_resource_start(pdev, 2),
7274 pci_resource_len(pdev, 2), s2io_driver_name))) {
7275 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
7276 release_mem_region(pci_resource_start(pdev, 0),
7277 pci_resource_len(pdev, 0));
7278 pci_disable_device(pdev);
7279 return -ENODEV;
7280 }
7281 }
7282
7283 dev = alloc_etherdev(sizeof(struct s2io_nic));
7284 if (dev == NULL) {
7285 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7286 pci_disable_device(pdev);
7287 pci_release_regions(pdev);
7288 return -ENODEV;
7289 }
7290
7291 pci_set_master(pdev);
7292 pci_set_drvdata(pdev, dev);
7293 SET_MODULE_OWNER(dev);
7294 SET_NETDEV_DEV(dev, &pdev->dev);
7295
7296 /* Private member variable initialized to s2io NIC structure */
7297 sp = dev->priv;
7298 memset(sp, 0, sizeof(struct s2io_nic));
7299 sp->dev = dev;
7300 sp->pdev = pdev;
7301 sp->high_dma_flag = dma_flag;
7302 sp->device_enabled_once = FALSE;
7303 if (rx_ring_mode == 1)
7304 sp->rxd_mode = RXD_MODE_1;
7305 if (rx_ring_mode == 2)
7306 sp->rxd_mode = RXD_MODE_3B;
7307 if (rx_ring_mode == 3)
7308 sp->rxd_mode = RXD_MODE_3A;
7309
7310 sp->intr_type = dev_intr_type;
7311
7312 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7313 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7314 sp->device_type = XFRAME_II_DEVICE;
7315 else
7316 sp->device_type = XFRAME_I_DEVICE;
7317
7318 sp->lro = lro;
7319
7320 /* Initialize some PCI/PCI-X fields of the NIC. */
7321 s2io_init_pci(sp);
7322
7323 /*
7324 * Setting the device configuration parameters.
7325 * Most of these parameters can be specified by the user during
7326 * module insertion as they are module loadable parameters. If
7327 * these parameters are not specified during load time, they
7328 * are initialized with default values.
7329 */
7330 mac_control = &sp->mac_control;
7331 config = &sp->config;
7332
7333 /* Tx side parameters. */
7334 config->tx_fifo_num = tx_fifo_num;
7335 for (i = 0; i < MAX_TX_FIFOS; i++) {
7336 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7337 config->tx_cfg[i].fifo_priority = i;
7338 }
7339
7340 /* mapping the QoS priority to the configured fifos */
7341 for (i = 0; i < MAX_TX_FIFOS; i++)
7342 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7343
7344 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7345 for (i = 0; i < config->tx_fifo_num; i++) {
7346 config->tx_cfg[i].f_no_snoop =
7347 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7348 if (config->tx_cfg[i].fifo_len < 65) {
7349 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7350 break;
7351 }
7352 }
7353 /* + 2 because one Txd for skb->data and one Txd for UFO */
7354 config->max_txds = MAX_SKB_FRAGS + 2;
7355
7356 /* Rx side parameters. */
7357 config->rx_ring_num = rx_ring_num;
7358 for (i = 0; i < MAX_RX_RINGS; i++) {
7359 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7360 (rxd_count[sp->rxd_mode] + 1);
7361 config->rx_cfg[i].ring_priority = i;
7362 }
7363
7364 for (i = 0; i < rx_ring_num; i++) {
7365 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7366 config->rx_cfg[i].f_no_snoop =
7367 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7368 }
7369
7370 /* Setting Mac Control parameters */
7371 mac_control->rmac_pause_time = rmac_pause_time;
7372 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7373 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7374
7375
7376 /* Initialize Ring buffer parameters. */
7377 for (i = 0; i < config->rx_ring_num; i++)
7378 atomic_set(&sp->rx_bufs_left[i], 0);
7379
7380 /* Initialize the number of ISRs currently running */
7381 atomic_set(&sp->isr_cnt, 0);
7382
7383 /* initialize the shared memory used by the NIC and the host */
7384 if (init_shared_mem(sp)) {
7385 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7386 dev->name);
7387 ret = -ENOMEM;
7388 goto mem_alloc_failed;
7389 }
7390
7391 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7392 pci_resource_len(pdev, 0));
7393 if (!sp->bar0) {
7394 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7395 dev->name);
7396 ret = -ENOMEM;
7397 goto bar0_remap_failed;
7398 }
7399
7400 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7401 pci_resource_len(pdev, 2));
7402 if (!sp->bar1) {
7403 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7404 dev->name);
7405 ret = -ENOMEM;
7406 goto bar1_remap_failed;
7407 }
7408
7409 dev->irq = pdev->irq;
7410 dev->base_addr = (unsigned long) sp->bar0;
7411
7412 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7413 for (j = 0; j < MAX_TX_FIFOS; j++) {
7414 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7415 (sp->bar1 + (j * 0x00020000));
7416 }
7417
7418 /* Driver entry points */
7419 dev->open = &s2io_open;
7420 dev->stop = &s2io_close;
7421 dev->hard_start_xmit = &s2io_xmit;
7422 dev->get_stats = &s2io_get_stats;
7423 dev->set_multicast_list = &s2io_set_multicast;
7424 dev->do_ioctl = &s2io_ioctl;
7425 dev->change_mtu = &s2io_change_mtu;
7426 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7427 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7428 dev->vlan_rx_register = s2io_vlan_rx_register;
7429
7430 /*
7431 * will use eth_mac_addr() for dev->set_mac_address
7432 * mac address will be set every time dev->open() is called
7433 */
7434 dev->poll = s2io_poll;
7435 dev->weight = 32;
7436
7437 #ifdef CONFIG_NET_POLL_CONTROLLER
7438 dev->poll_controller = s2io_netpoll;
7439 #endif
7440
7441 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7442 if (sp->high_dma_flag == TRUE)
7443 dev->features |= NETIF_F_HIGHDMA;
7444 dev->features |= NETIF_F_TSO;
7445 dev->features |= NETIF_F_TSO6;
7446 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7447 dev->features |= NETIF_F_UFO;
7448 dev->features |= NETIF_F_HW_CSUM;
7449 }
7450
7451 dev->tx_timeout = &s2io_tx_watchdog;
7452 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7453 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7454 INIT_WORK(&sp->set_link_task, s2io_set_link);
7455
7456 pci_save_state(sp->pdev);
7457
7458 /* Setting swapper control on the NIC, for proper reset operation */
7459 if (s2io_set_swapper(sp)) {
7460 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7461 dev->name);
7462 ret = -EAGAIN;
7463 goto set_swap_failed;
7464 }
7465
7466 /* Verify if the Herc works on the slot its placed into */
7467 if (sp->device_type & XFRAME_II_DEVICE) {
7468 mode = s2io_verify_pci_mode(sp);
7469 if (mode < 0) {
7470 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7471 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7472 ret = -EBADSLT;
7473 goto set_swap_failed;
7474 }
7475 }
7476
7477 /* Not needed for Herc */
7478 if (sp->device_type & XFRAME_I_DEVICE) {
7479 /*
7480 * Fix for all "FFs" MAC address problems observed on
7481 * Alpha platforms
7482 */
7483 fix_mac_address(sp);
7484 s2io_reset(sp);
7485 }
7486
7487 /*
7488 * MAC address initialization.
7489 * For now only one mac address will be read and used.
7490 */
7491 bar0 = sp->bar0;
7492 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7493 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7494 writeq(val64, &bar0->rmac_addr_cmd_mem);
7495 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7496 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7497 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7498 mac_down = (u32) tmp64;
7499 mac_up = (u32) (tmp64 >> 32);
7500
7501 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7502 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7503 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7504 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7505 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7506 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7507
7508 /* Set the factory defined MAC address initially */
7509 dev->addr_len = ETH_ALEN;
7510 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7511
7512 /* reset Nic and bring it to known state */
7513 s2io_reset(sp);
7514
7515 /*
7516 * Initialize the tasklet status and link state flags
7517 * and the card state parameter
7518 */
7519 atomic_set(&(sp->card_state), 0);
7520 sp->tasklet_status = 0;
7521 sp->link_state = 0;
7522
7523 /* Initialize spinlocks */
7524 spin_lock_init(&sp->tx_lock);
7525
7526 if (!napi)
7527 spin_lock_init(&sp->put_lock);
7528 spin_lock_init(&sp->rx_lock);
7529
7530 /*
7531 * SXE-002: Configure link and activity LED to init state
7532 * on driver load.
7533 */
7534 subid = sp->pdev->subsystem_device;
7535 if ((subid & 0xFF) >= 0x07) {
7536 val64 = readq(&bar0->gpio_control);
7537 val64 |= 0x0000800000000000ULL;
7538 writeq(val64, &bar0->gpio_control);
7539 val64 = 0x0411040400000000ULL;
7540 writeq(val64, (void __iomem *) bar0 + 0x2700);
7541 val64 = readq(&bar0->gpio_control);
7542 }
7543
7544 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7545
7546 if (register_netdev(dev)) {
7547 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7548 ret = -ENODEV;
7549 goto register_failed;
7550 }
7551 s2io_vpd_read(sp);
7552 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7553 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7554 sp->product_name, get_xena_rev_id(sp->pdev));
7555 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7556 s2io_driver_version);
7557 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7558 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7559 sp->def_mac_addr[0].mac_addr[0],
7560 sp->def_mac_addr[0].mac_addr[1],
7561 sp->def_mac_addr[0].mac_addr[2],
7562 sp->def_mac_addr[0].mac_addr[3],
7563 sp->def_mac_addr[0].mac_addr[4],
7564 sp->def_mac_addr[0].mac_addr[5]);
7565 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7566 if (sp->device_type & XFRAME_II_DEVICE) {
7567 mode = s2io_print_pci_mode(sp);
7568 if (mode < 0) {
7569 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7570 ret = -EBADSLT;
7571 unregister_netdev(dev);
7572 goto set_swap_failed;
7573 }
7574 }
7575 switch(sp->rxd_mode) {
7576 case RXD_MODE_1:
7577 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7578 dev->name);
7579 break;
7580 case RXD_MODE_3B:
7581 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7582 dev->name);
7583 break;
7584 case RXD_MODE_3A:
7585 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7586 dev->name);
7587 break;
7588 }
7589
7590 if (napi)
7591 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7592 switch(sp->intr_type) {
7593 case INTA:
7594 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7595 break;
7596 case MSI:
7597 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7598 break;
7599 case MSI_X:
7600 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7601 break;
7602 }
7603 if (sp->lro)
7604 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7605 dev->name);
7606 if (ufo)
7607 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7608 " enabled\n", dev->name);
7609 /* Initialize device name */
7610 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7611
7612 /* Initialize bimodal Interrupts */
7613 sp->config.bimodal = bimodal;
7614 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7615 sp->config.bimodal = 0;
7616 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7617 dev->name);
7618 }
7619
7620 /*
7621 * Make Link state as off at this point, when the Link change
7622 * interrupt comes the state will be automatically changed to
7623 * the right state.
7624 */
7625 netif_carrier_off(dev);
7626
7627 return 0;
7628
7629 register_failed:
7630 set_swap_failed:
7631 iounmap(sp->bar1);
7632 bar1_remap_failed:
7633 iounmap(sp->bar0);
7634 bar0_remap_failed:
7635 mem_alloc_failed:
7636 free_shared_mem(sp);
7637 pci_disable_device(pdev);
7638 if (dev_intr_type != MSI_X)
7639 pci_release_regions(pdev);
7640 else {
7641 release_mem_region(pci_resource_start(pdev, 0),
7642 pci_resource_len(pdev, 0));
7643 release_mem_region(pci_resource_start(pdev, 2),
7644 pci_resource_len(pdev, 2));
7645 }
7646 pci_set_drvdata(pdev, NULL);
7647 free_netdev(dev);
7648
7649 return ret;
7650 }
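/*
 * Note on the unwind path above: the goto labels release resources in
 * reverse order of acquisition - the bar1 and bar0 mappings, then the
 * shared memory, the PCI device, the regions and finally the netdev -
 * so each failure point cleans up only what was already set up.
 */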
7651
7652 /**
7653 * s2io_rem_nic - Free the PCI device
7654 * @pdev: structure containing the PCI related information of the device.
7655 * Description: This function is called by the Pci subsystem to release a
7656 * PCI device and free up all resource held up by the device. This could
7657 * be in response to a Hot plug event or when the driver is to be removed
7658 * from memory.
7659 */
7660
7661 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7662 {
7663 struct net_device *dev =
7664 (struct net_device *) pci_get_drvdata(pdev);
7665 struct s2io_nic *sp;
7666
7667 if (dev == NULL) {
7668 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7669 return;
7670 }
7671
7672 flush_scheduled_work();
7673
7674 sp = dev->priv;
7675 unregister_netdev(dev);
7676
7677 free_shared_mem(sp);
7678 iounmap(sp->bar0);
7679 iounmap(sp->bar1);
7680 if (sp->intr_type != MSI_X)
7681 pci_release_regions(pdev);
7682 else {
7683 release_mem_region(pci_resource_start(pdev, 0),
7684 pci_resource_len(pdev, 0));
7685 release_mem_region(pci_resource_start(pdev, 2),
7686 pci_resource_len(pdev, 2));
7687 }
7688 pci_set_drvdata(pdev, NULL);
7689 free_netdev(dev);
7690 pci_disable_device(pdev);
7691 }
7692
7693 /**
7694 * s2io_starter - Entry point for the driver
7695 * Description: This function is the entry point for the driver. It simply
7696 * registers the driver with the PCI subsystem.
7697 */
7698
7699 int __init s2io_starter(void)
7700 {
7701 return pci_register_driver(&s2io_driver);
7702 }
7703
7704 /**
7705 * s2io_closer - Cleanup routine for the driver
7706 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
7707 */
7708
7709 static __exit void s2io_closer(void)
7710 {
7711 pci_unregister_driver(&s2io_driver);
7712 DBG_PRINT(INIT_DBG, "cleanup done\n");
7713 }
7714
7715 module_init(s2io_starter);
7716 module_exit(s2io_closer);
7717
7718 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7719 struct tcphdr **tcp, struct RxD_t *rxdp)
7720 {
7721 int ip_off;
7722 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7723
7724 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7725 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7726 __FUNCTION__);
7727 return -1;
7728 }
7729
7730 /* TODO:
7731 * By default the VLAN field in the MAC is stripped by the card, if this
7732 * feature is turned off in rx_pa_cfg register, then the ip_off field
7733 * has to be shifted by a further 2 bytes
7734 */
7735 switch (l2_type) {
7736 case 0: /* DIX type */
7737 case 4: /* DIX type with VLAN */
7738 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7739 break;
7740 /* LLC, SNAP etc are considered non-mergeable */
7741 default:
7742 return -1;
7743 }
7744
7745 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7746 ip_len = (u8)((*ip)->ihl);
7747 ip_len <<= 2;
7748 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7749
7750 return 0;
7751 }
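/*
 * Worked example: for a plain DIX Ethernet frame, ip_off is
 * HEADER_ETHERNET_II_802_3_SIZE (the standard 14-byte Ethernet
 * header), so with ihl = 5 the TCP header computed above starts at
 * buffer + 14 + 20 = buffer + 34. VLAN-tagged DIX frames decode the
 * same way here because the card strips the tag by default (see the
 * TODO above about rx_pa_cfg).
 */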
7752
7753 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7754 struct tcphdr *tcp)
7755 {
7756 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7757 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7758 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7759 return -1;
7760 return 0;
7761 }
7762
7763 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7764 {
7765 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
7766 }
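/*
 * Worked example: a 1500-byte IP datagram with no IP options
 * (ihl = 5, i.e. 20 bytes) and no TCP options (doff = 5, i.e. 20
 * bytes) yields an L4 payload of 1500 - 20 - 20 = 1460 bytes.
 */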
7767
7768 static void initiate_new_session(struct lro *lro, u8 *l2h,
7769 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7770 {
7771 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7772 lro->l2h = l2h;
7773 lro->iph = ip;
7774 lro->tcph = tcp;
7775 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7776 lro->tcp_ack = ntohl(tcp->ack_seq);
7777 lro->sg_num = 1;
7778 lro->total_len = ntohs(ip->tot_len);
7779 lro->frags_len = 0;
7780 /*
7781 * check if we saw TCP timestamp. Other consistency checks have
7782 * already been done.
7783 */
7784 if (tcp->doff == 8) {
7785 u32 *ptr;
7786 ptr = (u32 *)(tcp+1);
7787 lro->saw_ts = 1;
7788 lro->cur_tsval = *(ptr+1);
7789 lro->cur_tsecr = *(ptr+2);
7790 }
7791 lro->in_use = 1;
7792 }
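/*
 * Option layout assumed above: with doff == 8 the 12 option bytes are
 * NOP, NOP, TIMESTAMP (kind 8, len 10), tsval, tsecr - the pattern
 * verify_l3_l4_lro_capable() enforces - so word 1 past the TCP header
 * is tsval and word 2 is tsecr. Both are stored in network byte order
 * and echoed back verbatim by update_L3L4_header().
 */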
7793
7794 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7795 {
7796 struct iphdr *ip = lro->iph;
7797 struct tcphdr *tcp = lro->tcph;
7798 __sum16 nchk;
7799 struct stat_block *statinfo = sp->mac_control.stats_info;
7800 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7801
7802 /* Update L3 header */
7803 ip->tot_len = htons(lro->total_len);
7804 ip->check = 0;
7805 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7806 ip->check = nchk;
7807
7808 /* Update L4 header */
7809 tcp->ack_seq = lro->tcp_ack;
7810 tcp->window = lro->window;
7811
7812 /* Update tsecr field if this session has timestamps enabled */
7813 if (lro->saw_ts) {
7814 u32 *ptr = (u32 *)(tcp + 1);
7815 *(ptr+2) = lro->cur_tsecr;
7816 }
7817
7818 /* Update counters required for calculation of
7819 * average no. of packets aggregated.
7820 */
7821 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7822 statinfo->sw_stat.num_aggregations++;
7823 }
7824
7825 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7826 struct tcphdr *tcp, u32 l4_pyld)
7827 {
7828 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7829 lro->total_len += l4_pyld;
7830 lro->frags_len += l4_pyld;
7831 lro->tcp_next_seq += l4_pyld;
7832 lro->sg_num++;
7833
7834 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
7835 lro->tcp_ack = tcp->ack_seq;
7836 lro->window = tcp->window;
7837
7838 if (lro->saw_ts) {
7839 u32 *ptr;
7840 /* Update tsecr and tsval from this packet */
7841 ptr = (u32 *) (tcp + 1);
7842 lro->cur_tsval = *(ptr + 1);
7843 lro->cur_tsecr = *(ptr + 2);
7844 }
7845 }
7846
7847 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7848 struct tcphdr *tcp, u32 tcp_pyld_len)
7849 {
7850 u8 *ptr;
7851
7852 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7853
7854 if (!tcp_pyld_len) {
7855 /* Runt frame or a pure ack */
7856 return -1;
7857 }
7858
7859 if (ip->ihl != 5) /* IP has options */
7860 return -1;
7861
7862 /* If we see CE codepoint in IP header, packet is not mergeable */
7863 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7864 return -1;
7865
7866 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7867 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7868 tcp->ece || tcp->cwr || !tcp->ack) {
7869 /*
7870 * Currently recognize only the ack control word and
7871 * any other control field being set would result in
7872 * flushing the LRO session
7873 */
7874 return -1;
7875 }
7876
7877 /*
7878 * Allow only one TCP timestamp option. Don't aggregate if
7879 * any other options are detected.
7880 */
7881 if (tcp->doff != 5 && tcp->doff != 8)
7882 return -1;
7883
7884 if (tcp->doff == 8) {
7885 ptr = (u8 *)(tcp + 1);
7886 while (*ptr == TCPOPT_NOP)
7887 ptr++;
7888 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7889 return -1;
7890
7891 /* Ensure timestamp value increases monotonically */
7892 if (l_lro)
7893 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7894 return -1;
7895
7896 /* timestamp echo reply should be non-zero */
7897 if (*((u32 *)(ptr+6)) == 0)
7898 return -1;
7899 }
7900
7901 return 0;
7902 }
7903
7904 static int
7905 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7906 struct RxD_t *rxdp, struct s2io_nic *sp)
7907 {
7908 struct iphdr *ip;
7909 struct tcphdr *tcph;
7910 int ret = 0, i;
7911
7912 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7913 rxdp))) {
7914 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7915 ip->saddr, ip->daddr);
7916 } else {
7917 return ret;
7918 }
7919
7920 tcph = (struct tcphdr *)*tcp;
7921 *tcp_len = get_l4_pyld_length(ip, tcph);
7922 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7923 struct lro *l_lro = &sp->lro0_n[i];
7924 if (l_lro->in_use) {
7925 if (check_for_socket_match(l_lro, ip, tcph))
7926 continue;
7927 /* Sock pair matched */
7928 *lro = l_lro;
7929
7930 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7931 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7932 "0x%x, actual 0x%x\n", __FUNCTION__,
7933 (*lro)->tcp_next_seq,
7934 ntohl(tcph->seq));
7935
7936 sp->mac_control.stats_info->
7937 sw_stat.outof_sequence_pkts++;
7938 ret = 2;
7939 break;
7940 }
7941
7942 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7943 ret = 1; /* Aggregate */
7944 else
7945 ret = 2; /* Flush both */
7946 break;
7947 }
7948 }
7949
7950 if (ret == 0) {
7951 /* Before searching for available LRO objects,
7952 * check if the pkt is L3/L4 aggregatable. If not
7953 * don't create new LRO session. Just send this
7954 * packet up.
7955 */
7956 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
7957 return 5;
7958 }
7959
7960 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7961 struct lro *l_lro = &sp->lro0_n[i];
7962 if (!(l_lro->in_use)) {
7963 *lro = l_lro;
7964 ret = 3; /* Begin anew */
7965 break;
7966 }
7967 }
7968 }
7969
7970 if (ret == 0) { /* sessions exceeded */
7971 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
7972 __FUNCTION__);
7973 *lro = NULL;
7974 return ret;
7975 }
7976
7977 switch (ret) {
7978 case 3:
7979 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7980 break;
7981 case 2:
7982 update_L3L4_header(sp, *lro);
7983 break;
7984 case 1:
7985 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
7986 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7987 update_L3L4_header(sp, *lro);
7988 ret = 4; /* Flush the LRO */
7989 }
7990 break;
7991 default:
7992 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
7993 __FUNCTION__);
7994 break;
7995 }
7996
7997 return ret;
7998 }
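/*
 * Summary of s2io_club_tcp_session() return codes, as consumed by the
 * Rx completion path:
 *  -1 - not a TCP frame, or L2 header not mergeable
 *   0 - all LRO sessions already in use; send the packet up as-is
 *   1 - packet aggregated into an existing session
 *   2 - out-of-order or unmergeable packet; flush session and packet
 *   3 - new session initiated with this packet as parent
 *   4 - session reached lro_max_aggr_per_sess; flushed after appending
 *   5 - first packet of a would-be session is not L3/L4 aggregatable
 */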
7999
8000 static void clear_lro_session(struct lro *lro)
8001 {
8002 memset(lro, 0, sizeof(struct lro));
8005 }
8006
8007 static void queue_rx_frame(struct sk_buff *skb)
8008 {
8009 struct net_device *dev = skb->dev;
8010
8011 skb->protocol = eth_type_trans(skb, dev);
8012 if (napi)
8013 netif_receive_skb(skb);
8014 else
8015 netif_rx(skb);
8016 }
8017
8018 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8019 struct sk_buff *skb,
8020 u32 tcp_len)
8021 {
8022 struct sk_buff *first = lro->parent;
8023
8024 first->len += tcp_len;
8025 first->data_len = lro->frags_len;
8026 skb_pull(skb, (skb->len - tcp_len));
8027 if (skb_shinfo(first)->frag_list)
8028 lro->last_frag->next = skb;
8029 else
8030 skb_shinfo(first)->frag_list = skb;
8031 first->truesize += skb->truesize;
8032 lro->last_frag = skb;
8033 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8034 return;
8035 }
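/*
 * After the append, 'first' describes the whole aggregate: first->len
 * grows by each appended TCP payload, first->data_len tracks the bytes
 * held in the frag_list chain (lro->frags_len), and first->truesize
 * accumulates the buffer memory of every chained skb, so the stack
 * accounts for the merged frame as a single skb.
 */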
8036
8037 /**
8038 * s2io_io_error_detected - called when PCI error is detected
8039 * @pdev: Pointer to PCI device
8040 * @state: The current PCI connection state
8041 *
8042 * This function is called after a PCI bus error affecting
8043 * this device has been detected.
8044 */
8045 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8046 pci_channel_state_t state)
8047 {
8048 struct net_device *netdev = pci_get_drvdata(pdev);
8049 struct s2io_nic *sp = netdev->priv;
8050
8051 netif_device_detach(netdev);
8052
8053 if (netif_running(netdev)) {
8054 /* Bring down the card, while avoiding PCI I/O */
8055 do_s2io_card_down(sp, 0);
8056 sp->device_close_flag = TRUE; /* Device is shut down. */
8057 }
8058 pci_disable_device(pdev);
8059
8060 return PCI_ERS_RESULT_NEED_RESET;
8061 }
8062
8063 /**
8064 * s2io_io_slot_reset - called after the pci bus has been reset.
8065 * @pdev: Pointer to PCI device
8066 *
8067 * Restart the card from scratch, as if from a cold-boot.
8068 * At this point, the card has experienced a hard reset,
8069 * followed by fixups by BIOS, and has its config space
8070 * set up identically to what it was at cold boot.
8071 */
8072 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8073 {
8074 struct net_device *netdev = pci_get_drvdata(pdev);
8075 struct s2io_nic *sp = netdev->priv;
8076
8077 if (pci_enable_device(pdev)) {
8078 printk(KERN_ERR "s2io: "
8079 "Cannot re-enable PCI device after reset.\n");
8080 return PCI_ERS_RESULT_DISCONNECT;
8081 }
8082
8083 pci_set_master(pdev);
8084 s2io_reset(sp);
8085
8086 return PCI_ERS_RESULT_RECOVERED;
8087 }
8088
8089 /**
8090 * s2io_io_resume - called when traffic can start flowing again.
8091 * @pdev: Pointer to PCI device
8092 *
8093 * This callback is called when the error recovery driver tells
8094 * us that it is OK to resume normal operation.
8095 */
8096 static void s2io_io_resume(struct pci_dev *pdev)
8097 {
8098 struct net_device *netdev = pci_get_drvdata(pdev);
8099 struct s2io_nic *sp = netdev->priv;
8100
8101 if (netif_running(netdev)) {
8102 if (s2io_card_up(sp)) {
8103 printk(KERN_ERR "s2io: "
8104 "Can't bring device back up after reset.\n");
8105 return;
8106 }
8107
8108 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8109 s2io_card_down(sp);
8110 printk(KERN_ERR "s2io: "
8111 "Can't resetore mac addr after reset.\n");
8112 return;
8113 }
8114 }
8115
8116 netif_device_attach(netdev);
8117 netif_wake_queue(netdev);
8118 }
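/*
 * The three callbacks above implement the standard PCI error recovery
 * sequence (error_detected -> slot_reset -> resume) described in
 * Documentation/pci-error-recovery.txt. A minimal sketch of the
 * hook-up - the driver's actual pci_driver definition lives near the
 * top of this file, and the handler names here follow that pattern:
 *
 *	static struct pci_error_handlers s2io_err_handler = {
 *		.error_detected = s2io_io_error_detected,
 *		.slot_reset = s2io_io_slot_reset,
 *		.resume = s2io_io_resume,
 *	};
 *
 *	static struct pci_driver s2io_driver = {
 *		...
 *		.err_handler = &s2io_err_handler,
 *	};
 */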